// Keys used for logging context JSON. These are the field names the New
// Relic log-ingest pipeline expects, so their values must not change.
const (
	KeyFile       = "file.name"
	KeyLevel      = "log.level"
	KeyLine       = "line.number"
	KeyMessage    = "message"
	KeyMethod     = "method.name"
	KeyTimestamp  = "timestamp"
	KeyTraceID    = "trace.id"
	KeySpanID     = "span.id"
	KeyEntityName = "entity.name"
	KeyEntityType = "entity.type"
	KeyEntityGUID = "entity.guid"
	KeyHostname   = "hostname"
)

// metadataMapField stores val in m under key. Empty values are skipped so
// that absent metadata never produces empty JSON fields.
func metadataMapField(m map[string]interface{}, key, val string) {
	if "" == val {
		return
	}
	m[key] = val
}

// AddLinkingMetadata adds the LinkingMetadata into a map. Only non-empty
// string fields are included in the map. The specific key names facilitate
// agent logs in context. These keys are: "trace.id", "span.id",
// "entity.name", "entity.type", "entity.guid", and "hostname".
+func AddLinkingMetadata(m map[string]interface{}, md newrelic.LinkingMetadata) { + metadataMapField(m, KeyTraceID, md.TraceID) + metadataMapField(m, KeySpanID, md.SpanID) + metadataMapField(m, KeyEntityName, md.EntityName) + metadataMapField(m, KeyEntityType, md.EntityType) + metadataMapField(m, KeyEntityGUID, md.EntityGUID) + metadataMapField(m, KeyHostname, md.Hostname) +} diff --git a/_integrations/logcontext/nrlogrusplugin/README.md b/_integrations/logcontext/nrlogrusplugin/README.md new file mode 100644 index 000000000..e0f36542f --- /dev/null +++ b/_integrations/logcontext/nrlogrusplugin/README.md @@ -0,0 +1,10 @@ +# _integrations/logcontext/nrlogrusplugin [](https://godoc.org/github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin) + +Package `nrlogrusplugin` decorates logs for sending to the New Relic backend. + +```go +import "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin). 
diff --git a/_integrations/logcontext/nrlogrusplugin/example/main.go b/_integrations/logcontext/nrlogrusplugin/example/main.go new file mode 100644 index 000000000..bdd96d40b --- /dev/null +++ b/_integrations/logcontext/nrlogrusplugin/example/main.go @@ -0,0 +1,70 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin" + "github.com/sirupsen/logrus" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func doFunction2(txn newrelic.Transaction, e *logrus.Entry) { + defer newrelic.StartSegment(txn, "doFunction2").End() + e.Error("In doFunction2") +} + +func doFunction1(txn newrelic.Transaction, e *logrus.Entry) { + defer newrelic.StartSegment(txn, "doFunction1").End() + e.Trace("In doFunction1") + doFunction2(txn, e) +} + +func main() { + log := logrus.New() + // To enable New Relic log decoration, use the + // nrlogrusplugin.ContextFormatter{} + log.SetFormatter(nrlogrusplugin.ContextFormatter{}) + log.SetLevel(logrus.TraceLevel) + + log.Debug("Logger created") + + cfg := newrelic.NewConfig("Logrus Log Decoration", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.DistributedTracer.Enabled = true + cfg.CrossApplicationTracer.Enabled = false + + app, err := newrelic.NewApplication(cfg) + if nil != err { + log.Panic("Failed to create application", err) + } + + log.Debug("Application created, waiting for connection") + + err = app.WaitForConnection(10 * time.Second) + if nil != err { + log.Panic("Failed to connect application", err) + } + log.Info("Application connected") + defer app.Shutdown(10 * time.Second) + + log.Debug("Starting transaction now") + txn := app.StartTransaction("main", nil, nil) + + // Add the transaction context to the logger. Only once this happens will + // the logs be properly decorated with all required fields. 
+ e := log.WithContext(newrelic.NewContext(context.Background(), txn)) + + doFunction1(txn, e) + + e.Info("Ending transaction") + txn.End() +} diff --git a/_integrations/logcontext/nrlogrusplugin/nrlogrusplugin.go b/_integrations/logcontext/nrlogrusplugin/nrlogrusplugin.go new file mode 100644 index 000000000..38c389bf8 --- /dev/null +++ b/_integrations/logcontext/nrlogrusplugin/nrlogrusplugin.go @@ -0,0 +1,186 @@ +// Package nrlogrusplugin decorates logs for sending to the New Relic backend. +// +// Use this package if you want to enable the New Relic logging product and see +// your log messages in the New Relic UI. +// +// Since Logrus is completely api-compatible with the stdlib logger, you can +// replace your `"log"` imports with `log "github.com/sirupsen/logrus"` and +// follow the steps below to enable the logging product for use with the stdlib +// Go logger. +// +// Using `logger.WithField` +// (https://godoc.org/github.com/sirupsen/logrus#Logger.WithField) and +// `logger.WithFields` +// (https://godoc.org/github.com/sirupsen/logrus#Logger.WithFields) is +// supported. However, if the field key collides with one of the keys used by +// the New Relic Formatter, the value will be overwritten. Reserved keys are +// those found in the `logcontext` package +// (https://godoc.org/github.com/newrelic/go-agent/_integrations/logcontext/#pkg-constants). +// +// Supported types for `logger.WithField` and `logger.WithFields` field values +// are numbers, booleans, strings, and errors. Func types are dropped and all +// other types are converted to strings. +// +// Requires v1.4.0 of the Logrus package or newer. 
+// +// Configuration +// +// For the best linking experience be sure to enable Distributed Tracing: +// +// cfg := NewConfig("Example Application", "__YOUR_NEW_RELIC_LICENSE_KEY__") +// cfg.DistributedTracer.Enabled = true +// +// To enable log decoration, set your log's formatter to the +// `nrlogrusplugin.ContextFormatter` +// +// logger := log.New() +// logger.SetFormatter(nrlogrusplugin.ContextFormatter{}) +// +// or if you are using the logrus standard logger +// +// log.SetFormatter(nrlogrusplugin.ContextFormatter{}) +// +// The logger will now look for a newrelic.Transaction inside its context and +// decorate logs accordingly. Therefore, the Transaction must be added to the +// context and passed to the logger. For example, this logging call +// +// logger.Info("Hello New Relic!") +// +// must be transformed to include the context, such as: +// +// ctx := newrelic.NewContext(context.Background(), txn) +// logger.WithContext(ctx).Info("Hello New Relic!") +// +// Troubleshooting +// +// When properly configured, your log statements will be in JSON format with +// one message per line: +// +// {"message":"Hello New Relic!","log.level":"info","trace.id":"469a04f6c1278593","span.id":"9f365c71f0f04a98","entity.type":"SERVICE","entity.guid":"MTE3ODUwMHxBUE18QVBQTElDQVRJT058Mjc3MDU2Njc1","hostname":"my.hostname","timestamp":1568917432034,"entity.name":"Example Application"} +// +// If the `trace.id` key is missing, be sure that Distributed Tracing is +// enabled and that the Transaction context has been added to the logger using +// `WithContext` (https://godoc.org/github.com/sirupsen/logrus#Logger.WithContext). 
+package nrlogrusplugin + +import ( + "bytes" + "encoding/json" + "fmt" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/logcontext" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/jsonx" + "github.com/sirupsen/logrus" +) + +func init() { internal.TrackUsage("integration", "logcontext", "logrus") } + +type logFields map[string]interface{} + +// ContextFormatter is a `logrus.Formatter` that will format logs for sending +// to New Relic. +type ContextFormatter struct{} + +// Format renders a single log entry. +func (f ContextFormatter) Format(e *logrus.Entry) ([]byte, error) { + // 12 = 6 from GetLinkingMetadata + 6 more below + data := make(logFields, len(e.Data)+12) + for k, v := range e.Data { + data[k] = v + } + + if ctx := e.Context; nil != ctx { + if txn := newrelic.FromContext(ctx); nil != txn { + logcontext.AddLinkingMetadata(data, txn.GetLinkingMetadata()) + } + } + + data[logcontext.KeyTimestamp] = uint64(e.Time.UnixNano()) / uint64(1000*1000) + data[logcontext.KeyMessage] = e.Message + data[logcontext.KeyLevel] = e.Level + + if e.HasCaller() { + data[logcontext.KeyFile] = e.Caller.File + data[logcontext.KeyLine] = e.Caller.Line + data[logcontext.KeyMethod] = e.Caller.Function + } + + var b *bytes.Buffer + if e.Buffer != nil { + b = e.Buffer + } else { + b = &bytes.Buffer{} + } + writeDataJSON(b, data) + return b.Bytes(), nil +} + +func writeDataJSON(buf *bytes.Buffer, data logFields) { + buf.WriteByte('{') + var needsComma bool + for k, v := range data { + if needsComma { + buf.WriteByte(',') + } else { + needsComma = true + } + jsonx.AppendString(buf, k) + buf.WriteByte(':') + writeValue(buf, v) + } + buf.WriteByte('}') + buf.WriteByte('\n') +} + +func writeValue(buf *bytes.Buffer, val interface{}) { + switch v := val.(type) { + case string: + jsonx.AppendString(buf, v) + case bool: + if v { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case uint8: + 
jsonx.AppendInt(buf, int64(v)) + case uint16: + jsonx.AppendInt(buf, int64(v)) + case uint32: + jsonx.AppendInt(buf, int64(v)) + case uint64: + jsonx.AppendInt(buf, int64(v)) + case uint: + jsonx.AppendInt(buf, int64(v)) + case uintptr: + jsonx.AppendInt(buf, int64(v)) + case int8: + jsonx.AppendInt(buf, int64(v)) + case int16: + jsonx.AppendInt(buf, int64(v)) + case int32: + jsonx.AppendInt(buf, int64(v)) + case int: + jsonx.AppendInt(buf, int64(v)) + case int64: + jsonx.AppendInt(buf, v) + case float32: + jsonx.AppendFloat(buf, float64(v)) + case float64: + jsonx.AppendFloat(buf, v) + case logrus.Level: + jsonx.AppendString(buf, v.String()) + case error: + jsonx.AppendString(buf, v.Error()) + default: + if m, ok := v.(json.Marshaler); ok { + if js, err := m.MarshalJSON(); nil == err { + buf.Write(js) + return + } + } + jsonx.AppendString(buf, fmt.Sprintf("%#v", v)) + } +} diff --git a/_integrations/logcontext/nrlogrusplugin/nrlogrusplugin_test.go b/_integrations/logcontext/nrlogrusplugin/nrlogrusplugin_test.go new file mode 100644 index 000000000..28268752a --- /dev/null +++ b/_integrations/logcontext/nrlogrusplugin/nrlogrusplugin_test.go @@ -0,0 +1,394 @@ +package nrlogrusplugin + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "testing" + "time" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" + "github.com/newrelic/go-agent/internal/sysinfo" + "github.com/sirupsen/logrus" +) + +var ( + testTime = time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + matchAnything = struct{}{} +) + +func newTestLogger(out io.Writer) *logrus.Logger { + l := logrus.New() + l.Formatter = ContextFormatter{} + l.SetReportCaller(true) + l.SetOutput(out) + return l +} + +func validateOutput(t *testing.T, out *bytes.Buffer, expected map[string]interface{}) { + var actual map[string]interface{} + if err := json.Unmarshal(out.Bytes(), &actual); nil != err { + 
t.Fatal("failed to unmarshal log output:", err) + } + for k, v := range expected { + found, ok := actual[k] + if !ok { + t.Errorf("key %s not found:\nactual=%s", k, actual) + } + if v != matchAnything && found != v { + t.Errorf("value for key %s is incorrect:\nactual=%s\nexpected=%s", k, found, v) + } + } + for k, v := range actual { + if _, ok := expected[k]; !ok { + t.Errorf("unexpected key found:\nkey=%s\nvalue=%s", k, v) + } + } +} + +func BenchmarkWithOutTransaction(b *testing.B) { + log := newTestLogger(bytes.NewBuffer([]byte(""))) + ctx := context.Background() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + log.WithContext(ctx).Info("Hello World!") + } +} + +func BenchmarkJSONFormatter(b *testing.B) { + log := newTestLogger(bytes.NewBuffer([]byte(""))) + log.Formatter = new(logrus.JSONFormatter) + ctx := context.Background() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + log.WithContext(ctx).Info("Hello World!") + } +} + +func BenchmarkTextFormatter(b *testing.B) { + log := newTestLogger(bytes.NewBuffer([]byte(""))) + log.Formatter = new(logrus.TextFormatter) + ctx := context.Background() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + log.WithContext(ctx).Info("Hello World!") + } +} + +func BenchmarkWithTransaction(b *testing.B) { + app := integrationsupport.NewTestApp(nil, nil) + txn := app.StartTransaction("TestLogDistributedTracingDisabled", nil, nil) + log := newTestLogger(bytes.NewBuffer([]byte(""))) + ctx := newrelic.NewContext(context.Background(), txn) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + log.WithContext(ctx).Info("Hello World!") + } +} + +func TestLogNoContext(t *testing.T) { + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + log.WithTime(testTime).Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "file.name": matchAnything, + "line.number": matchAnything, + "log.level": "info", + "message": "Hello World!", + 
"method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestLogNoContext", + "timestamp": float64(1417136460000), + }) +} + +func TestLogNoTxn(t *testing.T) { + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + log.WithTime(testTime).WithContext(context.Background()).Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "file.name": matchAnything, + "line.number": matchAnything, + "log.level": "info", + "message": "Hello World!", + "method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestLogNoTxn", + "timestamp": float64(1417136460000), + }) +} + +func TestLogDistributedTracingDisabled(t *testing.T) { + app := integrationsupport.NewTestApp(nil, nil) + txn := app.StartTransaction("TestLogDistributedTracingDisabled", nil, nil) + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + ctx := newrelic.NewContext(context.Background(), txn) + host, _ := sysinfo.Hostname() + log.WithTime(testTime).WithContext(ctx).Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "entity.name": integrationsupport.SampleAppName, + "entity.type": "SERVICE", + "file.name": matchAnything, + "hostname": host, + "line.number": matchAnything, + "log.level": "info", + "message": "Hello World!", + "method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestLogDistributedTracingDisabled", + "timestamp": float64(1417136460000), + }) +} + +func TestLogSampledFalse(t *testing.T) { + app := integrationsupport.NewTestApp( + func(reply *internal.ConnectReply) { + reply.AdaptiveSampler = internal.SampleNothing{} + reply.TraceIDGenerator = internal.NewTraceIDGenerator(12345) + }, + func(cfg *newrelic.Config) { + cfg.DistributedTracer.Enabled = true + cfg.CrossApplicationTracer.Enabled = false + }) + txn := app.StartTransaction("TestLogSampledFalse", nil, nil) + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + ctx := 
newrelic.NewContext(context.Background(), txn) + host, _ := sysinfo.Hostname() + log.WithTime(testTime).WithContext(ctx).Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "entity.name": integrationsupport.SampleAppName, + "entity.type": "SERVICE", + "file.name": matchAnything, + "hostname": host, + "line.number": matchAnything, + "log.level": "info", + "message": "Hello World!", + "method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestLogSampledFalse", + "timestamp": float64(1417136460000), + "trace.id": "d9466896a525ccbf", + }) +} + +func TestLogSampledTrue(t *testing.T) { + app := integrationsupport.NewTestApp( + func(reply *internal.ConnectReply) { + reply.AdaptiveSampler = internal.SampleEverything{} + reply.TraceIDGenerator = internal.NewTraceIDGenerator(12345) + }, + func(cfg *newrelic.Config) { + cfg.DistributedTracer.Enabled = true + cfg.CrossApplicationTracer.Enabled = false + }) + txn := app.StartTransaction("TestLogSampledTrue", nil, nil) + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + ctx := newrelic.NewContext(context.Background(), txn) + host, _ := sysinfo.Hostname() + log.WithTime(testTime).WithContext(ctx).Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "entity.name": integrationsupport.SampleAppName, + "entity.type": "SERVICE", + "file.name": matchAnything, + "hostname": host, + "line.number": matchAnything, + "log.level": "info", + "message": "Hello World!", + "method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestLogSampledTrue", + "span.id": "bcfb32e050b264b8", + "timestamp": float64(1417136460000), + "trace.id": "d9466896a525ccbf", + }) +} + +func TestEntryUsedTwice(t *testing.T) { + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + entry := log.WithTime(testTime) + + // First log has dt enabled, ensure trace.id and span.id are included + app := integrationsupport.NewTestApp( + func(reply 
*internal.ConnectReply) {
+			reply.AdaptiveSampler = internal.SampleEverything{}
+			reply.TraceIDGenerator = internal.NewTraceIDGenerator(12345)
+		},
+		func(cfg *newrelic.Config) {
+			cfg.DistributedTracer.Enabled = true
+			cfg.CrossApplicationTracer.Enabled = false
+		})
+	txn := app.StartTransaction("TestEntryUsedTwice1", nil, nil)
+	ctx := newrelic.NewContext(context.Background(), txn)
+	host, _ := sysinfo.Hostname()
+	entry.WithContext(ctx).Info("Hello World!")
+	validateOutput(t, out, map[string]interface{}{
+		"entity.name": integrationsupport.SampleAppName,
+		"entity.type": "SERVICE",
+		"file.name":   matchAnything,
+		"hostname":    host,
+		"line.number": matchAnything,
+		"log.level":   "info",
+		"message":     "Hello World!",
+		"method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestEntryUsedTwice",
+		"span.id":     "bcfb32e050b264b8",
+		"timestamp":   float64(1417136460000),
+		"trace.id":    "d9466896a525ccbf",
+	})
+
+	// Second log has dt disabled, ensure trace.id and span.id are omitted
+	out.Reset()
+	app = integrationsupport.NewTestApp(nil,
+		func(cfg *newrelic.Config) {
+			cfg.DistributedTracer.Enabled = false
+		})
+	txn = app.StartTransaction("TestEntryUsedTwice2", nil, nil)
+	ctx = newrelic.NewContext(context.Background(), txn)
+	host, _ = sysinfo.Hostname()
+	entry.WithContext(ctx).Info("Hello World! Again!")
+	validateOutput(t, out, map[string]interface{}{
+		"entity.name": integrationsupport.SampleAppName,
+		"entity.type": "SERVICE",
+		"file.name":   matchAnything,
+		"hostname":    host,
+		"line.number": matchAnything,
+		"log.level":   "info",
+		"message":     "Hello World! 
Again!", + "method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestEntryUsedTwice", + "timestamp": float64(1417136460000), + }) +} + +func TestEntryError(t *testing.T) { + app := integrationsupport.NewTestApp(nil, nil) + txn := app.StartTransaction("TestEntryError", nil, nil) + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + ctx := newrelic.NewContext(context.Background(), txn) + host, _ := sysinfo.Hostname() + log.WithTime(testTime).WithContext(ctx).WithField("func", func() {}).Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "entity.name": integrationsupport.SampleAppName, + "entity.type": "SERVICE", + "file.name": matchAnything, + "hostname": host, + "line.number": matchAnything, + "log.level": "info", + // Since the err field on the Entry is private we cannot record it. + //"logrus_error": `can not add field "func"`, + "message": "Hello World!", + "method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestEntryError", + "timestamp": float64(1417136460000), + }) +} + +func TestWithCustomField(t *testing.T) { + app := integrationsupport.NewTestApp(nil, nil) + txn := app.StartTransaction("TestWithCustomField", nil, nil) + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + ctx := newrelic.NewContext(context.Background(), txn) + host, _ := sysinfo.Hostname() + log.WithTime(testTime).WithContext(ctx).WithField("zip", "zap").Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "entity.name": integrationsupport.SampleAppName, + "entity.type": "SERVICE", + "file.name": matchAnything, + "hostname": host, + "line.number": matchAnything, + "log.level": "info", + "message": "Hello World!", + "method.name": "github.com/newrelic/go-agent/_integrations/logcontext/nrlogrusplugin.TestWithCustomField", + "timestamp": float64(1417136460000), + "zip": "zap", + }) +} + +func TestCustomFieldTypes(t *testing.T) { + out := bytes.NewBuffer([]byte{}) + + 
testcases := []struct { + input interface{} + output string + }{ + {input: true, output: "true"}, + {input: false, output: "false"}, + {input: uint8(42), output: "42"}, + {input: uint16(42), output: "42"}, + {input: uint32(42), output: "42"}, + {input: uint(42), output: "42"}, + {input: uintptr(42), output: "42"}, + {input: int8(42), output: "42"}, + {input: int16(42), output: "42"}, + {input: int32(42), output: "42"}, + {input: int64(42), output: "42"}, + {input: float32(42), output: "42"}, + {input: float64(42), output: "42"}, + {input: errors.New("Ooops an error"), output: `"Ooops an error"`}, + {input: []int{1, 2, 3}, output: `"[]int{1, 2, 3}"`}, + } + + for _, test := range testcases { + out.Reset() + writeValue(out, test.input) + if out.String() != test.output { + t.Errorf("Incorrect output written:\nactual=%s\nexpected=%s", + out.String(), test.output) + } + } +} + +func TestUnsetCaller(t *testing.T) { + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + log.SetReportCaller(false) + log.WithTime(testTime).Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "log.level": "info", + "message": "Hello World!", + "timestamp": float64(1417136460000), + }) +} + +func TestCustomFieldNameCollision(t *testing.T) { + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + log.SetReportCaller(false) + log.WithTime(testTime).WithField("timestamp", "Yesterday").Info("Hello World!") + validateOutput(t, out, map[string]interface{}{ + "log.level": "info", + "message": "Hello World!", + // Reserved keys will be overwritten + "timestamp": float64(1417136460000), + }) +} + +type gopher struct { + name string +} + +func (g *gopher) MarshalJSON() ([]byte, error) { + return json.Marshal(g.name) +} + +func TestCustomJSONMarshaller(t *testing.T) { + out := bytes.NewBuffer([]byte{}) + log := newTestLogger(out) + log.SetReportCaller(false) + log.WithTime(testTime).WithField("gopher", &gopher{name: "sam"}).Info("Hello World!") + validateOutput(t, 
out, map[string]interface{}{ + "gopher": "sam", + "log.level": "info", + "message": "Hello World!", + "timestamp": float64(1417136460000), + }) +} diff --git a/_integrations/nrawssdk/README.md b/_integrations/nrawssdk/README.md new file mode 100644 index 000000000..3886ce4f7 --- /dev/null +++ b/_integrations/nrawssdk/README.md @@ -0,0 +1,3 @@ +# _integrations/nrawssdk [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrawssdk) + +Integrations for AWS SDKs versions 1 and 2. diff --git a/_integrations/nrawssdk/internal/internal.go b/_integrations/nrawssdk/internal/internal.go new file mode 100644 index 000000000..0ff5817f0 --- /dev/null +++ b/_integrations/nrawssdk/internal/internal.go @@ -0,0 +1,99 @@ +package internal + +import ( + "context" + "net/http" + "reflect" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +type contextKeyType struct{} + +var segmentContextKey = contextKeyType(struct{}{}) + +type endable interface{ End() error } + +func getTableName(params interface{}) string { + var tableName string + + v := reflect.ValueOf(params) + if v.IsValid() && v.Kind() == reflect.Ptr { + e := v.Elem() + if e.Kind() == reflect.Struct { + n := e.FieldByName("TableName") + if n.IsValid() { + if name, ok := n.Interface().(*string); ok { + if nil != name { + tableName = *name + } + } + } + } + } + + return tableName +} + +func getRequestID(hdr http.Header) string { + id := hdr.Get("X-Amzn-Requestid") + if id == "" { + // Alternative version of request id in the header + id = hdr.Get("X-Amz-Request-Id") + } + return id +} + +// StartSegmentInputs is used as the input to StartSegment. +type StartSegmentInputs struct { + HTTPRequest *http.Request + ServiceName string + Operation string + Region string + Params interface{} +} + +// StartSegment starts a segment of either type DatastoreSegment or +// ExternalSegment given the serviceName provided. The segment is then added to +// the request context. 
+func StartSegment(input StartSegmentInputs) *http.Request { + + httpCtx := input.HTTPRequest.Context() + txn := newrelic.FromContext(httpCtx) + + var segment endable + // Service name capitalization is different for v1 and v2. + if input.ServiceName == "dynamodb" || input.ServiceName == "DynamoDB" { + segment = &newrelic.DatastoreSegment{ + Product: newrelic.DatastoreDynamoDB, + Collection: getTableName(input.Params), + Operation: input.Operation, + ParameterizedQuery: "", + QueryParameters: nil, + Host: input.HTTPRequest.URL.Host, + PortPathOrID: input.HTTPRequest.URL.Port(), + DatabaseName: "", + StartTime: newrelic.StartSegmentNow(txn), + } + } else { + segment = newrelic.StartExternalSegment(txn, input.HTTPRequest) + } + + integrationsupport.AddAgentSpanAttribute(txn, newrelic.SpanAttributeAWSOperation, input.Operation) + integrationsupport.AddAgentSpanAttribute(txn, newrelic.SpanAttributeAWSRegion, input.Region) + + ctx := context.WithValue(httpCtx, segmentContextKey, segment) + return input.HTTPRequest.WithContext(ctx) +} + +// EndSegment will end any segment found in the given context. 
+func EndSegment(ctx context.Context, hdr http.Header) { + if segment, ok := ctx.Value(segmentContextKey).(endable); ok { + if id := getRequestID(hdr); "" != id { + txn := newrelic.FromContext(ctx) + integrationsupport.AddAgentSpanAttribute(txn, newrelic.SpanAttributeAWSRequestID, id) + } + segment.End() + } +} diff --git a/_integrations/nrawssdk/internal/internal_test.go b/_integrations/nrawssdk/internal/internal_test.go new file mode 100644 index 000000000..3985d4909 --- /dev/null +++ b/_integrations/nrawssdk/internal/internal_test.go @@ -0,0 +1,113 @@ +package internal + +import ( + "net/http" + "strings" + "testing" + + requestv2 "github.com/aws/aws-sdk-go-v2/aws" + restv2 "github.com/aws/aws-sdk-go-v2/private/protocol/rest" + "github.com/aws/aws-sdk-go-v2/service/lambda" + requestv1 "github.com/aws/aws-sdk-go/aws/request" + restv1 "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +func TestGetTableName(t *testing.T) { + str := "this is a string" + var emptyStr string + strPtr := &str + emptyStrPtr := &emptyStr + + testcases := []struct { + params interface{} + expected string + }{ + {params: nil, expected: ""}, + {params: str, expected: ""}, + {params: strPtr, expected: ""}, + {params: struct{ other string }{other: str}, expected: ""}, + {params: &struct{ other string }{other: str}, expected: ""}, + {params: struct{ TableName bool }{TableName: true}, expected: ""}, + {params: &struct{ TableName bool }{TableName: true}, expected: ""}, + {params: struct{ TableName string }{TableName: str}, expected: ""}, + {params: &struct{ TableName string }{TableName: str}, expected: ""}, + {params: struct{ TableName *string }{TableName: nil}, expected: ""}, + {params: &struct{ TableName *string }{TableName: nil}, expected: ""}, + {params: struct{ TableName *string }{TableName: emptyStrPtr}, expected: ""}, + {params: &struct{ TableName *string }{TableName: emptyStrPtr}, expected: ""}, + {params: struct{ TableName *string }{TableName: strPtr}, expected: ""}, + {params: 
&struct{ TableName *string }{TableName: strPtr}, expected: str}, + } + + for i, test := range testcases { + if out := getTableName(test.params); test.expected != out { + t.Error(i, out, test.params, test.expected) + } + } +} + +func TestGetRequestID(t *testing.T) { + primary := "X-Amzn-Requestid" + secondary := "X-Amz-Request-Id" + + testcases := []struct { + hdr http.Header + expected string + }{ + {hdr: http.Header{ + "hello": []string{"world"}, + }, expected: ""}, + + {hdr: http.Header{ + strings.ToUpper(primary): []string{"hello"}, + }, expected: ""}, + + {hdr: http.Header{ + primary: []string{"hello"}, + }, expected: "hello"}, + + {hdr: http.Header{ + secondary: []string{"hello"}, + }, expected: "hello"}, + + {hdr: http.Header{ + primary: []string{"hello"}, + secondary: []string{"world"}, + }, expected: "hello"}, + } + + for i, test := range testcases { + if out := getRequestID(test.hdr); test.expected != out { + t.Error(i, out, test.hdr, test.expected) + } + } + + // Make sure our assumptions still hold against aws-sdk-go + for _, test := range testcases { + req := &requestv1.Request{ + HTTPResponse: &http.Response{ + Header: test.hdr, + }, + } + restv1.UnmarshalMeta(req) + if out := getRequestID(test.hdr); req.RequestID != out { + t.Error("requestId assumptions incorrect", out, req.RequestID, + test.hdr, test.expected) + } + } + + // Make sure our assumptions still hold against aws-sdk-go-v2 + for _, test := range testcases { + req := &requestv2.Request{ + HTTPResponse: &http.Response{ + Header: test.hdr, + }, + Data: &lambda.InvokeOutput{}, + } + restv2.UnmarshalMeta(req) + if out := getRequestID(test.hdr); req.RequestID != out { + t.Error("requestId assumptions incorrect", out, req.RequestID, + test.hdr, test.expected) + } + } +} diff --git a/_integrations/nrawssdk/v1/README.md b/_integrations/nrawssdk/v1/README.md new file mode 100644 index 000000000..c05194fb8 --- /dev/null +++ b/_integrations/nrawssdk/v1/README.md @@ -0,0 +1,10 @@ +# 
_integrations/nrawssdk/v1 [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrawssdk/v1) + +Package `nrawssdk` instruments https://github.com/aws/aws-sdk-go requests. + +```go +import "github.com/newrelic/go-agent/_integrations/nrawssdk/v1" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrawssdk/v1). diff --git a/_integrations/nrawssdk/v1/nrawssdk.go b/_integrations/nrawssdk/v1/nrawssdk.go new file mode 100644 index 000000000..e9907bf54 --- /dev/null +++ b/_integrations/nrawssdk/v1/nrawssdk.go @@ -0,0 +1,84 @@ +// Package nrawssdk instruments https://github.com/aws/aws-sdk-go requests. +package nrawssdk + +import ( + "github.com/aws/aws-sdk-go/aws/request" + internal "github.com/newrelic/go-agent/_integrations/nrawssdk/internal" + agentinternal "github.com/newrelic/go-agent/internal" +) + +func init() { agentinternal.TrackUsage("integration", "library", "aws-sdk-go") } + +func startSegment(req *request.Request) { + input := internal.StartSegmentInputs{ + HTTPRequest: req.HTTPRequest, + ServiceName: req.ClientInfo.ServiceName, + Operation: req.Operation.Name, + Region: req.ClientInfo.SigningRegion, + Params: req.Params, + } + req.HTTPRequest = internal.StartSegment(input) +} + +func endSegment(req *request.Request) { + ctx := req.HTTPRequest.Context() + internal.EndSegment(ctx, req.HTTPResponse.Header) +} + +// InstrumentHandlers will add instrumentation to the given *request.Handlers. +// +// A Segment will be created for each out going request. The Transaction must +// be added to the `http.Request`'s Context in order for the segment to be +// recorded. For DynamoDB calls, these segments will be +// `newrelic.DatastoreSegment` type and for all others they will be +// `newrelic.ExternalSegment` type. +// +// Additional attributes will be added to Transaction Trace Segments and Span +// Events: aws.region, aws.requestId, and aws.operation. 
+// +// To add instrumentation to the Session and see segments created for each +// invocation that uses the Session, call InstrumentHandlers with the session's +// Handlers and add the current Transaction to the `http.Request`'s Context: +// +// ses := session.New() +// // Add instrumentation to handlers +// nrawssdk.InstrumentHandlers(&ses.Handlers) +// lambdaClient = lambda.New(ses, aws.NewConfig()) +// +// req, out := lambdaClient.InvokeRequest(&lambda.InvokeInput{ +// ClientContext: aws.String("MyApp"), +// FunctionName: aws.String("Function"), +// InvocationType: aws.String("Event"), +// LogType: aws.String("Tail"), +// Payload: []byte("{}"), +// }) +// // Add txn to http.Request's context +// req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) +// err := req.Send() +// +// To add instrumentation to a Request and see a segment created just for the +// individual request, call InstrumentHandlers with the `request.Request`'s +// Handlers and add the current Transaction to the `http.Request`'s Context: +// +// req, out := lambdaClient.InvokeRequest(&lambda.InvokeInput{ +// ClientContext: aws.String("MyApp"), +// FunctionName: aws.String("Function"), +// InvocationType: aws.String("Event"), +// LogType: aws.String("Tail"), +// Payload: []byte("{}"), +// }) +// // Add instrumentation to handlers +// nrawssdk.InstrumentHandlers(&req.Handlers) +// // Add txn to http.Request's context +// req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) +// err := req.Send() +func InstrumentHandlers(handlers *request.Handlers) { + handlers.Send.SetFrontNamed(request.NamedHandler{ + Name: "StartNewRelicSegment", + Fn: startSegment, + }) + handlers.Send.SetBackNamed(request.NamedHandler{ + Name: "EndNewRelicSegment", + Fn: endSegment, + }) +} diff --git a/_integrations/nrawssdk/v1/nrawssdk_test.go b/_integrations/nrawssdk/v1/nrawssdk_test.go new file mode 100644 index 000000000..62a107ae5 --- /dev/null +++ 
b/_integrations/nrawssdk/v1/nrawssdk_test.go @@ -0,0 +1,586 @@ +package nrawssdk + +import ( + "bytes" + "errors" + "io/ioutil" + "net/http" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/lambda" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +func testApp() integrationsupport.ExpectApp { + return integrationsupport.NewTestApp(integrationsupport.SampleEverythingReplyFn, integrationsupport.DTEnabledCfgFn) +} + +type fakeTransport struct{} + +func (t fakeTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Amzn-Requestid": []string{requestID}, + }, + }, nil +} + +type fakeCreds struct{} + +func (c *fakeCreds) Retrieve() (credentials.Value, error) { + return credentials.Value{}, nil +} +func (c *fakeCreds) IsExpired() bool { return false } + +func newSession() *session.Session { + r := "us-west-2" + ses := session.New() + ses.Config.Credentials = credentials.NewCredentials(&fakeCreds{}) + ses.Config.HTTPClient.Transport = &fakeTransport{} + ses.Config.Region = &r + return ses +} + +const ( + requestID = "testing request id" + txnName = "aws-txn" +) + +var ( + genericSpan = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/" + txnName, + "sampled": true, + "category": "generic", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "nr.entryPoint": true, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + } + externalSpan 
= internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "External/lambda.us-west-2.amazonaws.com/http/POST", + "sampled": true, + "category": "http", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "http", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "Invoke", + "aws.region": "us-west-2", + "aws.requestId": requestID, + "http.method": "POST", + "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", + }, + } + externalSpanNoRequestID = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "External/lambda.us-west-2.amazonaws.com/http/POST", + "sampled": true, + "category": "http", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "http", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "Invoke", + "aws.region": "us-west-2", + "http.method": "POST", + "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", + }, + } + datastoreSpan = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "Datastore/statement/DynamoDB/thebesttable/DescribeTable", + "sampled": true, + "category": "datastore", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "DynamoDB", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": 
"DescribeTable", + "aws.region": "us-west-2", + "aws.requestId": requestID, + "db.collection": "thebesttable", + "db.statement": "'DescribeTable' on 'thebesttable' using 'DynamoDB'", + "peer.address": "dynamodb.us-west-2.amazonaws.com:unknown", + "peer.hostname": "dynamodb.us-west-2.amazonaws.com", + }, + } + + txnMetrics = []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + } + externalMetrics = append([]internal.WantMetric{ + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: nil}, + }, txnMetrics...) 
+ datastoreMetrics = append([]internal.WantMetric{ + {Name: "Datastore/DynamoDB/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/DynamoDB/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/instance/DynamoDB/dynamodb.us-west-2.amazonaws.com/unknown", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/operation/DynamoDB/DescribeTable", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/DynamoDB/thebesttable/DescribeTable", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/DynamoDB/thebesttable/DescribeTable", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: nil}, + }, txnMetrics...) +) + +func TestInstrumentRequestExternal(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := lambda.New(newSession()) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: aws.String("Event"), + LogType: aws.String("Tail"), + Payload: []byte("{}"), + } + + req, out := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) + + err := req.Send() + if nil != err { + t.Error(err) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpan}) +} + +func TestInstrumentRequestDatastore(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := dynamodb.New(newSession()) + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req, _ := client.DescribeTableRequest(input) + InstrumentHandlers(&req.Handlers) + req.HTTPRequest = 
newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) + + err := req.Send() + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, datastoreMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, datastoreSpan}) +} + +func TestInstrumentRequestExternalNoTxn(t *testing.T) { + client := lambda.New(newSession()) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: aws.String("Event"), + LogType: aws.String("Tail"), + Payload: []byte("{}"), + } + + req, out := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + + err := req.Send() + if nil != err { + t.Error(err) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } +} + +func TestInstrumentRequestDatastoreNoTxn(t *testing.T) { + client := dynamodb.New(newSession()) + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req, _ := client.DescribeTableRequest(input) + InstrumentHandlers(&req.Handlers) + + err := req.Send() + if nil != err { + t.Error(err) + } +} + +func TestInstrumentSessionExternal(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + ses := newSession() + InstrumentHandlers(&ses.Handlers) + client := lambda.New(ses) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: aws.String("Event"), + LogType: aws.String("Tail"), + Payload: []byte("{}"), + } + + req, out := client.InvokeRequest(input) + req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) + + err := req.Send() + if nil != err { + t.Error(err) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpan}) +} + +func 
TestInstrumentSessionDatastore(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + ses := newSession() + InstrumentHandlers(&ses.Handlers) + client := dynamodb.New(ses) + + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req, _ := client.DescribeTableRequest(input) + req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) + + err := req.Send() + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, datastoreMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, datastoreSpan}) +} + +func TestInstrumentSessionExternalNoTxn(t *testing.T) { + ses := newSession() + InstrumentHandlers(&ses.Handlers) + client := lambda.New(ses) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: aws.String("Event"), + LogType: aws.String("Tail"), + Payload: []byte("{}"), + } + + req, out := client.InvokeRequest(input) + req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, nil) + + err := req.Send() + if nil != err { + t.Error(err) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } +} + +func TestInstrumentSessionDatastoreNoTxn(t *testing.T) { + ses := newSession() + InstrumentHandlers(&ses.Handlers) + client := dynamodb.New(ses) + + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req, _ := client.DescribeTableRequest(input) + req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, nil) + + err := req.Send() + if nil != err { + t.Error(err) + } +} + +func TestInstrumentSessionExternalTxnNotInCtx(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + ses := newSession() + InstrumentHandlers(&ses.Handlers) + client := lambda.New(ses) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + 
FunctionName: aws.String("non-existent-function"), + InvocationType: aws.String("Event"), + LogType: aws.String("Tail"), + Payload: []byte("{}"), + } + + req, out := client.InvokeRequest(input) + + err := req.Send() + if nil != err { + t.Error(err) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } + + txn.End() + + app.ExpectMetrics(t, txnMetrics) +} + +func TestInstrumentSessionDatastoreTxnNotInCtx(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + ses := newSession() + InstrumentHandlers(&ses.Handlers) + client := dynamodb.New(ses) + + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req, _ := client.DescribeTableRequest(input) + + err := req.Send() + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, txnMetrics) +} + +func TestDoublyInstrumented(t *testing.T) { + hs := &request.Handlers{} + if found := hs.Send.Len(); 0 != found { + t.Error("unexpected number of Send handlers found:", found) + } + + InstrumentHandlers(hs) + if found := hs.Send.Len(); 2 != found { + t.Error("unexpected number of Send handlers found:", found) + } + + InstrumentHandlers(hs) + if found := hs.Send.Len(); 2 != found { + t.Error("unexpected number of Send handlers found:", found) + } +} + +type firstFailingTransport struct { + failing bool +} + +func (t *firstFailingTransport) RoundTrip(r *http.Request) (*http.Response, error) { + if t.failing { + t.failing = false + return nil, errors.New("Oops this failed") + } + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Amzn-Requestid": []string{requestID}, + }, + }, nil +} + +func TestRetrySend(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + ses := newSession() + ses.Config.HTTPClient.Transport = &firstFailingTransport{failing: true} + + client := lambda.New(ses) + 
input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: aws.String("Event"), + LogType: aws.String("Tail"), + Payload: []byte("{}"), + } + + req, out := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) + + err := req.Send() + if nil != err { + t.Error(err) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpanNoRequestID, externalSpan}) +} + +func TestRequestSentTwice(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := lambda.New(newSession()) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: aws.String("Event"), + LogType: aws.String("Tail"), + Payload: []byte("{}"), + } + + req, out := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) + + firstErr := req.Send() + if nil != firstErr { + t.Error(firstErr) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } + + secondErr := req.Send() + if nil != secondErr { + t.Error(secondErr) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } + + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: []float64{2}}, + {Name: "External/allOther", Scope: "", Forced: true, Data: []float64{2}}, + {Name: 
"External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: []float64{2}}, + {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: []float64{2}}, + {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpan, externalSpan}) +} + +type noRequestIDTransport struct{} + +func (t *noRequestIDTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil +} + +func TestNoRequestIDFound(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + ses := newSession() + ses.Config.HTTPClient.Transport = &noRequestIDTransport{} + + client := lambda.New(ses) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: aws.String("Event"), + LogType: aws.String("Tail"), + Payload: []byte("{}"), + } + + req, out := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + req.HTTPRequest = newrelic.RequestWithTransactionContext(req.HTTPRequest, txn) + + err := req.Send() + if nil != err { + t.Error(err) + } + if 200 != *out.StatusCode { + t.Error("wrong status code on response", out.StatusCode) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpanNoRequestID}) +} diff --git a/_integrations/nrawssdk/v2/README.md b/_integrations/nrawssdk/v2/README.md new file mode 100644 index 000000000..dc2ca26ad --- /dev/null +++ 
b/_integrations/nrawssdk/v2/README.md @@ -0,0 +1,10 @@ +# _integrations/nrawssdk/v2 [&#8203;](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrawssdk/v2) + +Package `nrawssdk` instruments https://github.com/aws/aws-sdk-go-v2 requests. + +```go +import "github.com/newrelic/go-agent/_integrations/nrawssdk/v2" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrawssdk/v2). diff --git a/_integrations/nrawssdk/v2/nrawssdk.go b/_integrations/nrawssdk/v2/nrawssdk.go new file mode 100644 index 000000000..51955fc24 --- /dev/null +++ b/_integrations/nrawssdk/v2/nrawssdk.go @@ -0,0 +1,85 @@ +// Package nrawssdk instruments https://github.com/aws/aws-sdk-go-v2 requests. +package nrawssdk + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + internal "github.com/newrelic/go-agent/_integrations/nrawssdk/internal" + agentinternal "github.com/newrelic/go-agent/internal" +) + +func init() { agentinternal.TrackUsage("integration", "library", "aws-sdk-go-v2") } + +func startSegment(req *aws.Request) { + input := internal.StartSegmentInputs{ + HTTPRequest: req.HTTPRequest, + ServiceName: req.Metadata.ServiceName, + Operation: req.Operation.Name, + Region: req.Metadata.SigningRegion, + Params: req.Params, + } + req.HTTPRequest = internal.StartSegment(input) +} + +func endSegment(req *aws.Request) { + ctx := req.HTTPRequest.Context() + internal.EndSegment(ctx, req.HTTPResponse.Header) +} + +// InstrumentHandlers will add instrumentation to the given *aws.Handlers. +// +// A Segment will be created for each outgoing request. The Transaction must +// be added to the `http.Request`'s Context in order for the segment to be +// recorded. For DynamoDB calls, these segments will be +// `newrelic.DatastoreSegment` type and for all others they will be +// `newrelic.ExternalSegment` type. +// +// Additional attributes will be added to Transaction Trace Segments and Span +// Events: aws.region, aws.requestId, and aws.operation. 
+// +// To add instrumentation to a Config and see segments created for each +// invocation that uses that Config, call InstrumentHandlers with the config's +// Handlers and add the current Transaction to the `http.Request`'s Context: +// +// cfg, _ := external.LoadDefaultAWSConfig() +// cfg.Region = endpoints.UsWest2RegionID +// // Add instrumentation to handlers +// nrawssdk.InstrumentHandlers(&cfg.Handlers) +// lambdaClient = lambda.New(cfg) +// +// req := lambdaClient.InvokeRequest(&lambda.InvokeInput{ +// ClientContext: aws.String("MyApp"), +// FunctionName: aws.String("Function"), +// InvocationType: lambda.InvocationTypeEvent, +// LogType: lambda.LogTypeTail, +// Payload: []byte("{}"), +// }) +// // Add txn to http.Request's context +// ctx := newrelic.NewContext(req.Context(), txn) +// resp, err := req.Send(ctx) +// +// To add instrumentation to a Request and see a segment created just for the +// individual request, call InstrumentHandlers with the `aws.Request`'s +// Handlers and add the current Transaction to the `http.Request`'s Context: +// +// req := lambdaClient.InvokeRequest(&lambda.InvokeInput{ +// ClientContext: aws.String("MyApp"), +// FunctionName: aws.String("Function"), +// InvocationType: lambda.InvocationTypeEvent, +// LogType: lambda.LogTypeTail, +// Payload: []byte("{}"), +// }) +// // Add instrumentation to handlers +// nrawssdk.InstrumentHandlers(&req.Handlers) +// // Add txn to http.Request's context +// ctx := newrelic.NewContext(req.Context(), txn) +// resp, err := req.Send(ctx) +func InstrumentHandlers(handlers *aws.Handlers) { + handlers.Send.SetFrontNamed(aws.NamedHandler{ + Name: "StartNewRelicSegment", + Fn: startSegment, + }) + handlers.Send.SetBackNamed(aws.NamedHandler{ + Name: "EndNewRelicSegment", + Fn: endSegment, + }) +} diff --git a/_integrations/nrawssdk/v2/nrawssdk_test.go b/_integrations/nrawssdk/v2/nrawssdk_test.go new file mode 100644 index 000000000..c92046045 --- /dev/null +++ 
b/_integrations/nrawssdk/v2/nrawssdk_test.go @@ -0,0 +1,565 @@ +package nrawssdk + +import ( + "bytes" + "errors" + "io/ioutil" + "net/http" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/endpoints" + "github.com/aws/aws-sdk-go-v2/aws/external" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/lambda" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +func testApp() integrationsupport.ExpectApp { + return integrationsupport.NewTestApp(integrationsupport.SampleEverythingReplyFn, integrationsupport.DTEnabledCfgFn) +} + +type fakeTransport struct{} + +func (t fakeTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Amzn-Requestid": []string{requestID}, + }, + }, nil +} + +type fakeCreds struct{} + +func (c fakeCreds) Retrieve() (aws.Credentials, error) { + return aws.Credentials{}, nil +} + +func newConfig(instrument bool) aws.Config { + cfg, _ := external.LoadDefaultAWSConfig() + cfg.Credentials = fakeCreds{} + cfg.Region = endpoints.UsWest2RegionID + cfg.HTTPClient = &http.Client{ + Transport: &fakeTransport{}, + } + + if instrument { + InstrumentHandlers(&cfg.Handlers) + } + return cfg +} + +const ( + requestID = "testing request id" + txnName = "aws-txn" +) + +var ( + genericSpan = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/" + txnName, + "sampled": true, + "category": "generic", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "nr.entryPoint": true, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + } + externalSpan = internal.WantEvent{ + 
Intrinsics: map[string]interface{}{ + "name": "External/lambda.us-west-2.amazonaws.com/http/POST", + "sampled": true, + "category": "http", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "http", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "Invoke", + "aws.region": "us-west-2", + "aws.requestId": requestID, + "http.method": "POST", + "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", + }, + } + externalSpanNoRequestID = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "External/lambda.us-west-2.amazonaws.com/http/POST", + "sampled": true, + "category": "http", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "http", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "Invoke", + "aws.region": "us-west-2", + "http.method": "POST", + "http.url": "https://lambda.us-west-2.amazonaws.com/2015-03-31/functions/non-existent-function/invocations", + }, + } + datastoreSpan = internal.WantEvent{ + Intrinsics: map[string]interface{}{ + "name": "Datastore/statement/DynamoDB/thebesttable/DescribeTable", + "sampled": true, + "category": "datastore", + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "transactionId": internal.MatchAnything, + "traceId": internal.MatchAnything, + "parentId": internal.MatchAnything, + "component": "DynamoDB", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.operation": "DescribeTable", + 
"aws.region": "us-west-2", + "aws.requestId": requestID, + "db.collection": "thebesttable", + "db.statement": "'DescribeTable' on 'thebesttable' using 'DynamoDB'", + "peer.address": "dynamodb.us-west-2.amazonaws.com:unknown", + "peer.hostname": "dynamodb.us-west-2.amazonaws.com", + }, + } + + txnMetrics = []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + } + externalMetrics = append(txnMetrics, []internal.WantMetric{ + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: nil}, + }...) 
+ datastoreMetrics = append(txnMetrics, []internal.WantMetric{ + {Name: "Datastore/DynamoDB/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/DynamoDB/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/instance/DynamoDB/dynamodb.us-west-2.amazonaws.com/unknown", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/operation/DynamoDB/DescribeTable", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/DynamoDB/thebesttable/DescribeTable", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/DynamoDB/thebesttable/DescribeTable", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: nil}, + }...) +) + +func TestInstrumentRequestExternal(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := lambda.New(newConfig(false)) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: lambda.InvocationTypeEvent, + LogType: lambda.LogTypeTail, + Payload: []byte("{}"), + } + req := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + ctx := newrelic.NewContext(req.Context(), txn) + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpan}) +} + +func TestInstrumentRequestDatastore(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := dynamodb.New(newConfig(false)) + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req := client.DescribeTableRequest(input) + InstrumentHandlers(&req.Handlers) + ctx := newrelic.NewContext(req.Context(), txn) + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } + + txn.End() + + 
app.ExpectMetrics(t, datastoreMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, datastoreSpan}) +} + +func TestInstrumentRequestExternalNoTxn(t *testing.T) { + client := lambda.New(newConfig(false)) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: lambda.InvocationTypeEvent, + LogType: lambda.LogTypeTail, + Payload: []byte("{}"), + } + + req := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + ctx := req.Context() + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } +} + +func TestInstrumentRequestDatastoreNoTxn(t *testing.T) { + client := dynamodb.New(newConfig(false)) + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req := client.DescribeTableRequest(input) + InstrumentHandlers(&req.Handlers) + ctx := req.Context() + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } +} + +func TestInstrumentConfigExternal(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := lambda.New(newConfig(true)) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: lambda.InvocationTypeEvent, + LogType: lambda.LogTypeTail, + Payload: []byte("{}"), + } + + req := client.InvokeRequest(input) + ctx := newrelic.NewContext(req.Context(), txn) + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpan}) +} + +func TestInstrumentConfigDatastore(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := dynamodb.New(newConfig(true)) + + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req := client.DescribeTableRequest(input) + ctx := 
newrelic.NewContext(req.Context(), txn) + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, datastoreMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, datastoreSpan}) +} + +func TestInstrumentConfigExternalNoTxn(t *testing.T) { + client := lambda.New(newConfig(true)) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: lambda.InvocationTypeEvent, + LogType: lambda.LogTypeTail, + Payload: []byte("{}"), + } + + req := client.InvokeRequest(input) + ctx := req.Context() + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } +} + +func TestInstrumentConfigDatastoreNoTxn(t *testing.T) { + client := dynamodb.New(newConfig(true)) + + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req := client.DescribeTableRequest(input) + ctx := req.Context() + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } +} + +func TestInstrumentConfigExternalTxnNotInCtx(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := lambda.New(newConfig(true)) + + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: lambda.InvocationTypeEvent, + LogType: lambda.LogTypeTail, + Payload: []byte("{}"), + } + + req := client.InvokeRequest(input) + ctx := req.Context() + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, txnMetrics) +} + +func TestInstrumentConfigDatastoreTxnNotInCtx(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := dynamodb.New(newConfig(true)) + + input := &dynamodb.DescribeTableInput{ + TableName: aws.String("thebesttable"), + } + + req := client.DescribeTableRequest(input) + ctx := req.Context() + + _, err := req.Send(ctx) + if nil != err { + 
t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, txnMetrics) +} + +func TestDoublyInstrumented(t *testing.T) { + hs := &aws.Handlers{} + if found := hs.Send.Len(); 0 != found { + t.Error("unexpected number of Send handlers found:", found) + } + + InstrumentHandlers(hs) + if found := hs.Send.Len(); 2 != found { + t.Error("unexpected number of Send handlers found:", found) + } + + InstrumentHandlers(hs) + if found := hs.Send.Len(); 2 != found { + t.Error("unexpected number of Send handlers found:", found) + } +} + +type firstFailingTransport struct { + failing bool +} + +func (t *firstFailingTransport) RoundTrip(r *http.Request) (*http.Response, error) { + if t.failing { + t.failing = false + return nil, errors.New("Oops this failed") + } + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Amzn-Requestid": []string{requestID}, + }, + }, nil +} + +func TestRetrySend(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + cfg := newConfig(false) + cfg.HTTPClient = &http.Client{ + Transport: &firstFailingTransport{failing: true}, + } + + client := lambda.New(cfg) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: lambda.InvocationTypeEvent, + LogType: lambda.LogTypeTail, + Payload: []byte("{}"), + } + req := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + ctx := newrelic.NewContext(req.Context(), txn) + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: []float64{2}}, + {Name: "External/allOther", 
Scope: "", Forced: true, Data: []float64{2}}, + {Name: "External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: []float64{2}}, + {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" + txnName, Forced: false, Data: []float64{2}}, + {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpanNoRequestID, externalSpan}) +} + +func TestRequestSentTwice(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + client := lambda.New(newConfig(false)) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: lambda.InvocationTypeEvent, + LogType: lambda.LogTypeTail, + Payload: []byte("{}"), + } + req := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + ctx := newrelic.NewContext(req.Context(), txn) + + _, firstErr := req.Send(ctx) + if nil != firstErr { + t.Error(firstErr) + } + + _, secondErr := req.Send(ctx) + if nil != secondErr { + t.Error(secondErr) + } + + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: []float64{2}}, + {Name: "External/allOther", Scope: "", Forced: true, Data: []float64{2}}, + {Name: "External/lambda.us-west-2.amazonaws.com/all", Scope: "", Forced: false, Data: []float64{2}}, + {Name: "External/lambda.us-west-2.amazonaws.com/http/POST", Scope: "OtherTransaction/Go/" 
+ txnName, Forced: false, Data: []float64{2}}, + {Name: "OtherTransaction/Go/" + txnName, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + txnName, Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpan, externalSpan}) +} + +type noRequestIDTransport struct{} + +func (t *noRequestIDTransport) RoundTrip(r *http.Request) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil +} + +func TestNoRequestIDFound(t *testing.T) { + app := testApp() + txn := app.StartTransaction(txnName, nil, nil) + + cfg := newConfig(false) + cfg.HTTPClient = &http.Client{ + Transport: &noRequestIDTransport{}, + } + + client := lambda.New(cfg) + input := &lambda.InvokeInput{ + ClientContext: aws.String("MyApp"), + FunctionName: aws.String("non-existent-function"), + InvocationType: lambda.InvocationTypeEvent, + LogType: lambda.LogTypeTail, + Payload: []byte("{}"), + } + req := client.InvokeRequest(input) + InstrumentHandlers(&req.Handlers) + ctx := newrelic.NewContext(req.Context(), txn) + + _, err := req.Send(ctx) + if nil != err { + t.Error(err) + } + + txn.End() + + app.ExpectMetrics(t, externalMetrics) + app.ExpectSpanEvents(t, []internal.WantEvent{ + genericSpan, externalSpanNoRequestID}) +} diff --git a/_integrations/nrb3/README.md b/_integrations/nrb3/README.md new file mode 100644 index 000000000..92f200e3b --- /dev/null +++ b/_integrations/nrb3/README.md @@ -0,0 +1,10 @@ +# _integrations/nrb3 [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrb3) + +Package `nrb3` supports adding B3 headers to outgoing requests. 
+ +```go +import "github.com/newrelic/go-agent/_integrations/nrb3" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrb3). diff --git a/_integrations/nrb3/example_test.go b/_integrations/nrb3/example_test.go new file mode 100644 index 000000000..641b27f49 --- /dev/null +++ b/_integrations/nrb3/example_test.go @@ -0,0 +1,69 @@ +package nrb3 + +import ( + "fmt" + "log" + "net/http" + "os" + + newrelic "github.com/newrelic/go-agent" + "github.com/openzipkin/zipkin-go" + reporterhttp "github.com/openzipkin/zipkin-go/reporter/http" +) + +func currentTxn() newrelic.Transaction { + return nil +} + +func ExampleNewRoundTripper() { + // When defining the client, set the Transport to the NewRoundTripper. This + // will create ExternalSegments and add B3 headers for each request. + client := &http.Client{ + Transport: NewRoundTripper(nil), + } + + // Distributed Tracing must be enabled for this application. + txn := currentTxn() + + req, err := http.NewRequest("GET", "http://example.com", nil) + if nil != err { + log.Fatalln(err) + } + + // Be sure to add the transaction to the request context. This step is + // required. + req = newrelic.RequestWithTransactionContext(req, txn) + resp, err := client.Do(req) + if nil != err { + log.Fatalln(err) + } + + defer resp.Body.Close() + fmt.Println(resp.StatusCode) +} + +// This example demonstrates how to create a Zipkin reporter using the standard +// Zipkin http reporter +// (https://godoc.org/github.com/openzipkin/zipkin-go/reporter/http) to send +// Span data to New Relic. Follow this example when your application uses +// Zipkin for tracing (instead of the New Relic Go Agent) and you wish to send +// span data to the New Relic backend. The example assumes you have the +// environment variable NEW_RELIC_API_KEY set to your New Relic Insights Insert +// Key. 
+func Example_zipkinReporter() { + // import ( + // reporterhttp "github.com/openzipkin/zipkin-go/reporter/http" + // ) + reporter := reporterhttp.NewReporter( + "https://trace-api.newrelic.com/trace/v1", + reporterhttp.RequestCallback(func(req *http.Request) { + req.Header.Add("X-Insert-Key", os.Getenv("NEW_RELIC_API_KEY")) + req.Header.Add("Data-Format", "zipkin") + req.Header.Add("Data-Format-Version", "2") + }), + ) + defer reporter.Close() + + // use the reporter to create a new tracer + zipkin.NewTracer(reporter) +} diff --git a/_integrations/nrb3/nrb3.go b/_integrations/nrb3/nrb3.go new file mode 100644 index 000000000..c5dfde672 --- /dev/null +++ b/_integrations/nrb3/nrb3.go @@ -0,0 +1,79 @@ +package nrb3 + +import ( + "net/http" + "time" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "b3") } + +// NewRoundTripper creates an `http.RoundTripper` to instrument external +// requests. The RoundTripper returned creates an external segment and adds B3 +// tracing headers to each request if and only if a `newrelic.Transaction` +// (https://godoc.org/github.com/newrelic/go-agent#Transaction) is found in the +// `http.Request`'s context. It then delegates to the original RoundTripper +// provided (or http.DefaultTransport if none is provided). 
+func NewRoundTripper(original http.RoundTripper) http.RoundTripper { + if nil == original { + original = http.DefaultTransport + } + return &b3Transport{ + idGen: internal.NewTraceIDGenerator(int64(time.Now().UnixNano())), + original: original, + } +} + +// cloneRequest mimics implementation of +// https://godoc.org/github.com/google/go-github/github#BasicAuthTransport.RoundTrip +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type b3Transport struct { + idGen *internal.TraceIDGenerator + original http.RoundTripper +} + +func txnSampled(txn newrelic.Transaction) string { + if txn.IsSampled() { + return "1" + } + return "0" +} + +func addHeader(request *http.Request, key, val string) { + if val != "" { + request.Header.Add(key, val) + } +} + +func (t *b3Transport) RoundTrip(request *http.Request) (*http.Response, error) { + if txn := newrelic.FromContext(request.Context()); nil != txn { + // The specification of http.RoundTripper requires that the request is never modified. + request = cloneRequest(request) + segment := &newrelic.ExternalSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Request: request, + } + defer segment.End() + + md := txn.GetTraceMetadata() + addHeader(request, "X-B3-TraceId", md.TraceID) + addHeader(request, "X-B3-SpanId", t.idGen.GenerateTraceID()) + addHeader(request, "X-B3-ParentSpanId", md.SpanID) + addHeader(request, "X-B3-Sampled", txnSampled(txn)) + } + + return t.original.RoundTrip(request) +} diff --git a/_integrations/nrb3/nrb3_doc.go b/_integrations/nrb3/nrb3_doc.go new file mode 100644 index 000000000..9afc9a1a9 --- /dev/null +++ b/_integrations/nrb3/nrb3_doc.go @@ -0,0 +1,10 @@ +// Package nrb3 supports adding B3 headers to outgoing requests. 
+// +// When using the New Relic Go Agent, use this package if you want to add B3 +// headers ("X-B3-TraceId", etc., see +// https://github.com/openzipkin/b3-propagation) to outgoing requests. +// +// Distributed tracing must be enabled +// (https://docs.newrelic.com/docs/understand-dependencies/distributed-tracing/enable-configure/enable-distributed-tracing) +// for B3 headers to be added properly. +package nrb3 diff --git a/_integrations/nrb3/nrb3_test.go b/_integrations/nrb3/nrb3_test.go new file mode 100644 index 000000000..1141e86ff --- /dev/null +++ b/_integrations/nrb3/nrb3_test.go @@ -0,0 +1,154 @@ +package nrb3 + +import ( + "net/http" + "testing" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +func TestNewRoundTripperNil(t *testing.T) { + rt := NewRoundTripper(nil) + if orig := rt.(*b3Transport).original; orig != http.DefaultTransport { + t.Error("original is not as expected:", orig) + } +} + +type roundTripperFn func(*http.Request) (*http.Response, error) + +func (fn roundTripperFn) RoundTrip(r *http.Request) (*http.Response, error) { return fn(r) } + +func TestRoundTripperNoTxn(t *testing.T) { + app := integrationsupport.NewTestApp(nil, integrationsupport.DTEnabledCfgFn) + txn := app.StartTransaction("test", nil, nil) + + var count int + rt := NewRoundTripper(roundTripperFn(func(req *http.Request) (*http.Response, error) { + count++ + return &http.Response{ + StatusCode: 200, + }, nil + })) + client := &http.Client{Transport: rt} + + req, err := http.NewRequest("GET", "http://example.com", nil) + if nil != err { + t.Fatal(err) + } + _, err = client.Do(req) + if nil != err { + t.Fatal(err) + } + txn.End() + + if count != 1 { + t.Error("incorrect call count to RoundTripper:", count) + } + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: 
"DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/test", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/test", Scope: "", Forced: false, Data: nil}, + }) +} + +func TestRoundTripperWithTxnSampled(t *testing.T) { + replyfn := func(reply *internal.ConnectReply) { + reply.AdaptiveSampler = internal.SampleEverything{} + reply.TraceIDGenerator = internal.NewTraceIDGenerator(123) + } + app := integrationsupport.NewTestApp(replyfn, integrationsupport.DTEnabledCfgFn) + txn := app.StartTransaction("test", nil, nil) + + var count int + var sent *http.Request + rt := NewRoundTripper(roundTripperFn(func(req *http.Request) (*http.Response, error) { + count++ + sent = req + return &http.Response{ + StatusCode: 200, + }, nil + })) + rt.(*b3Transport).idGen = internal.NewTraceIDGenerator(456) + client := &http.Client{Transport: rt} + + req, err := http.NewRequest("GET", "http://example.com", nil) + if nil != err { + t.Fatal(err) + } + req = newrelic.RequestWithTransactionContext(req, txn) + _, err = client.Do(req) + if nil != err { + t.Fatal(err) + } + txn.End() + + if count != 1 { + t.Error("incorrect call count to RoundTripper:", count) + } + // original request is not modified + if hdr := req.Header.Get("X-B3-TraceId"); hdr != "" { + t.Error("original request was modified, X-B3-TraceId header set:", hdr) + } + // b3 headers added + if hdr := sent.Header.Get("X-B3-TraceId"); hdr != "94d1331706b6a2b3" { + t.Error("unexpected value for X-B3-TraceId header:", hdr) + } + if hdr := sent.Header.Get("X-B3-SpanId"); hdr != "5a4f2d1b7f0cf06d" { + t.Error("unexpected value for X-B3-SpanId header:", hdr) + } + if hdr := sent.Header.Get("X-B3-ParentSpanId"); hdr != "3ffe00369da8a3b6" { + t.Error("unexpected value for X-B3-ParentSpanId 
header:", hdr) + } + if hdr := sent.Header.Get("X-B3-Sampled"); hdr != "1" { + t.Error("unexpected value for X-B3-Sampled header:", hdr) + } + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/example.com/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/example.com/http/GET", Scope: "OtherTransaction/Go/test", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/test", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/test", Scope: "", Forced: false, Data: nil}, + }) +} + +func TestRoundTripperWithTxnNotSampled(t *testing.T) { + replyfn := func(reply *internal.ConnectReply) { + reply.AdaptiveSampler = internal.SampleNothing{} + } + app := integrationsupport.NewTestApp(replyfn, integrationsupport.DTEnabledCfgFn) + txn := app.StartTransaction("test", nil, nil) + + var sent *http.Request + rt := NewRoundTripper(roundTripperFn(func(req *http.Request) (*http.Response, error) { + sent = req + return &http.Response{ + StatusCode: 200, + }, nil + })) + client := &http.Client{Transport: rt} + + req, err := http.NewRequest("GET", "http://example.com", nil) + if nil != err { + t.Fatal(err) + } + req = newrelic.RequestWithTransactionContext(req, txn) + _, err = client.Do(req) + if nil != err { + t.Fatal(err) + } + txn.End() + + if hdr := sent.Header.Get("X-B3-Sampled"); hdr != "0" { + t.Error("unexpected value for X-B3-Sampled header:", hdr) + } +} diff --git a/_integrations/nrecho/README.md b/_integrations/nrecho/README.md new file mode 100644 
index 000000000..ee8cab3c7 --- /dev/null +++ b/_integrations/nrecho/README.md @@ -0,0 +1,10 @@ +# _integrations/nrecho [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrecho) + +Package `nrecho` instruments https://github.com/labstack/echo applications. + +```go +import "github.com/newrelic/go-agent/_integrations/nrecho" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrecho). diff --git a/_integrations/nrecho/example/main.go b/_integrations/nrecho/example/main.go new file mode 100644 index 000000000..f54a41f83 --- /dev/null +++ b/_integrations/nrecho/example/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "net/http" + "os" + + "github.com/labstack/echo" + "github.com/labstack/echo/middleware" + "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrecho" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func getUser(c echo.Context) error { + id := c.Param("id") + + if txn := nrecho.FromContext(c); nil != txn { + txn.AddAttribute("userId", id) + } + + return c.String(http.StatusOK, id) +} + +func main() { + cfg := newrelic.NewConfig("Echo App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // Echo instance + e := echo.New() + + // The New Relic Middleware should be the first middleware registered + e.Use(nrecho.Middleware(app)) + + // Routes + e.GET("/home", func(c echo.Context) error { + return c.String(http.StatusOK, "Hello, World!") + }) + + // Groups + g := e.Group("/user") + g.Use(middleware.Gzip()) + g.GET("/:id", getUser) + + // Start server + e.Start(":8000") +} diff --git a/_integrations/nrecho/nrecho.go b/_integrations/nrecho/nrecho.go new file mode 100644 index 000000000..15d2d2ef3 --- 
/dev/null +++ b/_integrations/nrecho/nrecho.go @@ -0,0 +1,90 @@ +// Package nrecho instruments https://github.com/labstack/echo applications. +// +// Use this package to instrument inbound requests handled by an echo.Echo +// instance. +// +// e := echo.New() +// // Add the nrecho middleware before other middlewares or routes: +// e.Use(nrecho.Middleware(app)) +// +// Example: https://github.com/newrelic/go-agent/tree/master/_integrations/nrecho/example/main.go +package nrecho + +import ( + "net/http" + "reflect" + + "github.com/labstack/echo" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "framework", "echo") } + +// FromContext returns the Transaction from the context if present, and nil +// otherwise. +func FromContext(c echo.Context) newrelic.Transaction { + return newrelic.FromContext(c.Request().Context()) +} + +func handlerPointer(handler echo.HandlerFunc) uintptr { + return reflect.ValueOf(handler).Pointer() +} + +func transactionName(c echo.Context) string { + ptr := handlerPointer(c.Handler()) + if ptr == handlerPointer(echo.NotFoundHandler) { + return "NotFoundHandler" + } + if ptr == handlerPointer(echo.MethodNotAllowedHandler) { + return "MethodNotAllowedHandler" + } + return c.Path() +} + +// Middleware creates Echo middleware that instruments requests. 
+// +// e := echo.New() +// // Add the nrecho middleware before other middlewares or routes: +// e.Use(nrecho.Middleware(app)) +// +func Middleware(app newrelic.Application) func(echo.HandlerFunc) echo.HandlerFunc { + + if nil == app { + return func(next echo.HandlerFunc) echo.HandlerFunc { + return next + } + } + + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) (err error) { + rw := c.Response().Writer + txn := app.StartTransaction(transactionName(c), rw, c.Request()) + defer txn.End() + + c.Response().Writer = txn + + // Add txn to c.Request().Context() + c.SetRequest(c.Request().WithContext(newrelic.NewContext(c.Request().Context(), txn))) + + err = next(c) + + // Record the response code. The response headers are not captured + // in this case because they are set after this middleware returns. + // Designed to mimic the logic in echo.DefaultHTTPErrorHandler. + if nil != err && !c.Response().Committed { + + txn.SetWebResponse(nil) + c.Response().Writer = rw + + if httperr, ok := err.(*echo.HTTPError); ok { + txn.WriteHeader(httperr.Code) + } else { + txn.WriteHeader(http.StatusInternalServerError) + } + } + + return + } + } +} diff --git a/_integrations/nrecho/nrecho_test.go b/_integrations/nrecho/nrecho_test.go new file mode 100644 index 000000000..d1e8de4a5 --- /dev/null +++ b/_integrations/nrecho/nrecho_test.go @@ -0,0 +1,247 @@ +package nrecho + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/labstack/echo" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +func TestBasicRoute(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + + e := echo.New() + e.Use(Middleware(app)) + e.GET("/hello", func(c echo.Context) error { + return c.Blob(http.StatusOK, "text/html", []byte("Hello, World!")) + }) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello?remove=me", nil) + if err != nil { + 
t.Fatal(err) + } + + e.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "Hello, World!" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "hello", + IsWeb: true, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "S", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": "200", + "request.method": "GET", + "response.headers.contentType": "text/html", + "request.uri": "/hello", + }, + UserAttributes: map[string]interface{}{}, + }}) +} + +func TestNilApp(t *testing.T) { + e := echo.New() + e.Use(Middleware(nil)) + e.GET("/hello", func(c echo.Context) error { + return c.String(http.StatusOK, "Hello, World!") + }) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello?remove=me", nil) + if err != nil { + t.Fatal(err) + } + + e.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "Hello, World!" { + t.Error("wrong response body", respBody) + } +} + +func TestTransactionContext(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + + e := echo.New() + e.Use(Middleware(app)) + e.GET("/hello", func(c echo.Context) error { + txn := FromContext(c) + if nil != txn { + txn.NoticeError(errors.New("ooops")) + } + return c.String(http.StatusOK, "Hello, World!") + }) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello?remove=me", nil) + if err != nil { + t.Fatal(err) + } + + e.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "Hello, World!" 
{ + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "hello", + IsWeb: true, + NumErrors: 1, + }) +} + +func TestNotFoundHandler(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + + e := echo.New() + e.Use(Middleware(app)) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello?remove=me", nil) + if err != nil { + t.Fatal(err) + } + + e.ServeHTTP(response, req) + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "NotFoundHandler", + IsWeb: true, + }) +} + +func TestMethodNotAllowedHandler(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + + e := echo.New() + e.Use(Middleware(app)) + e.GET("/hello", func(c echo.Context) error { + return c.String(http.StatusOK, "Hello, World!") + }) + + response := httptest.NewRecorder() + req, err := http.NewRequest("POST", "/hello?remove=me", nil) + if err != nil { + t.Fatal(err) + } + + e.ServeHTTP(response, req) + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "MethodNotAllowedHandler", + IsWeb: true, + NumErrors: 1, + }) +} + +func TestReturnsHTTPError(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + + e := echo.New() + e.Use(Middleware(app)) + e.GET("/hello", func(c echo.Context) error { + return echo.NewHTTPError(http.StatusTeapot, "I'm a teapot!") + }) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello?remove=me", nil) + if err != nil { + t.Fatal(err) + } + + e.ServeHTTP(response, req) + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "hello", + IsWeb: true, + NumErrors: 1, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": "418", + "request.method": "GET", + "request.uri": "/hello", + }, + UserAttributes: map[string]interface{}{}, + }}) +} + +func TestReturnsError(t *testing.T) { + app := 
integrationsupport.NewBasicTestApp() + + e := echo.New() + e.Use(Middleware(app)) + e.GET("/hello", func(c echo.Context) error { + return errors.New("ooooooooops") + }) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello?remove=me", nil) + if err != nil { + t.Fatal(err) + } + + e.ServeHTTP(response, req) + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "hello", + IsWeb: true, + NumErrors: 1, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": "500", + "request.method": "GET", + "request.uri": "/hello", + }, + UserAttributes: map[string]interface{}{}, + }}) +} + +func TestResponseCode(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + + e := echo.New() + e.Use(Middleware(app)) + e.GET("/hello", func(c echo.Context) error { + return c.Blob(http.StatusTeapot, "text/html", []byte("Hello, World!")) + }) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello?remove=me", nil) + if err != nil { + t.Fatal(err) + } + + e.ServeHTTP(response, req) + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "hello", + IsWeb: true, + NumErrors: 1, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": "418", + "request.method": "GET", + "response.headers.contentType": "text/html", + "request.uri": "/hello", + }, + UserAttributes: map[string]interface{}{}, + }}) +} diff --git a/_integrations/nrgin/v1/README.md b/_integrations/nrgin/v1/README.md new file mode 100644 index 000000000..f7e561669 --- /dev/null +++ b/_integrations/nrgin/v1/README.md @@ -0,0 +1,10 @@ +# _integrations/nrgin/v1 [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrgin/v1) + +Package 
`nrgin` instruments https://github.com/gin-gonic/gin applications. + +```go +import "github.com/newrelic/go-agent/_integrations/nrgin/v1" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrgin/v1). diff --git a/_integrations/nrgin/v1/example/main.go b/_integrations/nrgin/v1/example/main.go new file mode 100644 index 000000000..3dfb4fd0e --- /dev/null +++ b/_integrations/nrgin/v1/example/main.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "os" + + "github.com/gin-gonic/gin" + "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrgin/v1" +) + +func makeGinEndpoint(s string) func(*gin.Context) { + return func(c *gin.Context) { + c.Writer.WriteString(s) + } +} + +func v1login(c *gin.Context) { c.Writer.WriteString("v1 login") } +func v1submit(c *gin.Context) { c.Writer.WriteString("v1 submit") } +func v1read(c *gin.Context) { c.Writer.WriteString("v1 read") } + +func endpoint404(c *gin.Context) { + c.Writer.WriteHeader(404) + c.Writer.WriteString("returning 404") +} + +func endpointChangeCode(c *gin.Context) { + // gin.ResponseWriter buffers the response code so that it can be + // changed before the first write. + c.Writer.WriteHeader(404) + c.Writer.WriteHeader(200) + c.Writer.WriteString("actually ok!") +} + +func endpointResponseHeaders(c *gin.Context) { + // Since gin.ResponseWriter buffers the response code, response headers + // can be set afterwards. 
+ c.Writer.WriteHeader(200) + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteString(`{"zip":"zap"}`) +} + +func endpointNotFound(c *gin.Context) { + c.Writer.WriteString("there's no endpoint for that!") +} + +func endpointAccessTransaction(c *gin.Context) { + if txn := nrgin.Transaction(c); nil != txn { + txn.SetName("custom-name") + } + c.Writer.WriteString("changed the name of the transaction!") +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Gin App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + router := gin.Default() + router.Use(nrgin.Middleware(app)) + + router.GET("/404", endpoint404) + router.GET("/change", endpointChangeCode) + router.GET("/headers", endpointResponseHeaders) + router.GET("/txn", endpointAccessTransaction) + + // Since the handler function name is used as the transaction name, + // anonymous functions do not get usefully named. We encourage + // transforming anonymous functions into named functions. + router.GET("/anon", func(c *gin.Context) { + c.Writer.WriteString("anonymous function handler") + }) + + v1 := router.Group("/v1") + v1.GET("/login", v1login) + v1.GET("/submit", v1submit) + v1.GET("/read", v1read) + + router.NoRoute(endpointNotFound) + + router.Run(":8000") +} diff --git a/_integrations/nrgin/v1/nrgin.go b/_integrations/nrgin/v1/nrgin.go new file mode 100644 index 000000000..c400c57d2 --- /dev/null +++ b/_integrations/nrgin/v1/nrgin.go @@ -0,0 +1,120 @@ +// Package nrgin instruments https://github.com/gin-gonic/gin applications. +// +// Use this package to instrument inbound requests handled by a gin.Engine. 
+// Call nrgin.Middleware to get a gin.HandlerFunc which can be added to your +// application as a middleware: +// +// router := gin.Default() +// // Add the nrgin middleware before other middlewares or routes: +// router.Use(nrgin.Middleware(app)) +// +// Example: https://github.com/newrelic/go-agent/tree/master/_integrations/nrgin/v1/example/main.go +package nrgin + +import ( + "net/http" + + "github.com/gin-gonic/gin" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "framework", "gin", "v1") } + +// headerResponseWriter gives the transaction access to response headers and the +// response code. +type headerResponseWriter struct{ w gin.ResponseWriter } + +func (w *headerResponseWriter) Header() http.Header { return w.w.Header() } +func (w *headerResponseWriter) Write([]byte) (int, error) { return 0, nil } +func (w *headerResponseWriter) WriteHeader(int) {} + +var _ http.ResponseWriter = &headerResponseWriter{} + +// replacementResponseWriter mimics the behavior of gin.ResponseWriter which +// buffers the response code rather than writing it when +// gin.ResponseWriter.WriteHeader is called. 
+type replacementResponseWriter struct { + gin.ResponseWriter + txn newrelic.Transaction + code int + written bool +} + +var _ gin.ResponseWriter = &replacementResponseWriter{} + +func (w *replacementResponseWriter) flushHeader() { + if !w.written { + w.txn.WriteHeader(w.code) + w.written = true + } +} + +func (w *replacementResponseWriter) WriteHeader(code int) { + w.code = code + w.ResponseWriter.WriteHeader(code) +} + +func (w *replacementResponseWriter) Write(data []byte) (int, error) { + w.flushHeader() + return w.ResponseWriter.Write(data) +} + +func (w *replacementResponseWriter) WriteString(s string) (int, error) { + w.flushHeader() + return w.ResponseWriter.WriteString(s) +} + +func (w *replacementResponseWriter) WriteHeaderNow() { + w.flushHeader() + w.ResponseWriter.WriteHeaderNow() +} + +// Context avoids making this package 1.7+ specific. +type Context interface { + Value(key interface{}) interface{} +} + +// Transaction returns the transaction stored inside the context, or nil if not +// found. +func Transaction(c Context) newrelic.Transaction { + if v := c.Value(internal.GinTransactionContextKey); nil != v { + if txn, ok := v.(newrelic.Transaction); ok { + return txn + } + } + if v := c.Value(internal.TransactionContextKey); nil != v { + if txn, ok := v.(newrelic.Transaction); ok { + return txn + } + } + return nil +} + +// Middleware creates a Gin middleware that instruments requests. 
+// +// router := gin.Default() +// // Add the nrgin middleware before other middlewares or routes: +// router.Use(nrgin.Middleware(app)) +// +func Middleware(app newrelic.Application) gin.HandlerFunc { + return func(c *gin.Context) { + if app != nil { + name := c.HandlerName() + w := &headerResponseWriter{w: c.Writer} + txn := app.StartTransaction(name, w, c.Request) + defer txn.End() + + repl := &replacementResponseWriter{ + ResponseWriter: c.Writer, + txn: txn, + code: http.StatusOK, + } + c.Writer = repl + defer repl.flushHeader() + + c.Set(internal.GinTransactionContextKey, txn) + } + c.Next() + } +} diff --git a/_integrations/nrgin/v1/nrgin_context_test.go b/_integrations/nrgin/v1/nrgin_context_test.go new file mode 100644 index 000000000..626f3ce47 --- /dev/null +++ b/_integrations/nrgin/v1/nrgin_context_test.go @@ -0,0 +1,115 @@ +// +build go1.7 + +package nrgin + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +func accessTransactionContextContext(c *gin.Context) { + var ctx context.Context = c + // Transaction is designed to take both a context.Context and a + // *gin.Context. 
+	if txn := Transaction(ctx); nil != txn {
+		txn.NoticeError(errors.New("problem"))
+	}
+	c.Writer.WriteString("accessTransactionContextContext")
+}
+
+func TestContextContextTransaction(t *testing.T) {
+	app := integrationsupport.NewBasicTestApp()
+	router := gin.Default()
+	router.Use(Middleware(app))
+	router.GET("/txn", accessTransactionContextContext)
+
+	response := httptest.NewRecorder()
+	req, err := http.NewRequest("GET", "/txn", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	router.ServeHTTP(response, req)
+	if respBody := response.Body.String(); respBody != "accessTransactionContextContext" {
+		t.Error("wrong response body", respBody)
+	}
+	if response.Code != 200 {
+		t.Error("wrong response code", response.Code)
+	}
+	app.ExpectTxnMetrics(t, internal.WantTxn{
+		Name:      pkg + ".accessTransactionContextContext",
+		IsWeb:     true,
+		NumErrors: 1,
+	})
+}
+
+func accessTransactionFromContext(c *gin.Context) {
+	// This tests that FromContext will find the transaction added to a
+	// *gin.Context by nrgin.Middleware.
+	if txn := newrelic.FromContext(c); nil != txn {
+		txn.NoticeError(errors.New("problem"))
+	}
+	c.Writer.WriteString("accessTransactionFromContext")
+}
+
+func TestFromContext(t *testing.T) {
+	app := integrationsupport.NewBasicTestApp()
+	router := gin.Default()
+	router.Use(Middleware(app))
+	router.GET("/txn", accessTransactionFromContext)
+
+	response := httptest.NewRecorder()
+	req, err := http.NewRequest("GET", "/txn", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	router.ServeHTTP(response, req)
+	if respBody := response.Body.String(); respBody != "accessTransactionFromContext" {
+		t.Error("wrong response body", respBody)
+	}
+	if response.Code != 200 {
+		t.Error("wrong response code", response.Code)
+	}
+	app.ExpectTxnMetrics(t, internal.WantTxn{
+		Name:      pkg + ".accessTransactionFromContext",
+		IsWeb:     true,
+		NumErrors: 1,
+	})
+}
+
+func TestContextWithoutTransaction(t *testing.T) {
+	txn := Transaction(context.Background())
+	if txn != nil {
+		t.Error("didn't expect a transaction", txn)
+	}
+	ctx := context.WithValue(context.Background(), internal.TransactionContextKey, 123)
+	txn = Transaction(ctx)
+	if txn != nil {
+		t.Error("didn't expect a transaction", txn)
+	}
+}
+
+func TestNewContextTransaction(t *testing.T) {
+	// This tests that nrgin.Transaction will find a transaction added
+	// to a context using newrelic.NewContext.
+ app := integrationsupport.NewBasicTestApp() + txn := app.StartTransaction("name", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + if tx := Transaction(ctx); nil != tx { + tx.NoticeError(errors.New("problem")) + } + txn.End() + + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "name", + IsWeb: false, + NumErrors: 1, + }) +} diff --git a/_integrations/nrgin/v1/nrgin_test.go b/_integrations/nrgin/v1/nrgin_test.go new file mode 100644 index 000000000..83c389245 --- /dev/null +++ b/_integrations/nrgin/v1/nrgin_test.go @@ -0,0 +1,250 @@ +package nrgin + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +var ( + pkg = "github.com/newrelic/go-agent/_integrations/nrgin/v1" +) + +func hello(c *gin.Context) { + c.Writer.WriteString("hello response") +} + +func TestBasicRoute(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := gin.Default() + router.Use(Middleware(app)) + router.GET("/hello", hello) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hello response" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: pkg + ".hello", + IsWeb: true, + }) +} + +func TestRouterGroup(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := gin.Default() + router.Use(Middleware(app)) + group := router.Group("/group") + group.GET("/hello", hello) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/group/hello", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hello response" { + t.Error("wrong response body", 
respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: pkg + ".hello", + IsWeb: true, + }) +} + +func TestAnonymousHandler(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := gin.Default() + router.Use(Middleware(app)) + router.GET("/anon", func(c *gin.Context) { + c.Writer.WriteString("anonymous function handler") + }) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/anon", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "anonymous function handler" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: pkg + ".TestAnonymousHandler.func1", + IsWeb: true, + }) +} + +func multipleWriteHeader(c *gin.Context) { + // Unlike http.ResponseWriter, gin.ResponseWriter does not immediately + // write the first WriteHeader. Instead, it gets buffered until the + // first Write call. + c.Writer.WriteHeader(200) + c.Writer.WriteHeader(500) + c.Writer.WriteString("multipleWriteHeader") +} + +func TestMultipleWriteHeader(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := gin.Default() + router.Use(Middleware(app)) + router.GET("/header", multipleWriteHeader) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/header", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "multipleWriteHeader" { + t.Error("wrong response body", respBody) + } + if response.Code != 500 { + t.Error("wrong response code", response.Code) + } + // Error metrics test the 500 response code capture. 
+ app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: pkg + ".multipleWriteHeader", + IsWeb: true, + NumErrors: 1, + }) +} + +func accessTransactionGinContext(c *gin.Context) { + if txn := Transaction(c); nil != txn { + txn.NoticeError(errors.New("problem")) + } + c.Writer.WriteString("accessTransactionGinContext") +} + +func TestContextTransaction(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := gin.Default() + router.Use(Middleware(app)) + router.GET("/txn", accessTransactionGinContext) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/txn", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "accessTransactionGinContext" { + t.Error("wrong response body", respBody) + } + if response.Code != 200 { + t.Error("wrong response code", response.Code) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: pkg + ".accessTransactionGinContext", + IsWeb: true, + NumErrors: 1, + }) +} + +func TestNilApp(t *testing.T) { + var app newrelic.Application + router := gin.Default() + router.Use(Middleware(app)) + router.GET("/hello", hello) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hello response" { + t.Error("wrong response body", respBody) + } +} + +func errorStatus(c *gin.Context) { + c.String(500, "an error happened") +} + +func TestStatusCodes(t *testing.T) { + // Test that we are correctly able to collect status code. + // This behavior changed with this pull request: https://github.com/gin-gonic/gin/pull/1606 + // In Gin v1.4.0 and below, we always recorded a 200 status, whereas with + // newer Gin versions we now correctly capture the status. 
+ app := integrationsupport.NewBasicTestApp() + router := gin.Default() + router.Use(Middleware(app)) + router.GET("/err", errorStatus) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/err", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "an error happened" { + t.Error("wrong response body", respBody) + } + if response.Code != 500 { + t.Error("wrong response code", response.Code) + } + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/" + pkg + ".errorStatus", + "nr.apdexPerfZone": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 500, + "request.method": "GET", + "request.uri": "/err", + "response.headers.contentType": "text/plain; charset=utf-8", + }, + }}) +} + +func noBody(c *gin.Context) { + c.Status(500) +} + +func TestNoResponseBody(t *testing.T) { + // Test that when no response body is sent (i.e. c.Writer.Write is never + // called) that we still capture status code. 
+	app := integrationsupport.NewBasicTestApp()
+	router := gin.Default()
+	router.Use(Middleware(app))
+	router.GET("/nobody", noBody)
+
+	response := httptest.NewRecorder()
+	req, err := http.NewRequest("GET", "/nobody", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	router.ServeHTTP(response, req)
+	if respBody := response.Body.String(); respBody != "" {
+		t.Error("wrong response body", respBody)
+	}
+	if response.Code != 500 {
+		t.Error("wrong response code", response.Code)
+	}
+	app.ExpectTxnEvents(t, []internal.WantEvent{{
+		Intrinsics: map[string]interface{}{
+			"name":             "WebTransaction/Go/" + pkg + ".noBody",
+			"nr.apdexPerfZone": internal.MatchAnything,
+		},
+		UserAttributes: map[string]interface{}{},
+		AgentAttributes: map[string]interface{}{
+			"httpResponseCode": 500,
+			"request.method":   "GET",
+			"request.uri":      "/nobody",
+		},
+	}})
+}
diff --git a/_integrations/nrgorilla/v1/README.md b/_integrations/nrgorilla/v1/README.md
new file mode 100644
index 000000000..db0ada898
--- /dev/null
+++ b/_integrations/nrgorilla/v1/README.md
@@ -0,0 +1,10 @@
+# _integrations/nrgorilla/v1 [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrgorilla/v1)
+
+Package `nrgorilla` instruments https://github.com/gorilla/mux applications.
+
+```go
+import "github.com/newrelic/go-agent/_integrations/nrgorilla/v1"
+```
+
+For more information, see
+[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrgorilla/v1).
diff --git a/_integrations/nrgorilla/v1/example/main.go b/_integrations/nrgorilla/v1/example/main.go new file mode 100644 index 000000000..a9e9404ae --- /dev/null +++ b/_integrations/nrgorilla/v1/example/main.go @@ -0,0 +1,50 @@ +package main + +import ( + "fmt" + "net/http" + "os" + + "github.com/gorilla/mux" + newrelic "github.com/newrelic/go-agent" + nrgorilla "github.com/newrelic/go-agent/_integrations/nrgorilla/v1" +) + +func makeHandler(text string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(text)) + }) +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Gorilla App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + r := mux.NewRouter() + r.Handle("/", makeHandler("index")) + r.Handle("/alpha", makeHandler("alpha")) + + users := r.PathPrefix("/users").Subrouter() + users.Handle("/add", makeHandler("adding user")) + users.Handle("/delete", makeHandler("deleting user")) + + // The route name will be used as the transaction name if one is set. + r.Handle("/named", makeHandler("named route")).Name("special-name-route") + + // The NotFoundHandler will be instrumented if it is set. + r.NotFoundHandler = makeHandler("not found") + + http.ListenAndServe(":8000", nrgorilla.InstrumentRoutes(r, app)) +} diff --git a/_integrations/nrgorilla/v1/nrgorilla.go b/_integrations/nrgorilla/v1/nrgorilla.go new file mode 100644 index 000000000..e63aa306a --- /dev/null +++ b/_integrations/nrgorilla/v1/nrgorilla.go @@ -0,0 +1,74 @@ +// Package nrgorilla instruments https://github.com/gorilla/mux applications. +// +// Use this package to instrument inbound requests handled by a gorilla +// mux.Router. 
Call nrgorilla.InstrumentRoutes on your gorilla mux.Router +// after your routes have been added to it. +// +// Example: https://github.com/newrelic/go-agent/tree/master/_integrations/nrgorilla/v1/example/main.go +package nrgorilla + +import ( + "net/http" + + "github.com/gorilla/mux" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "framework", "gorilla", "v1") } + +type instrumentedHandler struct { + name string + app newrelic.Application + orig http.Handler +} + +func (h instrumentedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + txn := h.app.StartTransaction(h.name, w, r) + defer txn.End() + + r = newrelic.RequestWithTransactionContext(r, txn) + + h.orig.ServeHTTP(txn, r) +} + +func instrumentRoute(h http.Handler, app newrelic.Application, name string) http.Handler { + if _, ok := h.(instrumentedHandler); ok { + return h + } + return instrumentedHandler{ + name: name, + orig: h, + app: app, + } +} + +func routeName(route *mux.Route) string { + if nil == route { + return "" + } + if n := route.GetName(); n != "" { + return n + } + if n, _ := route.GetPathTemplate(); n != "" { + return n + } + n, _ := route.GetHostTemplate() + return n +} + +// InstrumentRoutes instruments requests through the provided mux.Router. Use +// this after the routes have been added to the router. 
+func InstrumentRoutes(r *mux.Router, app newrelic.Application) *mux.Router { + if app != nil { + r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + h := instrumentRoute(route.GetHandler(), app, routeName(route)) + route.Handler(h) + return nil + }) + if nil != r.NotFoundHandler { + r.NotFoundHandler = instrumentRoute(r.NotFoundHandler, app, "NotFoundHandler") + } + } + return r +} diff --git a/_integrations/nrgorilla/v1/nrgorilla_test.go b/_integrations/nrgorilla/v1/nrgorilla_test.go new file mode 100644 index 000000000..19b2c5481 --- /dev/null +++ b/_integrations/nrgorilla/v1/nrgorilla_test.go @@ -0,0 +1,127 @@ +package nrgorilla + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/gorilla/mux" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +func makeHandler(text string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(text)) + }) +} + +func TestBasicRoute(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + r := mux.NewRouter() + r.Handle("/alpha", makeHandler("alpha response")) + InstrumentRoutes(r, app) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/alpha", nil) + if err != nil { + t.Fatal(err) + } + r.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "alpha response" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "alpha", + IsWeb: true, + }) +} + +func TestSubrouterRoute(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + r := mux.NewRouter() + users := r.PathPrefix("/users").Subrouter() + users.Handle("/add", makeHandler("adding user")) + InstrumentRoutes(r, app) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/users/add", nil) + if err != nil { + t.Fatal(err) + } + r.ServeHTTP(response, 
req) + if respBody := response.Body.String(); respBody != "adding user" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "users/add", + IsWeb: true, + }) +} + +func TestNamedRoute(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + r := mux.NewRouter() + r.Handle("/named", makeHandler("named route")).Name("special-name-route") + InstrumentRoutes(r, app) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/named", nil) + if err != nil { + t.Fatal(err) + } + r.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "named route" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "special-name-route", + IsWeb: true, + }) +} + +func TestRouteNotFound(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + r := mux.NewRouter() + r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(500) + w.Write([]byte("not found")) + }) + // Tests that routes do not get double instrumented when + // InstrumentRoutes is called twice by expecting error metrics with a + // count of 1. + InstrumentRoutes(r, app) + InstrumentRoutes(r, app) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foo", nil) + if err != nil { + t.Fatal(err) + } + r.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "not found" { + t.Error("wrong response body", respBody) + } + if response.Code != 500 { + t.Error("wrong response code", response.Code) + } + // Error metrics test the 500 response code capture. 
+ app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "NotFoundHandler", + IsWeb: true, + NumErrors: 1, + }) +} + +func TestNilApp(t *testing.T) { + var app newrelic.Application + r := mux.NewRouter() + r.Handle("/alpha", makeHandler("alpha response")) + InstrumentRoutes(r, app) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/alpha", nil) + if err != nil { + t.Fatal(err) + } + r.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "alpha response" { + t.Error("wrong response body", respBody) + } +} diff --git a/_integrations/nrgrpc/README.md b/_integrations/nrgrpc/README.md new file mode 100644 index 000000000..0243fc008 --- /dev/null +++ b/_integrations/nrgrpc/README.md @@ -0,0 +1,10 @@ +# _integrations/nrgrpc [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrgrpc) + +Package `nrgrpc` instruments https://github.com/grpc/grpc-go. + +```go +import "github.com/newrelic/go-agent/_integrations/nrgrpc" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrgrpc). 
diff --git a/_integrations/nrgrpc/example/client/client.go b/_integrations/nrgrpc/example/client/client.go new file mode 100644 index 000000000..ac66a6426 --- /dev/null +++ b/_integrations/nrgrpc/example/client/client.go @@ -0,0 +1,131 @@ +package main + +import ( + "context" + "fmt" + "io" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrgrpc" + sampleapp "github.com/newrelic/go-agent/_integrations/nrgrpc/example/sampleapp" + "google.golang.org/grpc" +) + +func doUnaryUnary(ctx context.Context, client sampleapp.SampleApplicationClient) { + msg, err := client.DoUnaryUnary(ctx, &sampleapp.Message{Text: "Hello DoUnaryUnary"}) + if nil != err { + panic(err) + } + fmt.Println(msg.Text) +} + +func doUnaryStream(ctx context.Context, client sampleapp.SampleApplicationClient) { + stream, err := client.DoUnaryStream(ctx, &sampleapp.Message{Text: "Hello DoUnaryStream"}) + if nil != err { + panic(err) + } + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if nil != err { + panic(err) + } + fmt.Println(msg.Text) + } +} + +func doStreamUnary(ctx context.Context, client sampleapp.SampleApplicationClient) { + stream, err := client.DoStreamUnary(ctx) + if nil != err { + panic(err) + } + for i := 0; i < 3; i++ { + if err := stream.Send(&sampleapp.Message{Text: "Hello DoStreamUnary"}); nil != err { + if err == io.EOF { + break + } + panic(err) + } + } + msg, err := stream.CloseAndRecv() + if nil != err { + panic(err) + } + fmt.Println(msg.Text) +} + +func doStreamStream(ctx context.Context, client sampleapp.SampleApplicationClient) { + stream, err := client.DoStreamStream(ctx) + if nil != err { + panic(err) + } + waitc := make(chan struct{}) + go func() { + for { + msg, err := stream.Recv() + if err == io.EOF { + close(waitc) + return + } + if err != nil { + panic(err) + } + fmt.Println(msg.Text) + } + }() + for i := 0; i < 3; i++ { + if err := stream.Send(&sampleapp.Message{Text: "Hello 
DoStreamStream"}); err != nil { + panic(err) + } + } + stream.CloseSend() + <-waitc +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("gRPC Client", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + err = app.WaitForConnection(10 * time.Second) + if nil != err { + panic(err) + } + defer app.Shutdown(10 * time.Second) + + txn := app.StartTransaction("main", nil, nil) + defer txn.End() + + conn, err := grpc.Dial( + "localhost:8080", + grpc.WithInsecure(), + // Add the New Relic gRPC client instrumentation + grpc.WithUnaryInterceptor(nrgrpc.UnaryClientInterceptor), + grpc.WithStreamInterceptor(nrgrpc.StreamClientInterceptor), + ) + if err != nil { + panic(err) + } + defer conn.Close() + + client := sampleapp.NewSampleApplicationClient(conn) + ctx := newrelic.NewContext(context.Background(), txn) + + doUnaryUnary(ctx, client) + doUnaryStream(ctx, client) + doStreamUnary(ctx, client) + doStreamStream(ctx, client) +} diff --git a/_integrations/nrgrpc/example/sampleapp/sampleapp.pb.go b/_integrations/nrgrpc/example/sampleapp/sampleapp.pb.go new file mode 100644 index 000000000..b3aa03679 --- /dev/null +++ b/_integrations/nrgrpc/example/sampleapp/sampleapp.pb.go @@ -0,0 +1,366 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: sampleapp.proto + +package sampleapp + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Message struct { + Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_38ae74b4e52ac4e0, []int{0} +} + +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +func init() { + proto.RegisterType((*Message)(nil), "Message") +} + +func init() { proto.RegisterFile("sampleapp.proto", fileDescriptor_38ae74b4e52ac4e0) } + +var fileDescriptor_38ae74b4e52ac4e0 = []byte{ + // 153 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0x4e, 0xcc, 0x2d, + 0xc8, 0x49, 0x4d, 0x2c, 0x28, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x92, 0xe5, 0x62, 0xf7, + 0x4d, 0x2d, 0x2e, 
0x4e, 0x4c, 0x4f, 0x15, 0x12, 0xe2, 0x62, 0x29, 0x49, 0xad, 0x28, 0x91, 0x60, + 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x8d, 0xb6, 0x33, 0x72, 0x09, 0x06, 0x83, 0xb5, 0x38, + 0x16, 0x14, 0xe4, 0x64, 0x26, 0x27, 0x96, 0x64, 0xe6, 0xe7, 0x09, 0xa9, 0x70, 0xf1, 0xb8, 0xe4, + 0x87, 0xe6, 0x25, 0x16, 0x55, 0x82, 0x09, 0x21, 0x0e, 0x3d, 0xa8, 0x19, 0x52, 0x70, 0x96, 0x12, + 0x83, 0x90, 0x3a, 0x17, 0x2f, 0x54, 0x55, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x76, 0x65, 0x06, + 0x8c, 0x10, 0x85, 0x10, 0x35, 0x78, 0xcc, 0xd3, 0x60, 0x14, 0xd2, 0xe2, 0xe2, 0x83, 0x29, 0xc4, + 0x67, 0xa4, 0x06, 0xa3, 0x01, 0x63, 0x12, 0x1b, 0xd8, 0x7f, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0x8b, 0x56, 0x80, 0xf2, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SampleApplicationClient is the client API for SampleApplication service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type SampleApplicationClient interface { + DoUnaryUnary(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Message, error) + DoUnaryStream(ctx context.Context, in *Message, opts ...grpc.CallOption) (SampleApplication_DoUnaryStreamClient, error) + DoStreamUnary(ctx context.Context, opts ...grpc.CallOption) (SampleApplication_DoStreamUnaryClient, error) + DoStreamStream(ctx context.Context, opts ...grpc.CallOption) (SampleApplication_DoStreamStreamClient, error) +} + +type sampleApplicationClient struct { + cc *grpc.ClientConn +} + +func NewSampleApplicationClient(cc *grpc.ClientConn) SampleApplicationClient { + return &sampleApplicationClient{cc} +} + +func (c *sampleApplicationClient) DoUnaryUnary(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Message, error) { + out := new(Message) + err := c.cc.Invoke(ctx, "/SampleApplication/DoUnaryUnary", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sampleApplicationClient) DoUnaryStream(ctx context.Context, in *Message, opts ...grpc.CallOption) (SampleApplication_DoUnaryStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_SampleApplication_serviceDesc.Streams[0], "/SampleApplication/DoUnaryStream", opts...) 
+ if err != nil { + return nil, err + } + x := &sampleApplicationDoUnaryStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SampleApplication_DoUnaryStreamClient interface { + Recv() (*Message, error) + grpc.ClientStream +} + +type sampleApplicationDoUnaryStreamClient struct { + grpc.ClientStream +} + +func (x *sampleApplicationDoUnaryStreamClient) Recv() (*Message, error) { + m := new(Message) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *sampleApplicationClient) DoStreamUnary(ctx context.Context, opts ...grpc.CallOption) (SampleApplication_DoStreamUnaryClient, error) { + stream, err := c.cc.NewStream(ctx, &_SampleApplication_serviceDesc.Streams[1], "/SampleApplication/DoStreamUnary", opts...) + if err != nil { + return nil, err + } + x := &sampleApplicationDoStreamUnaryClient{stream} + return x, nil +} + +type SampleApplication_DoStreamUnaryClient interface { + Send(*Message) error + CloseAndRecv() (*Message, error) + grpc.ClientStream +} + +type sampleApplicationDoStreamUnaryClient struct { + grpc.ClientStream +} + +func (x *sampleApplicationDoStreamUnaryClient) Send(m *Message) error { + return x.ClientStream.SendMsg(m) +} + +func (x *sampleApplicationDoStreamUnaryClient) CloseAndRecv() (*Message, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Message) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *sampleApplicationClient) DoStreamStream(ctx context.Context, opts ...grpc.CallOption) (SampleApplication_DoStreamStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_SampleApplication_serviceDesc.Streams[2], "/SampleApplication/DoStreamStream", opts...) 
+ if err != nil { + return nil, err + } + x := &sampleApplicationDoStreamStreamClient{stream} + return x, nil +} + +type SampleApplication_DoStreamStreamClient interface { + Send(*Message) error + Recv() (*Message, error) + grpc.ClientStream +} + +type sampleApplicationDoStreamStreamClient struct { + grpc.ClientStream +} + +func (x *sampleApplicationDoStreamStreamClient) Send(m *Message) error { + return x.ClientStream.SendMsg(m) +} + +func (x *sampleApplicationDoStreamStreamClient) Recv() (*Message, error) { + m := new(Message) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SampleApplicationServer is the server API for SampleApplication service. +type SampleApplicationServer interface { + DoUnaryUnary(context.Context, *Message) (*Message, error) + DoUnaryStream(*Message, SampleApplication_DoUnaryStreamServer) error + DoStreamUnary(SampleApplication_DoStreamUnaryServer) error + DoStreamStream(SampleApplication_DoStreamStreamServer) error +} + +// UnimplementedSampleApplicationServer can be embedded to have forward compatible implementations. 
+type UnimplementedSampleApplicationServer struct { +} + +func (*UnimplementedSampleApplicationServer) DoUnaryUnary(ctx context.Context, req *Message) (*Message, error) { + return nil, status.Errorf(codes.Unimplemented, "method DoUnaryUnary not implemented") +} +func (*UnimplementedSampleApplicationServer) DoUnaryStream(req *Message, srv SampleApplication_DoUnaryStreamServer) error { + return status.Errorf(codes.Unimplemented, "method DoUnaryStream not implemented") +} +func (*UnimplementedSampleApplicationServer) DoStreamUnary(srv SampleApplication_DoStreamUnaryServer) error { + return status.Errorf(codes.Unimplemented, "method DoStreamUnary not implemented") +} +func (*UnimplementedSampleApplicationServer) DoStreamStream(srv SampleApplication_DoStreamStreamServer) error { + return status.Errorf(codes.Unimplemented, "method DoStreamStream not implemented") +} + +func RegisterSampleApplicationServer(s *grpc.Server, srv SampleApplicationServer) { + s.RegisterService(&_SampleApplication_serviceDesc, srv) +} + +func _SampleApplication_DoUnaryUnary_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Message) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SampleApplicationServer).DoUnaryUnary(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/SampleApplication/DoUnaryUnary", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SampleApplicationServer).DoUnaryUnary(ctx, req.(*Message)) + } + return interceptor(ctx, in, info, handler) +} + +func _SampleApplication_DoUnaryStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Message) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SampleApplicationServer).DoUnaryStream(m, &sampleApplicationDoUnaryStreamServer{stream}) +} + +type 
SampleApplication_DoUnaryStreamServer interface { + Send(*Message) error + grpc.ServerStream +} + +type sampleApplicationDoUnaryStreamServer struct { + grpc.ServerStream +} + +func (x *sampleApplicationDoUnaryStreamServer) Send(m *Message) error { + return x.ServerStream.SendMsg(m) +} + +func _SampleApplication_DoStreamUnary_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SampleApplicationServer).DoStreamUnary(&sampleApplicationDoStreamUnaryServer{stream}) +} + +type SampleApplication_DoStreamUnaryServer interface { + SendAndClose(*Message) error + Recv() (*Message, error) + grpc.ServerStream +} + +type sampleApplicationDoStreamUnaryServer struct { + grpc.ServerStream +} + +func (x *sampleApplicationDoStreamUnaryServer) SendAndClose(m *Message) error { + return x.ServerStream.SendMsg(m) +} + +func (x *sampleApplicationDoStreamUnaryServer) Recv() (*Message, error) { + m := new(Message) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SampleApplication_DoStreamStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SampleApplicationServer).DoStreamStream(&sampleApplicationDoStreamStreamServer{stream}) +} + +type SampleApplication_DoStreamStreamServer interface { + Send(*Message) error + Recv() (*Message, error) + grpc.ServerStream +} + +type sampleApplicationDoStreamStreamServer struct { + grpc.ServerStream +} + +func (x *sampleApplicationDoStreamStreamServer) Send(m *Message) error { + return x.ServerStream.SendMsg(m) +} + +func (x *sampleApplicationDoStreamStreamServer) Recv() (*Message, error) { + m := new(Message) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SampleApplication_serviceDesc = grpc.ServiceDesc{ + ServiceName: "SampleApplication", + HandlerType: (*SampleApplicationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DoUnaryUnary", + Handler: _SampleApplication_DoUnaryUnary_Handler, + }, 
+ }, + Streams: []grpc.StreamDesc{ + { + StreamName: "DoUnaryStream", + Handler: _SampleApplication_DoUnaryStream_Handler, + ServerStreams: true, + }, + { + StreamName: "DoStreamUnary", + Handler: _SampleApplication_DoStreamUnary_Handler, + ClientStreams: true, + }, + { + StreamName: "DoStreamStream", + Handler: _SampleApplication_DoStreamStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "sampleapp.proto", +} diff --git a/_integrations/nrgrpc/example/sampleapp/sampleapp.proto b/_integrations/nrgrpc/example/sampleapp/sampleapp.proto new file mode 100644 index 000000000..0a62cabe2 --- /dev/null +++ b/_integrations/nrgrpc/example/sampleapp/sampleapp.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +service SampleApplication { + rpc DoUnaryUnary(Message) returns (Message) {} + rpc DoUnaryStream(Message) returns (stream Message) {} + rpc DoStreamUnary(stream Message) returns (Message) {} + rpc DoStreamStream(stream Message) returns (stream Message) {} +} + +message Message { + string text = 1; +} diff --git a/_integrations/nrgrpc/example/server/server.go b/_integrations/nrgrpc/example/server/server.go new file mode 100644 index 000000000..6ebcd2c66 --- /dev/null +++ b/_integrations/nrgrpc/example/server/server.go @@ -0,0 +1,97 @@ +package main + +import ( + "context" + fmt "fmt" + "io" + "net" + "os" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrgrpc" + sampleapp "github.com/newrelic/go-agent/_integrations/nrgrpc/example/sampleapp" + "google.golang.org/grpc" +) + +// Server is a gRPC server. +type Server struct{} + +// processMessage processes each incoming Message. +func processMessage(ctx context.Context, msg *sampleapp.Message) { + defer newrelic.StartSegment(newrelic.FromContext(ctx), "processMessage").End() + fmt.Printf("Message received: %s\n", msg.Text) +} + +// DoUnaryUnary is a unary request, unary response method. 
+func (s *Server) DoUnaryUnary(ctx context.Context, msg *sampleapp.Message) (*sampleapp.Message, error) { + processMessage(ctx, msg) + return &sampleapp.Message{Text: "Hello from DoUnaryUnary"}, nil +} + +// DoUnaryStream is a unary request, stream response method. +func (s *Server) DoUnaryStream(msg *sampleapp.Message, stream sampleapp.SampleApplication_DoUnaryStreamServer) error { + processMessage(stream.Context(), msg) + for i := 0; i < 3; i++ { + if err := stream.Send(&sampleapp.Message{Text: "Hello from DoUnaryStream"}); nil != err { + return err + } + } + return nil +} + +// DoStreamUnary is a stream request, unary response method. +func (s *Server) DoStreamUnary(stream sampleapp.SampleApplication_DoStreamUnaryServer) error { + for { + msg, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&sampleapp.Message{Text: "Hello from DoStreamUnary"}) + } else if nil != err { + return err + } + processMessage(stream.Context(), msg) + } +} + +// DoStreamStream is a stream request, stream response method. 
+func (s *Server) DoStreamStream(stream sampleapp.SampleApplication_DoStreamStreamServer) error { + for { + msg, err := stream.Recv() + if err == io.EOF { + return nil + } else if nil != err { + return err + } + processMessage(stream.Context(), msg) + if err := stream.Send(&sampleapp.Message{Text: "Hello from DoStreamStream"}); nil != err { + return err + } + } +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("gRPC Server", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + + lis, err := net.Listen("tcp", "localhost:8080") + if err != nil { + panic(err) + } + grpcServer := grpc.NewServer( + // Add the New Relic gRPC server instrumentation + grpc.UnaryInterceptor(nrgrpc.UnaryServerInterceptor(app)), + grpc.StreamInterceptor(nrgrpc.StreamServerInterceptor(app)), + ) + sampleapp.RegisterSampleApplicationServer(grpcServer, &Server{}) + grpcServer.Serve(lis) +} diff --git a/_integrations/nrgrpc/nrgrpc_client.go b/_integrations/nrgrpc/nrgrpc_client.go new file mode 100644 index 000000000..335e3050c --- /dev/null +++ b/_integrations/nrgrpc/nrgrpc_client.go @@ -0,0 +1,132 @@ +package nrgrpc + +import ( + "context" + "io" + "net/url" + "strings" + + newrelic "github.com/newrelic/go-agent" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func getURL(method, target string) *url.URL { + var host string + // target can be anything from + // https://github.com/grpc/grpc/blob/master/doc/naming.md + // see https://godoc.org/google.golang.org/grpc#DialContext + if strings.HasPrefix(target, "unix:") { + host = "localhost" + } else { + host = strings.TrimPrefix(target, "dns:///") + } + return &url.URL{ + Scheme: "grpc", + Host: host, + Path: method, + } +} + +// startClientSegment starts an 
ExternalSegment and adds Distributed Trace +// headers to the outgoing grpc metadata in the context. +func startClientSegment(ctx context.Context, method, target string) (*newrelic.ExternalSegment, context.Context) { + var seg *newrelic.ExternalSegment + if txn := newrelic.FromContext(ctx); nil != txn { + seg = newrelic.StartExternalSegment(txn, nil) + + method = strings.TrimPrefix(method, "/") + seg.Host = getURL(method, target).Host + seg.Library = "gRPC" + seg.Procedure = method + + payload := txn.CreateDistributedTracePayload() + if txt := payload.Text(); "" != txt { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.New(nil) + } + md.Set(newrelic.DistributedTracePayloadHeader, txt) + ctx = metadata.NewOutgoingContext(ctx, md) + } + } + + return seg, ctx +} + +// UnaryClientInterceptor instruments client unary RPCs. This interceptor +// records each unary call with an external segment. Using it requires two steps: +// +// 1. Use this function with grpc.WithChainUnaryInterceptor or +// grpc.WithUnaryInterceptor when creating a grpc.ClientConn. Example: +// +// conn, err := grpc.Dial( +// "localhost:8080", +// grpc.WithUnaryInterceptor(nrgrpc.UnaryClientInterceptor), +// grpc.WithStreamInterceptor(nrgrpc.StreamClientInterceptor), +// ) +// +// 2. Ensure that calls made with this grpc.ClientConn are done with a context +// which contains a newrelic.Transaction. +// +// Full example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrgrpc/example/client/client.go +// +// This interceptor only instruments unary calls. You must use both +// UnaryClientInterceptor and StreamClientInterceptor to instrument unary and +// streaming calls. These interceptors add headers to the call metadata if +// distributed tracing is enabled. 
+func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + seg, ctx := startClientSegment(ctx, method, cc.Target()) + defer seg.End() + return invoker(ctx, method, req, reply, cc, opts...) +} + +type wrappedClientStream struct { + grpc.ClientStream + segment *newrelic.ExternalSegment + isUnaryServer bool +} + +func (s wrappedClientStream) RecvMsg(m interface{}) error { + err := s.ClientStream.RecvMsg(m) + if err == io.EOF || s.isUnaryServer { + s.segment.End() + } + return err +} + +// StreamClientInterceptor instruments client streaming RPCs. This interceptor +// records streaming each call with an external segment. Using it requires two steps: +// +// 1. Use this function with grpc.WithChainStreamInterceptor or +// grpc.WithStreamInterceptor when creating a grpc.ClientConn. Example: +// +// conn, err := grpc.Dial( +// "localhost:8080", +// grpc.WithUnaryInterceptor(nrgrpc.UnaryClientInterceptor), +// grpc.WithStreamInterceptor(nrgrpc.StreamClientInterceptor), +// ) +// +// 2. Ensure that calls made with this grpc.ClientConn are done with a context +// which contains a newrelic.Transaction. +// +// Full example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrgrpc/example/client/client.go +// +// This interceptor only instruments streaming calls. You must use both +// UnaryClientInterceptor and StreamClientInterceptor to instrument unary and +// streaming calls. These interceptors add headers to the call metadata if +// distributed tracing is enabled. +func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + seg, ctx := startClientSegment(ctx, method, cc.Target()) + s, err := streamer(ctx, desc, cc, method, opts...) 
+ if err != nil { + return s, err + } + return wrappedClientStream{ + segment: seg, + ClientStream: s, + isUnaryServer: !desc.ServerStreams, + }, nil +} diff --git a/_integrations/nrgrpc/nrgrpc_client_test.go b/_integrations/nrgrpc/nrgrpc_client_test.go new file mode 100644 index 000000000..e8ac6e791 --- /dev/null +++ b/_integrations/nrgrpc/nrgrpc_client_test.go @@ -0,0 +1,594 @@ +package nrgrpc + +import ( + "context" + "encoding/json" + "io" + "testing" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrgrpc/testapp" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" + "google.golang.org/grpc/metadata" +) + +func TestGetURL(t *testing.T) { + testcases := []struct { + method string + target string + expected string + }{ + { + method: "/TestApplication/DoUnaryUnary", + target: "", + expected: "grpc:///TestApplication/DoUnaryUnary", + }, + { + method: "TestApplication/DoUnaryUnary", + target: "", + expected: "grpc://TestApplication/DoUnaryUnary", + }, + { + method: "/TestApplication/DoUnaryUnary", + target: ":8080", + expected: "grpc://:8080/TestApplication/DoUnaryUnary", + }, + { + method: "/TestApplication/DoUnaryUnary", + target: "localhost:8080", + expected: "grpc://localhost:8080/TestApplication/DoUnaryUnary", + }, + { + method: "TestApplication/DoUnaryUnary", + target: "localhost:8080", + expected: "grpc://localhost:8080/TestApplication/DoUnaryUnary", + }, + { + method: "/TestApplication/DoUnaryUnary", + target: "dns:///localhost:8080", + expected: "grpc://localhost:8080/TestApplication/DoUnaryUnary", + }, + { + method: "/TestApplication/DoUnaryUnary", + target: "unix:/path/to/socket", + expected: "grpc://localhost/TestApplication/DoUnaryUnary", + }, + { + method: "/TestApplication/DoUnaryUnary", + target: "unix:///path/to/socket", + expected: "grpc://localhost/TestApplication/DoUnaryUnary", + }, + } + + for _, test := range testcases { + actual := getURL(test.method, 
test.target) + if actual.String() != test.expected { + t.Errorf("incorrect URL:\n\tmethod=%s,\n\ttarget=%s,\n\texpected=%s,\n\tactual=%s", + test.method, test.target, test.expected, actual.String()) + } + } +} + +func testApp() integrationsupport.ExpectApp { + return integrationsupport.NewTestApp(replyFn, configFn) +} + +var replyFn = func(reply *internal.ConnectReply) { + reply.AdaptiveSampler = internal.SampleEverything{} + reply.AccountID = "123" + reply.TrustedAccountKey = "123" + reply.PrimaryAppID = "456" +} + +var configFn = func(cfg *newrelic.Config) { + cfg.Enabled = false + cfg.DistributedTracer.Enabled = true + cfg.TransactionTracer.SegmentThreshold = 0 + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 +} + +func TestUnaryClientInterceptor(t *testing.T) { + app := testApp() + txn := app.StartTransaction("UnaryUnary", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + resp, err := client.DoUnaryUnary(ctx, &testapp.Message{}) + if nil != err { + t.Fatal("client call to DoUnaryUnary failed", err) + } + var hdrs map[string][]string + err = json.Unmarshal([]byte(resp.Text), &hdrs) + if nil != err { + t.Fatal("cannot unmarshall client response", err) + } + if hdr, ok := hdrs["newrelic"]; !ok || len(hdr) != 1 || "" == hdr[0] { + t.Error("distributed trace header not sent", hdrs) + } + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/UnaryUnary", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/UnaryUnary", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + 
{Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/bufnet/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/bufnet/gRPC/TestApplication/DoUnaryUnary", Scope: "OtherTransaction/Go/UnaryUnary", Forced: false, Data: nil}, + {Name: "Supportability/DistributedTrace/CreatePayload/Success", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/UnaryUnary", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "http", + "component": "gRPC", + "name": "External/bufnet/gRPC/TestApplication/DoUnaryUnary", + "parentId": internal.MatchAnything, + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/UnaryUnary", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/UnaryUnary", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + SegmentName: "External/bufnet/gRPC/TestApplication/DoUnaryUnary", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }}) +} + +func TestUnaryStreamClientInterceptor(t *testing.T) { + app := testApp() + txn := app.StartTransaction("UnaryStream", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := 
testapp.NewTestApplicationClient(conn) + stream, err := client.DoUnaryStream(ctx, &testapp.Message{}) + if nil != err { + t.Fatal("client call to DoUnaryStream failed", err) + } + var recved int + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if nil != err { + t.Fatal("error receiving message", err) + } + var hdrs map[string][]string + err = json.Unmarshal([]byte(msg.Text), &hdrs) + if nil != err { + t.Fatal("cannot unmarshall client response", err) + } + if hdr, ok := hdrs["newrelic"]; !ok || len(hdr) != 1 || "" == hdr[0] { + t.Error("distributed trace header not sent", hdrs) + } + recved++ + } + if recved != 3 { + t.Fatal("received incorrect number of messages from server", recved) + } + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/UnaryStream", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/UnaryStream", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/bufnet/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/bufnet/gRPC/TestApplication/DoUnaryStream", Scope: "OtherTransaction/Go/UnaryStream", Forced: false, Data: nil}, + {Name: "Supportability/DistributedTrace/CreatePayload/Success", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/UnaryStream", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: 
map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "http", + "component": "gRPC", + "name": "External/bufnet/gRPC/TestApplication/DoUnaryStream", + "parentId": internal.MatchAnything, + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/UnaryStream", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/UnaryStream", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + SegmentName: "External/bufnet/gRPC/TestApplication/DoUnaryStream", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }}) +} + +func TestStreamUnaryClientInterceptor(t *testing.T) { + app := testApp() + txn := app.StartTransaction("StreamUnary", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + stream, err := client.DoStreamUnary(ctx) + if nil != err { + t.Fatal("client call to DoStreamUnary failed", err) + } + for i := 0; i < 3; i++ { + if err := stream.Send(&testapp.Message{Text: "Hello DoStreamUnary"}); nil != err { + if err == io.EOF { + break + } + t.Fatal("failure to Send", err) + } + } + msg, err := stream.CloseAndRecv() + if nil != err { + t.Fatal("failure to CloseAndRecv", err) + } + var hdrs map[string][]string + err = json.Unmarshal([]byte(msg.Text), &hdrs) + if nil != err { + t.Fatal("cannot unmarshall client response", err) + } + if hdr, ok := hdrs["newrelic"]; !ok || len(hdr) != 1 || "" == hdr[0] { + t.Error("distributed trace header not sent", hdrs) + } + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: 
"OtherTransaction/Go/StreamUnary", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/StreamUnary", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/bufnet/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/bufnet/gRPC/TestApplication/DoStreamUnary", Scope: "OtherTransaction/Go/StreamUnary", Forced: false, Data: nil}, + {Name: "Supportability/DistributedTrace/CreatePayload/Success", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/StreamUnary", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "http", + "component": "gRPC", + "name": "External/bufnet/gRPC/TestApplication/DoStreamUnary", + "parentId": internal.MatchAnything, + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/StreamUnary", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/StreamUnary", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + 
SegmentName: "External/bufnet/gRPC/TestApplication/DoStreamUnary", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }}) +} + +func TestStreamStreamClientInterceptor(t *testing.T) { + app := testApp() + txn := app.StartTransaction("StreamStream", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + stream, err := client.DoStreamStream(ctx) + if nil != err { + t.Fatal("client call to DoStreamStream failed", err) + } + waitc := make(chan struct{}) + go func() { + defer close(waitc) + var recved int + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal("failure to Recv", err) + } + var hdrs map[string][]string + err = json.Unmarshal([]byte(msg.Text), &hdrs) + if nil != err { + t.Fatal("cannot unmarshall client response", err) + } + if hdr, ok := hdrs["newrelic"]; !ok || len(hdr) != 1 || "" == hdr[0] { + t.Error("distributed trace header not sent", hdrs) + } + recved++ + } + if recved != 3 { + t.Fatal("received incorrect number of messages from server", recved) + } + }() + for i := 0; i < 3; i++ { + if err := stream.Send(&testapp.Message{Text: "Hello DoStreamStream"}); err != nil { + t.Fatal("failure to Send", err) + } + } + stream.CloseSend() + <-waitc + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/StreamStream", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/StreamStream", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", 
Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/bufnet/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/bufnet/gRPC/TestApplication/DoStreamStream", Scope: "OtherTransaction/Go/StreamStream", Forced: false, Data: nil}, + {Name: "Supportability/DistributedTrace/CreatePayload/Success", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/StreamStream", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "http", + "component": "gRPC", + "name": "External/bufnet/gRPC/TestApplication/DoStreamStream", + "parentId": internal.MatchAnything, + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/StreamStream", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/StreamStream", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + SegmentName: "External/bufnet/gRPC/TestApplication/DoStreamStream", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }}) +} + +func TestClientUnaryMetadata(t *testing.T) { + // Test that metadata on the outgoing request are presevered + app := testApp() + txn := app.StartTransaction("metadata", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + + md := metadata.New(map[string]string{ + "testing": "hello world", + "newrelic": "payload", + }) + ctx = metadata.NewOutgoingContext(ctx, md) + + s, conn := newTestServerAndConn(t, 
nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + resp, err := client.DoUnaryUnary(ctx, &testapp.Message{}) + if nil != err { + t.Fatal("client call to DoUnaryUnary failed", err) + } + var hdrs map[string][]string + err = json.Unmarshal([]byte(resp.Text), &hdrs) + if nil != err { + t.Fatal("cannot unmarshall client response", err) + } + if hdr, ok := hdrs["newrelic"]; !ok || len(hdr) != 1 || "" == hdr[0] || "payload" == hdr[0] { + t.Error("distributed trace header not sent", hdrs) + } + if hdr, ok := hdrs["testing"]; !ok || len(hdr) != 1 || "hello world" != hdr[0] { + t.Error("testing header not sent", hdrs) + } +} + +func TestNilTxnClientUnary(t *testing.T) { + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + resp, err := client.DoUnaryUnary(context.Background(), &testapp.Message{}) + if nil != err { + t.Fatal("client call to DoUnaryUnary failed", err) + } + var hdrs map[string][]string + err = json.Unmarshal([]byte(resp.Text), &hdrs) + if nil != err { + t.Fatal("cannot unmarshall client response", err) + } + if _, ok := hdrs["newrelic"]; ok { + t.Error("distributed trace header sent", hdrs) + } +} + +func TestNilTxnClientStreaming(t *testing.T) { + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + stream, err := client.DoStreamUnary(context.Background()) + if nil != err { + t.Fatal("client call to DoStreamUnary failed", err) + } + for i := 0; i < 3; i++ { + if err := stream.Send(&testapp.Message{Text: "Hello DoStreamUnary"}); nil != err { + if err == io.EOF { + break + } + t.Fatal("failure to Send", err) + } + } + msg, err := stream.CloseAndRecv() + if nil != err { + t.Fatal("failure to CloseAndRecv", err) + } + var hdrs map[string][]string + err = json.Unmarshal([]byte(msg.Text), &hdrs) + if nil != err { + t.Fatal("cannot unmarshall client response", 
err) + } + if _, ok := hdrs["newrelic"]; ok { + t.Error("distributed trace header sent", hdrs) + } +} + +func TestClientStreamingError(t *testing.T) { + // Test that when creating the stream returns an error, no external + // segments are created + app := testApp() + txn := app.StartTransaction("UnaryStream", nil, nil) + + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 0) + defer cancel() + ctx = newrelic.NewContext(ctx, txn) + _, err := client.DoUnaryStream(ctx, &testapp.Message{}) + if nil == err { + t.Fatal("client call to DoUnaryStream did not return error") + } + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/UnaryStream", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/UnaryStream", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "Supportability/DistributedTrace/CreatePayload/Success", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/UnaryStream", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/UnaryStream", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/UnaryStream", + 
Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{}, + }}, + }, + }}) +} diff --git a/_integrations/nrgrpc/nrgrpc_doc.go b/_integrations/nrgrpc/nrgrpc_doc.go new file mode 100644 index 000000000..0756bd353 --- /dev/null +++ b/_integrations/nrgrpc/nrgrpc_doc.go @@ -0,0 +1,44 @@ +// Package nrgrpc instruments https://github.com/grpc/grpc-go. +// +// This package can be used to instrument gRPC servers and gRPC clients. +// +// To instrument a gRPC server, use UnaryServerInterceptor and +// StreamServerInterceptor with your newrelic.Application to create server +// interceptors to pass to grpc.NewServer. Example: +// +// +// cfg := newrelic.NewConfig("gRPC Server", os.Getenv("NEW_RELIC_LICENSE_KEY")) +// app, _ := newrelic.NewApplication(cfg) +// server := grpc.NewServer( +// grpc.UnaryInterceptor(nrgrpc.UnaryServerInterceptor(app)), +// grpc.StreamInterceptor(nrgrpc.StreamServerInterceptor(app)), +// ) +// +// These interceptors create transactions for inbound calls. The transaction is +// added to the call context and can be accessed in your method handlers +// using newrelic.FromContext. +// +// Full server example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrgrpc/example/server/server.go +// +// To instrument a gRPC client, follow these two steps: +// +// 1. Use UnaryClientInterceptor and StreamClientInterceptor when creating a +// grpc.ClientConn. Example: +// +// conn, err := grpc.Dial( +// "localhost:8080", +// grpc.WithUnaryInterceptor(nrgrpc.UnaryClientInterceptor), +// grpc.WithStreamInterceptor(nrgrpc.StreamClientInterceptor), +// ) +// +// 2. Ensure that calls made with this grpc.ClientConn are done with a context +// which contains a newrelic.Transaction. 
+// +// Full client example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrgrpc/example/client/client.go +package nrgrpc + +import "github.com/newrelic/go-agent/internal" + +func init() { internal.TrackUsage("integration", "framework", "grpc") } diff --git a/_integrations/nrgrpc/nrgrpc_server.go b/_integrations/nrgrpc/nrgrpc_server.go new file mode 100644 index 000000000..f131a7eba --- /dev/null +++ b/_integrations/nrgrpc/nrgrpc_server.go @@ -0,0 +1,129 @@ +package nrgrpc + +import ( + "context" + "net/http" + "strings" + + newrelic "github.com/newrelic/go-agent" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +func startTransaction(ctx context.Context, app newrelic.Application, fullMethod string) newrelic.Transaction { + method := strings.TrimPrefix(fullMethod, "/") + + var hdrs http.Header + if md, ok := metadata.FromIncomingContext(ctx); ok { + hdrs = make(http.Header, len(md)) + for k, vs := range md { + for _, v := range vs { + hdrs.Add(k, v) + } + } + } + + target := hdrs.Get(":authority") + url := getURL(method, target) + + webReq := newrelic.NewStaticWebRequest(hdrs, url, method, newrelic.TransportHTTP) + txn := app.StartTransaction(method, nil, nil) + txn.SetWebRequest(webReq) + + return txn +} + +// UnaryServerInterceptor instruments server unary RPCs. +// +// Use this function with grpc.UnaryInterceptor and a newrelic.Application to +// create a grpc.ServerOption to pass to grpc.NewServer. This interceptor +// records each unary call with a transaction. You must use both +// UnaryServerInterceptor and StreamServerInterceptor to instrument unary and +// streaming calls. 
+// +// Example: +// +// cfg := newrelic.NewConfig("gRPC Server", os.Getenv("NEW_RELIC_LICENSE_KEY")) +// app, _ := newrelic.NewApplication(cfg) +// server := grpc.NewServer( +// grpc.UnaryInterceptor(nrgrpc.UnaryServerInterceptor(app)), +// grpc.StreamInterceptor(nrgrpc.StreamServerInterceptor(app)), +// ) +// +// These interceptors add the transaction to the call context so it may be +// accessed in your method handlers using newrelic.FromContext. +// +// Full example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrgrpc/example/server/server.go +// +func UnaryServerInterceptor(app newrelic.Application) grpc.UnaryServerInterceptor { + if nil == app { + return nil + } + + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + txn := startTransaction(ctx, app, info.FullMethod) + defer txn.End() + + ctx = newrelic.NewContext(ctx, txn) + resp, err = handler(ctx, req) + txn.WriteHeader(int(status.Code(err))) + return + } +} + +type wrappedServerStream struct { + grpc.ServerStream + txn newrelic.Transaction +} + +func (s wrappedServerStream) Context() context.Context { + ctx := s.ServerStream.Context() + return newrelic.NewContext(ctx, s.txn) +} + +func newWrappedServerStream(stream grpc.ServerStream, txn newrelic.Transaction) grpc.ServerStream { + return wrappedServerStream{ + ServerStream: stream, + txn: txn, + } +} + +// StreamServerInterceptor instruments server streaming RPCs. +// +// Use this function with grpc.StreamInterceptor and a newrelic.Application to +// create a grpc.ServerOption to pass to grpc.NewServer. This interceptor +// records each streaming call with a transaction. You must use both +// UnaryServerInterceptor and StreamServerInterceptor to instrument unary and +// streaming calls. 
+// +// Example: +// +// cfg := newrelic.NewConfig("gRPC Server", os.Getenv("NEW_RELIC_LICENSE_KEY")) +// app, _ := newrelic.NewApplication(cfg) +// server := grpc.NewServer( +// grpc.UnaryInterceptor(nrgrpc.UnaryServerInterceptor(app)), +// grpc.StreamInterceptor(nrgrpc.StreamServerInterceptor(app)), +// ) +// +// These interceptors add the transaction to the call context so it may be +// accessed in your method handlers using newrelic.FromContext. +// +// Full example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrgrpc/example/server/server.go +// +func StreamServerInterceptor(app newrelic.Application) grpc.StreamServerInterceptor { + if nil == app { + return nil + } + + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + txn := startTransaction(ss.Context(), app, info.FullMethod) + defer txn.End() + + err := handler(srv, newWrappedServerStream(ss, txn)) + txn.WriteHeader(int(status.Code(err))) + return err + } +} diff --git a/_integrations/nrgrpc/nrgrpc_server_test.go b/_integrations/nrgrpc/nrgrpc_server_test.go new file mode 100644 index 000000000..fcd133ef6 --- /dev/null +++ b/_integrations/nrgrpc/nrgrpc_server_test.go @@ -0,0 +1,592 @@ +package nrgrpc + +import ( + "context" + "io" + "net" + "strings" + "testing" + "time" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrgrpc/testapp" + "github.com/newrelic/go-agent/internal" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +// newTestServerAndConn creates a new *grpc.Server and *grpc.ClientConn for use +// in testing. It adds instrumentation to both. If app is nil, then +// instrumentation is not applied to the server. Be sure to Stop() the server +// and Close() the connection when done with them. 
+func newTestServerAndConn(t *testing.T, app newrelic.Application) (*grpc.Server, *grpc.ClientConn) { + s := grpc.NewServer( + grpc.UnaryInterceptor(UnaryServerInterceptor(app)), + grpc.StreamInterceptor(StreamServerInterceptor(app)), + ) + testapp.RegisterTestApplicationServer(s, &testapp.Server{}) + lis := bufconn.Listen(1024 * 1024) + + go func() { + s.Serve(lis) + }() + + bufDialer := func(string, time.Duration) (net.Conn, error) { + return lis.Dial() + } + conn, err := grpc.Dial("bufnet", + grpc.WithDialer(bufDialer), + grpc.WithInsecure(), + grpc.WithBlock(), // create the connection synchronously + grpc.WithUnaryInterceptor(UnaryClientInterceptor), + grpc.WithStreamInterceptor(StreamClientInterceptor), + ) + if err != nil { + t.Fatal("failure to create ClientConn", err) + } + + return s, conn +} + +func TestUnaryServerInterceptor(t *testing.T) { + app := testApp() + + s, conn := newTestServerAndConn(t, app) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + txn := app.StartTransaction("client", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + _, err := client.DoUnaryUnary(ctx, &testapp.Message{}) + if nil != err { + t.Fatal("unable to call client DoUnaryUnary", err) + } + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/TestApplication/DoUnaryUnary", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/DoUnaryUnary", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/DoUnaryUnary", Scope: "WebTransaction/Go/TestApplication/DoUnaryUnary", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/DistributedTrace/AcceptPayload/Success", Scope: "", Forced: true, Data: nil}, + {Name: 
"TransportDuration/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/TestApplication/DoUnaryUnary", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/TestApplication/DoUnaryUnary", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "guid": internal.MatchAnything, + "name": "WebTransaction/Go/TestApplication/DoUnaryUnary", + "nr.apdexPerfZone": internal.MatchAnything, + "parent.account": 123, + "parent.app": 456, + "parent.transportDuration": internal.MatchAnything, + "parent.transportType": "HTTP", + "parent.type": "App", + "parentId": internal.MatchAnything, + "parentSpanId": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 0, + "request.headers.contentType": "application/grpc", + "request.method": "TestApplication/DoUnaryUnary", + "request.uri": "grpc://bufnet/TestApplication/DoUnaryUnary", + }, + }}) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "WebTransaction/Go/TestApplication/DoUnaryUnary", + "nr.entryPoint": true, + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "Custom/DoUnaryUnary", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) +} + +func 
TestUnaryServerInterceptorError(t *testing.T) { + app := testApp() + + s, conn := newTestServerAndConn(t, app) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + _, err := client.DoUnaryUnaryError(context.Background(), &testapp.Message{}) + if nil == err { + t.Fatal("DoUnaryUnaryError should have returned an error") + } + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/TestApplication/DoUnaryUnaryError", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "Errors/WebTransaction/Go/TestApplication/DoUnaryUnaryError", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/all", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/TestApplication/DoUnaryUnaryError", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/TestApplication/DoUnaryUnaryError", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "guid": internal.MatchAnything, + "name": "WebTransaction/Go/TestApplication/DoUnaryUnaryError", + "nr.apdexPerfZone": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: 
map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 15, + "request.headers.contentType": "application/grpc", + "request.method": "TestApplication/DoUnaryUnaryError", + "request.uri": "grpc://bufnet/TestApplication/DoUnaryUnaryError", + }, + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "15", + "error.message": "response code 15", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + "transactionName": "WebTransaction/Go/TestApplication/DoUnaryUnaryError", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 15, + "request.headers.User-Agent": internal.MatchAnything, + "request.headers.contentType": "application/grpc", + "request.method": "TestApplication/DoUnaryUnaryError", + "request.uri": "grpc://bufnet/TestApplication/DoUnaryUnaryError", + }, + UserAttributes: map[string]interface{}{}, + }}) +} + +func TestUnaryStreamServerInterceptor(t *testing.T) { + app := testApp() + + s, conn := newTestServerAndConn(t, app) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + txn := app.StartTransaction("client", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + stream, err := client.DoUnaryStream(ctx, &testapp.Message{}) + if nil != err { + t.Fatal("client call to DoUnaryStream failed", err) + } + var recved int + for { + _, err := stream.Recv() + if err == io.EOF { + break + } + if nil != err { + t.Fatal("error receiving message", err) + } + recved++ + } + if recved != 3 { + t.Fatal("received incorrect number of messages from server", recved) + } + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/TestApplication/DoUnaryStream", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/DoUnaryStream", Scope: "", Forced: false, Data: nil}, + 
{Name: "Custom/DoUnaryStream", Scope: "WebTransaction/Go/TestApplication/DoUnaryStream", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/DistributedTrace/AcceptPayload/Success", Scope: "", Forced: true, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/TestApplication/DoUnaryStream", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/TestApplication/DoUnaryStream", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "guid": internal.MatchAnything, + "name": "WebTransaction/Go/TestApplication/DoUnaryStream", + "nr.apdexPerfZone": internal.MatchAnything, + "parent.account": 123, + "parent.app": 456, + "parent.transportDuration": internal.MatchAnything, + "parent.transportType": "HTTP", + "parent.type": "App", + "parentId": internal.MatchAnything, + "parentSpanId": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 0, + "request.headers.contentType": "application/grpc", + "request.method": "TestApplication/DoUnaryStream", + "request.uri": "grpc://bufnet/TestApplication/DoUnaryStream", + }, + }}) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": 
"WebTransaction/Go/TestApplication/DoUnaryStream", + "nr.entryPoint": true, + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "Custom/DoUnaryStream", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) +} + +func TestStreamUnaryServerInterceptor(t *testing.T) { + app := testApp() + + s, conn := newTestServerAndConn(t, app) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + txn := app.StartTransaction("client", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + stream, err := client.DoStreamUnary(ctx) + if nil != err { + t.Fatal("client call to DoStreamUnary failed", err) + } + for i := 0; i < 3; i++ { + if err := stream.Send(&testapp.Message{Text: "Hello DoStreamUnary"}); nil != err { + if err == io.EOF { + break + } + t.Fatal("failure to Send", err) + } + } + _, err = stream.CloseAndRecv() + if nil != err { + t.Fatal("failure to CloseAndRecv", err) + } + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/TestApplication/DoStreamUnary", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/DoStreamUnary", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/DoStreamUnary", Scope: "WebTransaction/Go/TestApplication/DoStreamUnary", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/DistributedTrace/AcceptPayload/Success", Scope: "", Forced: true, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + 
{Name: "TransportDuration/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/TestApplication/DoStreamUnary", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/TestApplication/DoStreamUnary", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "guid": internal.MatchAnything, + "name": "WebTransaction/Go/TestApplication/DoStreamUnary", + "nr.apdexPerfZone": internal.MatchAnything, + "parent.account": 123, + "parent.app": 456, + "parent.transportDuration": internal.MatchAnything, + "parent.transportType": "HTTP", + "parent.type": "App", + "parentId": internal.MatchAnything, + "parentSpanId": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 0, + "request.headers.contentType": "application/grpc", + "request.method": "TestApplication/DoStreamUnary", + "request.uri": "grpc://bufnet/TestApplication/DoStreamUnary", + }, + }}) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "WebTransaction/Go/TestApplication/DoStreamUnary", + "nr.entryPoint": true, + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "Custom/DoStreamUnary", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) +} + +func TestStreamStreamServerInterceptor(t *testing.T) { + app := testApp() + + s, conn := 
newTestServerAndConn(t, app) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + txn := app.StartTransaction("client", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + stream, err := client.DoStreamStream(ctx) + if nil != err { + t.Fatal("client call to DoStreamStream failed", err) + } + waitc := make(chan struct{}) + go func() { + defer close(waitc) + var recved int + for { + _, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal("failure to Recv", err) + } + recved++ + } + if recved != 3 { + t.Fatal("received incorrect number of messages from server", recved) + } + }() + for i := 0; i < 3; i++ { + if err := stream.Send(&testapp.Message{Text: "Hello DoStreamStream"}); err != nil { + t.Fatal("failure to Send", err) + } + } + stream.CloseSend() + <-waitc + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/TestApplication/DoStreamStream", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/DoStreamStream", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/DoStreamStream", Scope: "WebTransaction/Go/TestApplication/DoStreamStream", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/DistributedTrace/AcceptPayload/Success", Scope: "", Forced: true, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/TestApplication/DoStreamStream", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: 
nil}, + {Name: "WebTransactionTotalTime/Go/TestApplication/DoStreamStream", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "guid": internal.MatchAnything, + "name": "WebTransaction/Go/TestApplication/DoStreamStream", + "nr.apdexPerfZone": internal.MatchAnything, + "parent.account": 123, + "parent.app": 456, + "parent.transportDuration": internal.MatchAnything, + "parent.transportType": "HTTP", + "parent.type": "App", + "parentId": internal.MatchAnything, + "parentSpanId": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 0, + "request.headers.contentType": "application/grpc", + "request.method": "TestApplication/DoStreamStream", + "request.uri": "grpc://bufnet/TestApplication/DoStreamStream", + }, + }}) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "WebTransaction/Go/TestApplication/DoStreamStream", + "nr.entryPoint": true, + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "Custom/DoStreamStream", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) +} + +func TestStreamServerInterceptorError(t *testing.T) { + app := testApp() + + s, conn := newTestServerAndConn(t, app) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + stream, err := client.DoUnaryStreamError(context.Background(), &testapp.Message{}) + if nil != err { + t.Fatal("client call to DoUnaryStream failed", err) + } + _, err = stream.Recv() + if nil == err { + 
t.Fatal("DoUnaryStreamError should have returned an error") + } + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/TestApplication/DoUnaryStreamError", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "Errors/WebTransaction/Go/TestApplication/DoUnaryStreamError", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/all", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/TestApplication/DoUnaryStreamError", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/TestApplication/DoUnaryStreamError", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "guid": internal.MatchAnything, + "name": "WebTransaction/Go/TestApplication/DoUnaryStreamError", + "nr.apdexPerfZone": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 15, + "request.headers.contentType": "application/grpc", + "request.method": "TestApplication/DoUnaryStreamError", + "request.uri": "grpc://bufnet/TestApplication/DoUnaryStreamError", + }, + }}) + 
app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "15", + "error.message": "response code 15", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + "transactionName": "WebTransaction/Go/TestApplication/DoUnaryStreamError", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 15, + "request.headers.User-Agent": internal.MatchAnything, + "request.headers.contentType": "application/grpc", + "request.method": "TestApplication/DoUnaryStreamError", + "request.uri": "grpc://bufnet/TestApplication/DoUnaryStreamError", + }, + UserAttributes: map[string]interface{}{}, + }}) +} + +func TestUnaryServerInterceptorNilApp(t *testing.T) { + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + msg, err := client.DoUnaryUnary(context.Background(), &testapp.Message{}) + if nil != err { + t.Fatal("unable to call client DoUnaryUnary", err) + } + if !strings.Contains(msg.Text, "content-type") { + t.Error("incorrect message received") + } +} + +func TestStreamServerInterceptorNilApp(t *testing.T) { + s, conn := newTestServerAndConn(t, nil) + defer s.Stop() + defer conn.Close() + + client := testapp.NewTestApplicationClient(conn) + stream, err := client.DoStreamUnary(context.Background()) + if nil != err { + t.Fatal("client call to DoStreamUnary failed", err) + } + for i := 0; i < 3; i++ { + if err := stream.Send(&testapp.Message{Text: "Hello DoStreamUnary"}); nil != err { + if err == io.EOF { + break + } + t.Fatal("failure to Send", err) + } + } + msg, err := stream.CloseAndRecv() + if nil != err { + t.Fatal("failure to CloseAndRecv", err) + } + if !strings.Contains(msg.Text, "content-type") { + t.Error("incorrect message received") + } +} diff --git a/_integrations/nrgrpc/testapp/README.md b/_integrations/nrgrpc/testapp/README.md new file mode 
100644 index 000000000..81d7151e8 --- /dev/null +++ b/_integrations/nrgrpc/testapp/README.md @@ -0,0 +1,17 @@ +# Testing gRPC Application + +This directory contains a testing application for validating the New Relic gRPC +integration. The code in `testapp.pb.go` is generated using the following +command (to be run from the `_integrations/nrgrpc` directory). This command +should be rerun every time the `testapp.proto` file has changed for any reason. + +```bash +$ protoc -I testapp/ testapp/testapp.proto --go_out=plugins=grpc:testapp +``` + +To install required dependencies: + +```bash +go get -u google.golang.org/grpc +go get -u github.com/golang/protobuf/protoc-gen-go +``` diff --git a/_integrations/nrgrpc/testapp/server.go b/_integrations/nrgrpc/testapp/server.go new file mode 100644 index 000000000..2c439f552 --- /dev/null +++ b/_integrations/nrgrpc/testapp/server.go @@ -0,0 +1,81 @@ +package testapp + +import ( + "context" + "encoding/json" + "io" + + newrelic "github.com/newrelic/go-agent" + codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + status "google.golang.org/grpc/status" +) + +// Server is a gRPC server. +type Server struct{} + +// DoUnaryUnary is a unary request, unary response method. +func (s *Server) DoUnaryUnary(ctx context.Context, msg *Message) (*Message, error) { + defer newrelic.StartSegment(newrelic.FromContext(ctx), "DoUnaryUnary").End() + md, _ := metadata.FromIncomingContext(ctx) + js, _ := json.Marshal(md) + return &Message{Text: string(js)}, nil +} + +// DoUnaryStream is a unary request, stream response method. 
+func (s *Server) DoUnaryStream(msg *Message, stream TestApplication_DoUnaryStreamServer) error { + defer newrelic.StartSegment(newrelic.FromContext(stream.Context()), "DoUnaryStream").End() + md, _ := metadata.FromIncomingContext(stream.Context()) + js, _ := json.Marshal(md) + for i := 0; i < 3; i++ { + if err := stream.Send(&Message{Text: string(js)}); nil != err { + return err + } + } + return nil +} + +// DoStreamUnary is a stream request, unary response method. +func (s *Server) DoStreamUnary(stream TestApplication_DoStreamUnaryServer) error { + defer newrelic.StartSegment(newrelic.FromContext(stream.Context()), "DoStreamUnary").End() + md, _ := metadata.FromIncomingContext(stream.Context()) + js, _ := json.Marshal(md) + for { + _, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&Message{Text: string(js)}) + } else if nil != err { + return err + } + } +} + +// DoStreamStream is a stream request, stream response method. +func (s *Server) DoStreamStream(stream TestApplication_DoStreamStreamServer) error { + defer newrelic.StartSegment(newrelic.FromContext(stream.Context()), "DoStreamStream").End() + md, _ := metadata.FromIncomingContext(stream.Context()) + js, _ := json.Marshal(md) + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } else if nil != err { + return err + } + if err := stream.Send(&Message{Text: string(js)}); nil != err { + return err + } + } +} + +// DoUnaryUnaryError is a unary request, unary response method that returns an +// error. +func (s *Server) DoUnaryUnaryError(ctx context.Context, msg *Message) (*Message, error) { + return &Message{}, status.New(codes.DataLoss, "oooooops!").Err() +} + +// DoUnaryStreamError is a unary request, unary response method that returns an +// error. 
+func (s *Server) DoUnaryStreamError(msg *Message, stream TestApplication_DoUnaryStreamErrorServer) error { + return status.New(codes.DataLoss, "oooooops!").Err() +} diff --git a/_integrations/nrgrpc/testapp/testapp.pb.go b/_integrations/nrgrpc/testapp/testapp.pb.go new file mode 100644 index 000000000..56f42caaf --- /dev/null +++ b/_integrations/nrgrpc/testapp/testapp.pb.go @@ -0,0 +1,466 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: testapp.proto + +package testapp + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Message struct { + Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_98d4e818d9f182b1, []int{0} +} + +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetText() string { + if m != nil { + return m.Text + } + return "" +} + +func init() { + proto.RegisterType((*Message)(nil), "Message") +} + +func init() { proto.RegisterFile("testapp.proto", fileDescriptor_98d4e818d9f182b1) } + +var fileDescriptor_98d4e818d9f182b1 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x49, 0x2d, 0x2e, + 0x49, 0x2c, 0x28, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x92, 0xe5, 0x62, 0xf7, 0x4d, 0x2d, + 0x2e, 0x4e, 0x4c, 0x4f, 0x15, 0x12, 0xe2, 0x62, 0x29, 0x49, 0xad, 0x28, 0x91, 0x60, 0x54, 0x60, + 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x8d, 0xfa, 0x98, 0xb8, 0xf8, 0x43, 0x52, 0x8b, 0x4b, 0x1c, 0x0b, + 0x0a, 0x72, 0x32, 0x93, 0x13, 0x4b, 0x32, 0xf3, 0xf3, 0x84, 0x54, 0xb8, 0x78, 0x5c, 0xf2, 0x43, + 0xf3, 0x12, 0x8b, 0x2a, 0xc1, 0x84, 0x10, 
0x87, 0x1e, 0xd4, 0x04, 0x29, 0x38, 0x4b, 0x89, 0x41, + 0x48, 0x9d, 0x8b, 0x17, 0xaa, 0x2a, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x17, 0xbb, 0x32, 0x03, 0x46, + 0x88, 0x42, 0x88, 0x1a, 0x3c, 0xe6, 0x69, 0x30, 0x0a, 0x69, 0x71, 0xf1, 0xc1, 0x14, 0xe2, 0x33, + 0x52, 0x83, 0xd1, 0x80, 0x51, 0x48, 0x93, 0x4b, 0x10, 0xd9, 0x8d, 0xae, 0x45, 0x45, 0xf9, 0x45, + 0x38, 0x1c, 0xaa, 0xc3, 0x25, 0x84, 0xe2, 0x50, 0x3c, 0x6a, 0x0d, 0x18, 0x93, 0xd8, 0xc0, 0xc1, + 0x66, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xca, 0x5d, 0x32, 0x47, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TestApplicationClient is the client API for TestApplication service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type TestApplicationClient interface { + DoUnaryUnary(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Message, error) + DoUnaryStream(ctx context.Context, in *Message, opts ...grpc.CallOption) (TestApplication_DoUnaryStreamClient, error) + DoStreamUnary(ctx context.Context, opts ...grpc.CallOption) (TestApplication_DoStreamUnaryClient, error) + DoStreamStream(ctx context.Context, opts ...grpc.CallOption) (TestApplication_DoStreamStreamClient, error) + DoUnaryUnaryError(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Message, error) + DoUnaryStreamError(ctx context.Context, in *Message, opts ...grpc.CallOption) (TestApplication_DoUnaryStreamErrorClient, error) +} + +type testApplicationClient struct { + cc *grpc.ClientConn +} + +func NewTestApplicationClient(cc *grpc.ClientConn) TestApplicationClient { + return &testApplicationClient{cc} +} + +func (c *testApplicationClient) DoUnaryUnary(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Message, error) { + out := new(Message) + err := c.cc.Invoke(ctx, "/TestApplication/DoUnaryUnary", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testApplicationClient) DoUnaryStream(ctx context.Context, in *Message, opts ...grpc.CallOption) (TestApplication_DoUnaryStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_TestApplication_serviceDesc.Streams[0], "/TestApplication/DoUnaryStream", opts...) 
+ if err != nil { + return nil, err + } + x := &testApplicationDoUnaryStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestApplication_DoUnaryStreamClient interface { + Recv() (*Message, error) + grpc.ClientStream +} + +type testApplicationDoUnaryStreamClient struct { + grpc.ClientStream +} + +func (x *testApplicationDoUnaryStreamClient) Recv() (*Message, error) { + m := new(Message) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testApplicationClient) DoStreamUnary(ctx context.Context, opts ...grpc.CallOption) (TestApplication_DoStreamUnaryClient, error) { + stream, err := c.cc.NewStream(ctx, &_TestApplication_serviceDesc.Streams[1], "/TestApplication/DoStreamUnary", opts...) + if err != nil { + return nil, err + } + x := &testApplicationDoStreamUnaryClient{stream} + return x, nil +} + +type TestApplication_DoStreamUnaryClient interface { + Send(*Message) error + CloseAndRecv() (*Message, error) + grpc.ClientStream +} + +type testApplicationDoStreamUnaryClient struct { + grpc.ClientStream +} + +func (x *testApplicationDoStreamUnaryClient) Send(m *Message) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testApplicationDoStreamUnaryClient) CloseAndRecv() (*Message, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Message) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testApplicationClient) DoStreamStream(ctx context.Context, opts ...grpc.CallOption) (TestApplication_DoStreamStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_TestApplication_serviceDesc.Streams[2], "/TestApplication/DoStreamStream", opts...) 
+ if err != nil { + return nil, err + } + x := &testApplicationDoStreamStreamClient{stream} + return x, nil +} + +type TestApplication_DoStreamStreamClient interface { + Send(*Message) error + Recv() (*Message, error) + grpc.ClientStream +} + +type testApplicationDoStreamStreamClient struct { + grpc.ClientStream +} + +func (x *testApplicationDoStreamStreamClient) Send(m *Message) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testApplicationDoStreamStreamClient) Recv() (*Message, error) { + m := new(Message) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testApplicationClient) DoUnaryUnaryError(ctx context.Context, in *Message, opts ...grpc.CallOption) (*Message, error) { + out := new(Message) + err := c.cc.Invoke(ctx, "/TestApplication/DoUnaryUnaryError", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testApplicationClient) DoUnaryStreamError(ctx context.Context, in *Message, opts ...grpc.CallOption) (TestApplication_DoUnaryStreamErrorClient, error) { + stream, err := c.cc.NewStream(ctx, &_TestApplication_serviceDesc.Streams[3], "/TestApplication/DoUnaryStreamError", opts...) + if err != nil { + return nil, err + } + x := &testApplicationDoUnaryStreamErrorClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestApplication_DoUnaryStreamErrorClient interface { + Recv() (*Message, error) + grpc.ClientStream +} + +type testApplicationDoUnaryStreamErrorClient struct { + grpc.ClientStream +} + +func (x *testApplicationDoUnaryStreamErrorClient) Recv() (*Message, error) { + m := new(Message) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TestApplicationServer is the server API for TestApplication service. 
+type TestApplicationServer interface { + DoUnaryUnary(context.Context, *Message) (*Message, error) + DoUnaryStream(*Message, TestApplication_DoUnaryStreamServer) error + DoStreamUnary(TestApplication_DoStreamUnaryServer) error + DoStreamStream(TestApplication_DoStreamStreamServer) error + DoUnaryUnaryError(context.Context, *Message) (*Message, error) + DoUnaryStreamError(*Message, TestApplication_DoUnaryStreamErrorServer) error +} + +// UnimplementedTestApplicationServer can be embedded to have forward compatible implementations. +type UnimplementedTestApplicationServer struct { +} + +func (*UnimplementedTestApplicationServer) DoUnaryUnary(ctx context.Context, req *Message) (*Message, error) { + return nil, status.Errorf(codes.Unimplemented, "method DoUnaryUnary not implemented") +} +func (*UnimplementedTestApplicationServer) DoUnaryStream(req *Message, srv TestApplication_DoUnaryStreamServer) error { + return status.Errorf(codes.Unimplemented, "method DoUnaryStream not implemented") +} +func (*UnimplementedTestApplicationServer) DoStreamUnary(srv TestApplication_DoStreamUnaryServer) error { + return status.Errorf(codes.Unimplemented, "method DoStreamUnary not implemented") +} +func (*UnimplementedTestApplicationServer) DoStreamStream(srv TestApplication_DoStreamStreamServer) error { + return status.Errorf(codes.Unimplemented, "method DoStreamStream not implemented") +} +func (*UnimplementedTestApplicationServer) DoUnaryUnaryError(ctx context.Context, req *Message) (*Message, error) { + return nil, status.Errorf(codes.Unimplemented, "method DoUnaryUnaryError not implemented") +} +func (*UnimplementedTestApplicationServer) DoUnaryStreamError(req *Message, srv TestApplication_DoUnaryStreamErrorServer) error { + return status.Errorf(codes.Unimplemented, "method DoUnaryStreamError not implemented") +} + +func RegisterTestApplicationServer(s *grpc.Server, srv TestApplicationServer) { + s.RegisterService(&_TestApplication_serviceDesc, srv) +} + +func 
_TestApplication_DoUnaryUnary_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Message) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestApplicationServer).DoUnaryUnary(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/TestApplication/DoUnaryUnary", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestApplicationServer).DoUnaryUnary(ctx, req.(*Message)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestApplication_DoUnaryStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Message) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestApplicationServer).DoUnaryStream(m, &testApplicationDoUnaryStreamServer{stream}) +} + +type TestApplication_DoUnaryStreamServer interface { + Send(*Message) error + grpc.ServerStream +} + +type testApplicationDoUnaryStreamServer struct { + grpc.ServerStream +} + +func (x *testApplicationDoUnaryStreamServer) Send(m *Message) error { + return x.ServerStream.SendMsg(m) +} + +func _TestApplication_DoStreamUnary_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestApplicationServer).DoStreamUnary(&testApplicationDoStreamUnaryServer{stream}) +} + +type TestApplication_DoStreamUnaryServer interface { + SendAndClose(*Message) error + Recv() (*Message, error) + grpc.ServerStream +} + +type testApplicationDoStreamUnaryServer struct { + grpc.ServerStream +} + +func (x *testApplicationDoStreamUnaryServer) SendAndClose(m *Message) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testApplicationDoStreamUnaryServer) Recv() (*Message, error) { + m := new(Message) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestApplication_DoStreamStream_Handler(srv interface{}, stream 
grpc.ServerStream) error { + return srv.(TestApplicationServer).DoStreamStream(&testApplicationDoStreamStreamServer{stream}) +} + +type TestApplication_DoStreamStreamServer interface { + Send(*Message) error + Recv() (*Message, error) + grpc.ServerStream +} + +type testApplicationDoStreamStreamServer struct { + grpc.ServerStream +} + +func (x *testApplicationDoStreamStreamServer) Send(m *Message) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testApplicationDoStreamStreamServer) Recv() (*Message, error) { + m := new(Message) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestApplication_DoUnaryUnaryError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Message) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestApplicationServer).DoUnaryUnaryError(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/TestApplication/DoUnaryUnaryError", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestApplicationServer).DoUnaryUnaryError(ctx, req.(*Message)) + } + return interceptor(ctx, in, info, handler) +} + +func _TestApplication_DoUnaryStreamError_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Message) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestApplicationServer).DoUnaryStreamError(m, &testApplicationDoUnaryStreamErrorServer{stream}) +} + +type TestApplication_DoUnaryStreamErrorServer interface { + Send(*Message) error + grpc.ServerStream +} + +type testApplicationDoUnaryStreamErrorServer struct { + grpc.ServerStream +} + +func (x *testApplicationDoUnaryStreamErrorServer) Send(m *Message) error { + return x.ServerStream.SendMsg(m) +} + +var _TestApplication_serviceDesc = grpc.ServiceDesc{ + ServiceName: "TestApplication", + 
HandlerType: (*TestApplicationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DoUnaryUnary", + Handler: _TestApplication_DoUnaryUnary_Handler, + }, + { + MethodName: "DoUnaryUnaryError", + Handler: _TestApplication_DoUnaryUnaryError_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "DoUnaryStream", + Handler: _TestApplication_DoUnaryStream_Handler, + ServerStreams: true, + }, + { + StreamName: "DoStreamUnary", + Handler: _TestApplication_DoStreamUnary_Handler, + ClientStreams: true, + }, + { + StreamName: "DoStreamStream", + Handler: _TestApplication_DoStreamStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "DoUnaryStreamError", + Handler: _TestApplication_DoUnaryStreamError_Handler, + ServerStreams: true, + }, + }, + Metadata: "testapp.proto", +} diff --git a/_integrations/nrgrpc/testapp/testapp.proto b/_integrations/nrgrpc/testapp/testapp.proto new file mode 100644 index 000000000..1c2d207d6 --- /dev/null +++ b/_integrations/nrgrpc/testapp/testapp.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +service TestApplication { + rpc DoUnaryUnary(Message) returns (Message) {} + rpc DoUnaryStream(Message) returns (stream Message) {} + rpc DoStreamUnary(stream Message) returns (Message) {} + rpc DoStreamStream(stream Message) returns (stream Message) {} + + rpc DoUnaryUnaryError(Message) returns (Message) {} + rpc DoUnaryStreamError(Message) returns (stream Message) {} +} + +message Message { + string text = 1; +} diff --git a/_integrations/nrhttprouter/README.md b/_integrations/nrhttprouter/README.md new file mode 100644 index 000000000..8570c3b7b --- /dev/null +++ b/_integrations/nrhttprouter/README.md @@ -0,0 +1,10 @@ +# _integrations/nrhttprouter [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrhttprouter) + +Package `nrhttprouter` instruments https://github.com/julienschmidt/httprouter applications. 
+ +```go +import "github.com/newrelic/go-agent/_integrations/nrhttprouter" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrhttprouter). diff --git a/_integrations/nrhttprouter/example/main.go b/_integrations/nrhttprouter/example/main.go new file mode 100644 index 000000000..3a58b0f2b --- /dev/null +++ b/_integrations/nrhttprouter/example/main.go @@ -0,0 +1,44 @@ +package main + +import ( + "fmt" + "net/http" + "os" + + "github.com/julienschmidt/httprouter" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrhttprouter" +) + +func index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + w.Write([]byte("welcome\n")) +} + +func hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + w.Write([]byte(fmt.Sprintf("hello %s\n", ps.ByName("name")))) +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("httprouter App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // Use an *nrhttprouter.Router in place of an *httprouter.Router. + router := nrhttprouter.New(app) + + router.GET("/", index) + router.GET("/hello/:name", hello) + + http.ListenAndServe(":8000", router) +} diff --git a/_integrations/nrhttprouter/nrhttprouter.go b/_integrations/nrhttprouter/nrhttprouter.go new file mode 100644 index 000000000..ff1fafb40 --- /dev/null +++ b/_integrations/nrhttprouter/nrhttprouter.go @@ -0,0 +1,148 @@ +// Package nrhttprouter instruments https://github.com/julienschmidt/httprouter +// applications. +// +// Use this package to instrument inbound requests handled by a +// httprouter.Router. Use an *nrhttprouter.Router in place of your +// *httprouter.Router. 
Example: +// +// package main +// +// import ( +// "fmt" +// "net/http" +// "os" +// +// "github.com/julienschmidt/httprouter" +// newrelic "github.com/newrelic/go-agent" +// "github.com/newrelic/go-agent/_integrations/nrhttprouter" +// ) +// +// func main() { +// cfg := newrelic.NewConfig("httprouter App", os.Getenv("NEW_RELIC_LICENSE_KEY")) +// app, _ := newrelic.NewApplication(cfg) +// +// // Create the Router replacement: +// router := nrhttprouter.New(app) +// +// router.GET("/", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { +// w.Write([]byte("welcome\n")) +// }) +// router.GET("/hello/:name", func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { +// w.Write([]byte(fmt.Sprintf("hello %s\n", ps.ByName("name")))) +// }) +// http.ListenAndServe(":8000", router) +// } +// +// Runnable example: https://github.com/newrelic/go-agent/tree/master/_integrations/nrhttprouter/example/main.go +package nrhttprouter + +import ( + "net/http" + + "github.com/julienschmidt/httprouter" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "framework", "httprouter") } + +// Router should be used in place of httprouter.Router. Create it using +// New(). +type Router struct { + *httprouter.Router + + application newrelic.Application +} + +// New creates a new Router to be used in place of httprouter.Router.
+func New(app newrelic.Application) *Router { + return &Router{ + Router: httprouter.New(), + application: app, + } +} + +func txnName(method, path string) string { + return method + " " + path +} + +func (r *Router) handle(method string, path string, original httprouter.Handle) { + handle := original + if nil != r.application { + handle = func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + txn := r.application.StartTransaction(txnName(method, path), w, req) + defer txn.End() + + req = newrelic.RequestWithTransactionContext(req, txn) + + original(txn, req, ps) + } + } + r.Router.Handle(method, path, handle) +} + +// DELETE replaces httprouter.Router.DELETE. +func (r *Router) DELETE(path string, h httprouter.Handle) { + r.handle(http.MethodDelete, path, h) +} + +// GET replaces httprouter.Router.GET. +func (r *Router) GET(path string, h httprouter.Handle) { + r.handle(http.MethodGet, path, h) +} + +// HEAD replaces httprouter.Router.HEAD. +func (r *Router) HEAD(path string, h httprouter.Handle) { + r.handle(http.MethodHead, path, h) +} + +// OPTIONS replaces httprouter.Router.OPTIONS. +func (r *Router) OPTIONS(path string, h httprouter.Handle) { + r.handle(http.MethodOptions, path, h) +} + +// PATCH replaces httprouter.Router.PATCH. +func (r *Router) PATCH(path string, h httprouter.Handle) { + r.handle(http.MethodPatch, path, h) +} + +// POST replaces httprouter.Router.POST. +func (r *Router) POST(path string, h httprouter.Handle) { + r.handle(http.MethodPost, path, h) +} + +// PUT replaces httprouter.Router.PUT. +func (r *Router) PUT(path string, h httprouter.Handle) { + r.handle(http.MethodPut, path, h) +} + +// Handle replaces httprouter.Router.Handle. +func (r *Router) Handle(method, path string, h httprouter.Handle) { + r.handle(method, path, h) +} + +// Handler replaces httprouter.Router.Handler. 
+func (r *Router) Handler(method, path string, handler http.Handler) { + _, h := newrelic.WrapHandle(r.application, txnName(method, path), handler) + r.Router.Handler(method, path, h) +} + +// HandlerFunc replaces httprouter.Router.HandlerFunc. +func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) { + r.Handler(method, path, handler) +} + +// ServeHTTP replaces httprouter.Router.ServeHTTP. +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if nil != r.application { + h, _, _ := r.Router.Lookup(req.Method, req.URL.Path) + if nil == h { + txn := r.application.StartTransaction("NotFound", w, req) + defer txn.End() + w = txn + req = newrelic.RequestWithTransactionContext(req, txn) + } + } + + r.Router.ServeHTTP(w, req) +} diff --git a/_integrations/nrhttprouter/nrhttprouter_test.go b/_integrations/nrhttprouter/nrhttprouter_test.go new file mode 100644 index 000000000..f882cc049 --- /dev/null +++ b/_integrations/nrhttprouter/nrhttprouter_test.go @@ -0,0 +1,283 @@ +package nrhttprouter + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/julienschmidt/httprouter" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +func TestMethodFunctions(t *testing.T) { + + methodFuncs := []struct { + Method string + Fn func(*Router) func(string, httprouter.Handle) + }{ + {Method: "DELETE", Fn: func(r *Router) func(string, httprouter.Handle) { return r.DELETE }}, + {Method: "GET", Fn: func(r *Router) func(string, httprouter.Handle) { return r.GET }}, + {Method: "HEAD", Fn: func(r *Router) func(string, httprouter.Handle) { return r.HEAD }}, + {Method: "OPTIONS", Fn: func(r *Router) func(string, httprouter.Handle) { return r.OPTIONS }}, + {Method: "PATCH", Fn: func(r *Router) func(string, httprouter.Handle) { return r.PATCH }}, + {Method: "POST", Fn: func(r *Router) func(string, httprouter.Handle) { return r.POST }}, 
+ {Method: "PUT", Fn: func(r *Router) func(string, httprouter.Handle) { return r.PUT }}, + } + + for _, md := range methodFuncs { + app := integrationsupport.NewBasicTestApp() + router := New(app) + md.Fn(router)("/hello/:name", func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + // Test that the Transaction is used as the response writer. + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("hi %s", ps.ByName("name")))) + }) + response := httptest.NewRecorder() + req, err := http.NewRequest(md.Method, "/hello/person", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hi person" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: md.Method + " /hello/:name", + IsWeb: true, + NumErrors: 1, + }) + } +} + +func TestGetNoApplication(t *testing.T) { + router := New(nil) + + router.GET("/hello/:name", func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + w.Write([]byte(fmt.Sprintf("hi %s", ps.ByName("name")))) + }) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello/person", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hi person" { + t.Error("wrong response body", respBody) + } +} + +func TestHandle(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := New(app) + + router.Handle("GET", "/hello/:name", func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + // Test that the Transaction is used as the response writer. 
+ w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("hi %s", ps.ByName("name")))) + if txn := newrelic.FromContext(r.Context()); txn != nil { + txn.AddAttribute("color", "purple") + } + }) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello/person", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hi person" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "GET /hello/:name", + IsWeb: true, + NumErrors: 1, + }) + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/GET /hello/:name", + "nr.apdexPerfZone": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "color": "purple", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 500, + "request.method": "GET", + "request.uri": "/hello/person", + }, + }, + }) +} + +func TestHandler(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := New(app) + + router.Handler("GET", "/hello/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Test that the Transaction is used as the response writer. 
+ w.WriteHeader(500) + w.Write([]byte("hi there")) + if txn := newrelic.FromContext(r.Context()); txn != nil { + txn.AddAttribute("color", "purple") + } + })) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello/", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hi there" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "GET /hello/", + IsWeb: true, + NumErrors: 1, + }) + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/GET /hello/", + "nr.apdexPerfZone": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "color": "purple", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 500, + "request.method": "GET", + "request.uri": "/hello/", + }, + }, + }) +} + +func TestHandlerMissingApplication(t *testing.T) { + router := New(nil) + + router.Handler("GET", "/hello/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(500) + w.Write([]byte("hi there")) + })) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello/", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hi there" { + t.Error("wrong response body", respBody) + } +} + +func TestHandlerFunc(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := New(app) + + router.HandlerFunc("GET", "/hello/", func(w http.ResponseWriter, r *http.Request) { + // Test that the Transaction is used as the response writer. 
+ w.WriteHeader(500) + w.Write([]byte("hi there")) + }) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello/", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "hi there" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "GET /hello/", + IsWeb: true, + NumErrors: 1, + }) +} + +func TestNotFound(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := New(app) + + router.NotFound = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Test that the Transaction is used as the response writer. + w.WriteHeader(500) + w.Write([]byte("not found!")) + if txn := newrelic.FromContext(r.Context()); txn != nil { + txn.AddAttribute("color", "purple") + } + }) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello/", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "not found!" { + t.Error("wrong response body", respBody) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "NotFound", + IsWeb: true, + NumErrors: 1, + }) + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/NotFound", + "nr.apdexPerfZone": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "color": "purple", + }, + AgentAttributes: map[string]interface{}{ + "httpResponseCode": 500, + "request.method": "GET", + "request.uri": "/hello/", + }, + }, + }) +} + +func TestNotFoundMissingApplication(t *testing.T) { + router := New(nil) + + router.NotFound = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Test that the Transaction is used as the response writer. 
+ w.WriteHeader(500) + w.Write([]byte("not found!")) + }) + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello/", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if respBody := response.Body.String(); respBody != "not found!" { + t.Error("wrong response body", respBody) + } +} + +func TestNotFoundNotSet(t *testing.T) { + app := integrationsupport.NewBasicTestApp() + router := New(app) + + response := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hello/", nil) + if err != nil { + t.Fatal(err) + } + router.ServeHTTP(response, req) + if response.Code != 404 { + t.Error(response.Code) + } + app.ExpectTxnMetrics(t, internal.WantTxn{ + Name: "NotFound", + IsWeb: true, + }) +} diff --git a/_integrations/nrlambda/README.md b/_integrations/nrlambda/README.md new file mode 100644 index 000000000..b70a5d05a --- /dev/null +++ b/_integrations/nrlambda/README.md @@ -0,0 +1,10 @@ +# _integrations/nrlambda [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrlambda) + +Package `nrlambda` adds support for AWS Lambda. + +```go +import "github.com/newrelic/go-agent/_integrations/nrlambda" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrlambda). diff --git a/_integrations/nrlambda/config.go b/_integrations/nrlambda/config.go new file mode 100644 index 000000000..b0dc85d27 --- /dev/null +++ b/_integrations/nrlambda/config.go @@ -0,0 +1,39 @@ +package nrlambda + +import ( + "os" + "time" + + newrelic "github.com/newrelic/go-agent" +) + +// NewConfig populates a newrelic.Config with correct default settings for a +// Lambda serverless environment. NewConfig will populate fields based on +// environment variables common to all New Relic agents that support Lambda. +// Environment variables NEW_RELIC_ACCOUNT_ID, NEW_RELIC_TRUSTED_ACCOUNT_KEY, +// and NEW_RELIC_PRIMARY_APPLICATION_ID configure fields required for +// distributed tracing. 
Environment variable NEW_RELIC_APDEX_T may be used to +// set a custom apdex threshold. +func NewConfig() newrelic.Config { + return newConfigInternal(os.Getenv) +} + +func newConfigInternal(getenv func(string) string) newrelic.Config { + cfg := newrelic.NewConfig("", "") + + cfg.ServerlessMode.Enabled = true + + cfg.ServerlessMode.AccountID = getenv("NEW_RELIC_ACCOUNT_ID") + cfg.ServerlessMode.TrustedAccountKey = getenv("NEW_RELIC_TRUSTED_ACCOUNT_KEY") + cfg.ServerlessMode.PrimaryAppID = getenv("NEW_RELIC_PRIMARY_APPLICATION_ID") + + cfg.DistributedTracer.Enabled = true + + if s := getenv("NEW_RELIC_APDEX_T"); "" != s { + if apdex, err := time.ParseDuration(s + "s"); nil == err { + cfg.ServerlessMode.ApdexThreshold = apdex + } + } + + return cfg +} diff --git a/_integrations/nrlambda/config_test.go b/_integrations/nrlambda/config_test.go new file mode 100644 index 000000000..ef33202a1 --- /dev/null +++ b/_integrations/nrlambda/config_test.go @@ -0,0 +1,38 @@ +package nrlambda + +import ( + "testing" + "time" +) + +func TestNewConfig(t *testing.T) { + cfg := newConfigInternal(func(key string) string { + switch key { + case "NEW_RELIC_ACCOUNT_ID": + return "the-account-id" + case "NEW_RELIC_TRUSTED_ACCOUNT_KEY": + return "the-trust-key" + case "NEW_RELIC_PRIMARY_APPLICATION_ID": + return "the-app-id" + case "NEW_RELIC_APDEX_T": + return "2" + default: + return "" + } + }) + if !cfg.ServerlessMode.Enabled { + t.Error(cfg.ServerlessMode.Enabled) + } + if cfg.ServerlessMode.AccountID != "the-account-id" { + t.Error(cfg.ServerlessMode.AccountID) + } + if cfg.ServerlessMode.TrustedAccountKey != "the-trust-key" { + t.Error(cfg.ServerlessMode.TrustedAccountKey) + } + if cfg.ServerlessMode.PrimaryAppID != "the-app-id" { + t.Error(cfg.ServerlessMode.PrimaryAppID) + } + if cfg.ServerlessMode.ApdexThreshold != 2*time.Second { + t.Error(cfg.ServerlessMode.ApdexThreshold) + } +} diff --git a/_integrations/nrlambda/events.go b/_integrations/nrlambda/events.go new file mode 100644 
index 000000000..bf32cd510 --- /dev/null +++ b/_integrations/nrlambda/events.go @@ -0,0 +1,124 @@ +package nrlambda + +import ( + "net/http" + "net/url" + "strings" + + "github.com/aws/aws-lambda-go/events" + newrelic "github.com/newrelic/go-agent" +) + +func getEventSourceARN(event interface{}) string { + switch v := event.(type) { + case events.KinesisFirehoseEvent: + return v.DeliveryStreamArn + case events.KinesisEvent: + if len(v.Records) > 0 { + return v.Records[0].EventSourceArn + } + case events.CodeCommitEvent: + if len(v.Records) > 0 { + return v.Records[0].EventSourceARN + } + case events.DynamoDBEvent: + if len(v.Records) > 0 { + return v.Records[0].EventSourceArn + } + case events.SQSEvent: + if len(v.Records) > 0 { + return v.Records[0].EventSourceARN + } + case events.S3Event: + if len(v.Records) > 0 { + return v.Records[0].S3.Bucket.Arn + } + case events.SNSEvent: + if len(v.Records) > 0 { + return v.Records[0].EventSubscriptionArn + } + } + return "" +} + +type webRequest struct { + header http.Header + method string + u *url.URL + transport newrelic.TransportType +} + +func (r webRequest) Header() http.Header { return r.header } +func (r webRequest) URL() *url.URL { return r.u } +func (r webRequest) Method() string { return r.method } +func (r webRequest) Transport() newrelic.TransportType { return r.transport } + +func eventWebRequest(event interface{}) newrelic.WebRequest { + var path string + var request webRequest + var headers map[string]string + + switch r := event.(type) { + case events.APIGatewayProxyRequest: + request.method = r.HTTPMethod + path = r.Path + headers = r.Headers + case events.ALBTargetGroupRequest: + // https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html#receive-event-from-load-balancer + request.method = r.HTTPMethod + path = r.Path + headers = r.Headers + default: + return nil + } + + request.header = make(http.Header, len(headers)) + for k, v := range headers { + 
request.header.Set(k, v) + } + + var host string + if port := request.header.Get("X-Forwarded-Port"); port != "" { + host = ":" + port + } + request.u = &url.URL{ + Path: path, + Host: host, + } + + proto := strings.ToLower(request.header.Get("X-Forwarded-Proto")) + switch proto { + case "https": + request.transport = newrelic.TransportHTTPS + case "http": + request.transport = newrelic.TransportHTTP + default: + request.transport = newrelic.TransportUnknown + } + + return request +} + +func eventResponse(event interface{}) *response { + var code int + var headers map[string]string + + switch r := event.(type) { + case events.APIGatewayProxyResponse: + code = r.StatusCode + headers = r.Headers + case events.ALBTargetGroupResponse: + code = r.StatusCode + headers = r.Headers + default: + return nil + } + hdr := make(http.Header, len(headers)) + for k, v := range headers { + hdr.Add(k, v) + } + return &response{ + code: code, + header: hdr, + } +} diff --git a/_integrations/nrlambda/events_test.go b/_integrations/nrlambda/events_test.go new file mode 100644 index 000000000..8deb262d2 --- /dev/null +++ b/_integrations/nrlambda/events_test.go @@ -0,0 +1,216 @@ +package nrlambda + +import ( + "testing" + + "github.com/aws/aws-lambda-go/events" + newrelic "github.com/newrelic/go-agent" +) + +func TestGetEventAttributes(t *testing.T) { + testcases := []struct { + Name string + Input interface{} + Arn string + }{ + {Name: "nil", Input: nil, Arn: ""}, + {Name: "SQSEvent empty", Input: events.SQSEvent{}, Arn: ""}, + {Name: "SQSEvent", Input: events.SQSEvent{ + Records: []events.SQSMessage{{ + EventSourceARN: "ARN", + }}, + }, Arn: "ARN"}, + {Name: "SNSEvent empty", Input: events.SNSEvent{}, Arn: ""}, + {Name: "SNSEvent", Input: events.SNSEvent{ + Records: []events.SNSEventRecord{{ + EventSubscriptionArn: "ARN", + }}, + }, Arn: "ARN"}, + {Name: "S3Event empty", Input: events.S3Event{}, Arn: ""}, + {Name: "S3Event", Input: events.S3Event{ + Records: []events.S3EventRecord{{ + 
S3: events.S3Entity{ + Bucket: events.S3Bucket{ + Arn: "ARN", + }, + }, + }}, + }, Arn: "ARN"}, + {Name: "DynamoDBEvent empty", Input: events.DynamoDBEvent{}, Arn: ""}, + {Name: "DynamoDBEvent", Input: events.DynamoDBEvent{ + Records: []events.DynamoDBEventRecord{{ + EventSourceArn: "ARN", + }}, + }, Arn: "ARN"}, + {Name: "CodeCommitEvent empty", Input: events.CodeCommitEvent{}, Arn: ""}, + {Name: "CodeCommitEvent", Input: events.CodeCommitEvent{ + Records: []events.CodeCommitRecord{{ + EventSourceARN: "ARN", + }}, + }, Arn: "ARN"}, + {Name: "KinesisEvent empty", Input: events.KinesisEvent{}, Arn: ""}, + {Name: "KinesisEvent", Input: events.KinesisEvent{ + Records: []events.KinesisEventRecord{{ + EventSourceArn: "ARN", + }}, + }, Arn: "ARN"}, + {Name: "KinesisFirehoseEvent", Input: events.KinesisFirehoseEvent{ + DeliveryStreamArn: "ARN", + }, Arn: "ARN"}, + } + + for _, testcase := range testcases { + arn := getEventSourceARN(testcase.Input) + if arn != testcase.Arn { + t.Error(testcase.Name, arn, testcase.Arn) + } + } +} + +func TestEventWebRequest(t *testing.T) { + // First test a type that does not count as a web request. 
+ req := eventWebRequest(22) + if nil != req { + t.Error(req) + } + + testcases := []struct { + testname string + input interface{} + numHeaders int + method string + urlString string + transport newrelic.TransportType + }{ + { + testname: "empty proxy request", + input: events.APIGatewayProxyRequest{}, + numHeaders: 0, + method: "", + urlString: "", + transport: newrelic.TransportUnknown, + }, + { + testname: "populated proxy request", + input: events.APIGatewayProxyRequest{ + Headers: map[string]string{ + "x-forwarded-port": "4000", + "x-forwarded-proto": "HTTPS", + }, + HTTPMethod: "GET", + Path: "the/path", + }, + numHeaders: 2, + method: "GET", + urlString: "//:4000/the/path", + transport: newrelic.TransportHTTPS, + }, + { + testname: "empty alb request", + input: events.ALBTargetGroupRequest{}, + numHeaders: 0, + method: "", + urlString: "", + transport: newrelic.TransportUnknown, + }, + { + testname: "populated alb request", + input: events.ALBTargetGroupRequest{ + Headers: map[string]string{ + "x-forwarded-port": "3000", + "x-forwarded-proto": "HttP", + }, + HTTPMethod: "GET", + Path: "the/path", + }, + numHeaders: 2, + method: "GET", + urlString: "//:3000/the/path", + transport: newrelic.TransportHTTP, + }, + } + + for _, tc := range testcases { + req = eventWebRequest(tc.input) + if req == nil { + t.Error(tc.testname, "no request returned") + continue + } + if h := req.Header(); len(h) != tc.numHeaders { + t.Error(tc.testname, "header len mismatch", h, tc.numHeaders) + } + if u := req.URL().String(); u != tc.urlString { + t.Error(tc.testname, "url mismatch", u, tc.urlString) + } + if m := req.Method(); m != tc.method { + t.Error(tc.testname, "method mismatch", m, tc.method) + } + if tr := req.Transport(); tr != tc.transport { + t.Error(tc.testname, "transport mismatch", tr, tc.transport) + } + } +} + +func TestEventResponse(t *testing.T) { + // First test a type that does not count as a web request. 
+ resp := eventResponse(22) + if nil != resp { + t.Error(resp) + } + + testcases := []struct { + testname string + input interface{} + numHeaders int + code int + }{ + { + testname: "empty proxy response", + input: events.APIGatewayProxyResponse{}, + numHeaders: 0, + code: 0, + }, + { + testname: "populated proxy response", + input: events.APIGatewayProxyResponse{ + StatusCode: 200, + Headers: map[string]string{ + "x-custom-header": "my custom header value", + }, + }, + numHeaders: 1, + code: 200, + }, + { + testname: "empty alb response", + input: events.ALBTargetGroupResponse{}, + numHeaders: 0, + code: 0, + }, + { + testname: "populated alb response", + input: events.ALBTargetGroupResponse{ + StatusCode: 200, + Headers: map[string]string{ + "x-custom-header": "my custom header value", + }, + }, + numHeaders: 1, + code: 200, + }, + } + + for _, tc := range testcases { + resp = eventResponse(tc.input) + if resp == nil { + t.Error(tc.testname, "no response returned") + continue + } + if h := resp.Header(); len(h) != tc.numHeaders { + t.Error(tc.testname, "header len mismatch", h, tc.numHeaders) + } + if resp.code != tc.code { + t.Error(tc.testname, "status code mismatch", resp.code, tc.code) + } + } +} diff --git a/_integrations/nrlambda/example/main.go b/_integrations/nrlambda/example/main.go new file mode 100644 index 000000000..808eb0392 --- /dev/null +++ b/_integrations/nrlambda/example/main.go @@ -0,0 +1,38 @@ +package main + +import ( + "context" + "fmt" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrlambda" +) + +func handler(ctx context.Context) { + // The nrlambda handler instrumentation will add the transaction to the + // context. Access it using newrelic.FromContext to add additional + // instrumentation. 
+ if txn := newrelic.FromContext(ctx); nil != txn { + txn.AddAttribute("userLevel", "gold") + txn.Application().RecordCustomEvent("MyEvent", map[string]interface{}{ + "zip": "zap", + }) + } + fmt.Println("hello world") +} + +func main() { + // nrlambda.NewConfig should be used in place of newrelic.NewConfig + // since it sets Lambda specific configuration settings including + // Config.ServerlessMode.Enabled. + cfg := nrlambda.NewConfig() + // Here is the opportunity to change configuration settings before the + // application is created. + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println("error creating app (invalid config):", err) + } + // nrlambda.Start should be used in place of lambda.Start. + // nrlambda.StartHandler should be used in place of lambda.StartHandler. + nrlambda.Start(handler, app) +} diff --git a/_integrations/nrlambda/handler.go b/_integrations/nrlambda/handler.go new file mode 100644 index 000000000..c3491a2dc --- /dev/null +++ b/_integrations/nrlambda/handler.go @@ -0,0 +1,159 @@ +// Package nrlambda adds support for AWS Lambda. +// +// Use this package to instrument your AWS Lambda handler function. Data is +// sent to CloudWatch when the Lambda is invoked. CloudWatch collects Lambda +// log data and sends it to a New Relic log-ingestion Lambda. The log-ingestion +// Lambda sends that data to us. 
+// +// Monitoring AWS Lambda requires several steps shown here: +// https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/get-started/enable-new-relic-monitoring-aws-lambda +// +// Example: https://github.com/newrelic/go-agent/tree/master/_integrations/nrlambda/example/main.go +package nrlambda + +import ( + "context" + "io" + "net/http" + "os" + "sync" + + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-lambda-go/lambda/handlertrace" + "github.com/aws/aws-lambda-go/lambdacontext" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +type response struct { + header http.Header + code int +} + +var _ http.ResponseWriter = &response{} + +func (r *response) Header() http.Header { return r.header } +func (r *response) Write([]byte) (int, error) { return 0, nil } +func (r *response) WriteHeader(int) {} + +func requestEvent(ctx context.Context, event interface{}) { + txn := newrelic.FromContext(ctx) + + if nil == txn { + return + } + + if sourceARN := getEventSourceARN(event); "" != sourceARN { + integrationsupport.AddAgentAttribute(txn, internal.AttributeAWSLambdaEventSourceARN, sourceARN, nil) + } + + if request := eventWebRequest(event); nil != request { + txn.SetWebRequest(request) + } +} + +func responseEvent(ctx context.Context, event interface{}) { + txn := newrelic.FromContext(ctx) + if nil == txn { + return + } + if rw := eventResponse(event); nil != rw && 0 != rw.code { + txn.SetWebResponse(rw) + txn.WriteHeader(rw.code) + } +} + +func (h *wrappedHandler) Invoke(ctx context.Context, payload []byte) ([]byte, error) { + var arn, requestID string + if lctx, ok := lambdacontext.FromContext(ctx); ok { + arn = lctx.InvokedFunctionArn + requestID = lctx.AwsRequestID + } + + defer internal.ServerlessWrite(h.app, arn, h.writer) + + txn := h.app.StartTransaction(h.functionName, nil, nil) + defer txn.End() + + 
integrationsupport.AddAgentAttribute(txn, internal.AttributeAWSRequestID, requestID, nil) + integrationsupport.AddAgentAttribute(txn, internal.AttributeAWSLambdaARN, arn, nil) + h.firstTransaction.Do(func() { + integrationsupport.AddAgentAttribute(txn, internal.AttributeAWSLambdaColdStart, "", true) + }) + + ctx = newrelic.NewContext(ctx, txn) + ctx = handlertrace.NewContext(ctx, handlertrace.HandlerTrace{ + RequestEvent: requestEvent, + ResponseEvent: responseEvent, + }) + + response, err := h.original.Invoke(ctx, payload) + + if nil != err { + txn.NoticeError(err) + } + + return response, err +} + +type wrappedHandler struct { + original lambda.Handler + app newrelic.Application + // functionName is copied from lambdacontext.FunctionName for + // deterministic tests that don't depend on environment variables. + functionName string + // Although we are told that each Lambda will only handle one request at + // a time, we use a synchronization primitive to determine if this is + // the first transaction for defensiveness in case of future changes. + firstTransaction sync.Once + // writer is used to log the data JSON at the end of each transaction. + // This field exists (rather than hardcoded os.Stdout) for testing. + writer io.Writer +} + +// WrapHandler wraps the provided handler and returns a new handler with +// instrumentation. StartHandler should generally be used in place of +// WrapHandler: this function is exposed for consumers who are chaining +// middlewares. +func WrapHandler(handler lambda.Handler, app newrelic.Application) lambda.Handler { + if nil == app { + return handler + } + return &wrappedHandler{ + original: handler, + app: app, + functionName: lambdacontext.FunctionName, + writer: os.Stdout, + } +} + +// Wrap wraps the provided handler and returns a new handler with +// instrumentation. Start should generally be used in place of Wrap. 
+func Wrap(handler interface{}, app newrelic.Application) lambda.Handler { + return WrapHandler(lambda.NewHandler(handler), app) +} + +// Start should be used in place of lambda.Start. Replace: +// +// lambda.Start(myhandler) +// +// With: +// +// nrlambda.Start(myhandler, app) +// +func Start(handler interface{}, app newrelic.Application) { + lambda.StartHandler(Wrap(handler, app)) +} + +// StartHandler should be used in place of lambda.StartHandler. Replace: +// +// lambda.StartHandler(myhandler) +// +// With: +// +// nrlambda.StartHandler(myhandler, app) +// +func StartHandler(handler lambda.Handler, app newrelic.Application) { + lambda.StartHandler(WrapHandler(handler, app)) +} diff --git a/_integrations/nrlambda/handler_test.go b/_integrations/nrlambda/handler_test.go new file mode 100644 index 000000000..4811401a4 --- /dev/null +++ b/_integrations/nrlambda/handler_test.go @@ -0,0 +1,465 @@ +package nrlambda + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "strings" + "testing" + + "github.com/aws/aws-lambda-go/events" + "github.com/aws/aws-lambda-go/lambdacontext" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func dataShouldContain(tb testing.TB, data map[string]json.RawMessage, keys ...string) { + if h, ok := tb.(interface { + Helper() + }); ok { + h.Helper() + } + if len(data) != len(keys) { + tb.Errorf("data key length mismatch, expected=%v got=%v", + len(keys), len(data)) + return + } + for _, k := range keys { + _, ok := data[k] + if !ok { + tb.Errorf("data does not contain key %v", k) + } + } +} + +func testApp(getenv func(string) string, t *testing.T) newrelic.Application { + if nil == getenv { + getenv = func(string) string { return "" } + } + cfg := newConfigInternal(getenv) + + app, err := newrelic.NewApplication(cfg) + if nil != err { + t.Fatal(err) + } + internal.HarvestTesting(app, nil) + return app +} + +func distributedTracingEnabled(key string) string { + switch key { + case 
"NEW_RELIC_ACCOUNT_ID": + return "1" + case "NEW_RELIC_TRUSTED_ACCOUNT_KEY": + return "1" + case "NEW_RELIC_PRIMARY_APPLICATION_ID": + return "1" + default: + return "" + } +} + +func TestColdStart(t *testing.T) { + originalHandler := func(c context.Context) {} + app := testApp(nil, t) + wrapped := Wrap(originalHandler, app) + w := wrapped.(*wrappedHandler) + w.functionName = "functionName" + buf := &bytes.Buffer{} + w.writer = buf + + ctx := context.Background() + lctx := &lambdacontext.LambdaContext{ + AwsRequestID: "request-id", + InvokedFunctionArn: "function-arn", + } + ctx = lambdacontext.NewContext(ctx, lctx) + + resp, err := wrapped.Invoke(ctx, nil) + if nil != err || string(resp) != "null" { + t.Error("unexpected response", err, string(resp)) + } + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/functionName", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.requestId": "request-id", + "aws.lambda.arn": "function-arn", + "aws.lambda.coldStart": true, + }, + }}) + metadata, data, err := internal.ParseServerlessPayload(buf.Bytes()) + if err != nil { + t.Error(err) + } + dataShouldContain(t, data, "metric_data", "analytic_event_data", "span_event_data") + if v := string(metadata["arn"]); v != `"function-arn"` { + t.Error(metadata) + } + + // Invoke the handler again to test the cold-start attribute absence. 
+ buf = &bytes.Buffer{} + w.writer = buf + internal.HarvestTesting(app, nil) + resp, err = wrapped.Invoke(ctx, nil) + if nil != err || string(resp) != "null" { + t.Error("unexpected response", err, string(resp)) + } + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/functionName", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.requestId": "request-id", + "aws.lambda.arn": "function-arn", + }, + }}) + metadata, data, err = internal.ParseServerlessPayload(buf.Bytes()) + if err != nil { + t.Error(err) + } + dataShouldContain(t, data, "metric_data", "analytic_event_data", "span_event_data") + if v := string(metadata["arn"]); v != `"function-arn"` { + t.Error(metadata) + } +} + +func TestErrorCapture(t *testing.T) { + returnError := errors.New("problem") + originalHandler := func() error { return returnError } + app := testApp(nil, t) + wrapped := Wrap(originalHandler, app) + w := wrapped.(*wrappedHandler) + w.functionName = "functionName" + buf := &bytes.Buffer{} + w.writer = buf + + resp, err := wrapped.Invoke(context.Background(), nil) + if err != returnError || string(resp) != "" { + t.Error(err, string(resp)) + } + app.(internal.Expect).ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/functionName", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/functionName", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + // Error metrics test the error capture. 
+ {Name: "Errors/all", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Errors/allOther", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "Errors/OtherTransaction/Go/functionName", Scope: "", Forced: true, Data: []float64{1, 0, 0, 0, 0, 0}}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + }) + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/functionName", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.lambda.coldStart": true, + }, + }}) + _, data, err := internal.ParseServerlessPayload(buf.Bytes()) + if err != nil { + t.Error(err) + } + dataShouldContain(t, data, "metric_data", "analytic_event_data", "span_event_data", + "error_event_data", "error_data") +} + +func TestWrapNilApp(t *testing.T) { + originalHandler := func() (int, error) { + return 123, nil + } + wrapped := Wrap(originalHandler, nil) + ctx := context.Background() + resp, err := wrapped.Invoke(ctx, nil) + if nil != err || string(resp) != "123" { + t.Error("unexpected response", err, string(resp)) + } +} + +func TestSetWebRequest(t *testing.T) { + originalHandler := func(events.APIGatewayProxyRequest) {} + app := testApp(nil, t) + wrapped := Wrap(originalHandler, app) + w := wrapped.(*wrappedHandler) + w.functionName = "functionName" + buf := &bytes.Buffer{} + w.writer = buf + + req := events.APIGatewayProxyRequest{ 
+ Headers: map[string]string{ + "X-Forwarded-Port": "4000", + "X-Forwarded-Proto": "HTTPS", + }, + } + reqbytes, err := json.Marshal(req) + if err != nil { + t.Error("unable to marshal json", err) + } + + resp, err := wrapped.Invoke(context.Background(), reqbytes) + if err != nil { + t.Error(err, string(resp)) + } + app.(internal.Expect).ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/functionName", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/functionName", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/functionName", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + }) + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/functionName", + "nr.apdexPerfZone": "S", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.lambda.coldStart": true, + "request.uri": "//:4000", + }, + }}) + _, data, err := internal.ParseServerlessPayload(buf.Bytes()) + if err != nil { + t.Error(err) + } + dataShouldContain(t, data, "metric_data", "analytic_event_data", "span_event_data") +} + +func makePayload(app newrelic.Application) string { + txn := app.StartTransaction("hello", nil, nil) + return txn.CreateDistributedTracePayload().Text() +} + +func TestDistributedTracing(t *testing.T) { + originalHandler := 
func(events.APIGatewayProxyRequest) {} + app := testApp(distributedTracingEnabled, t) + wrapped := Wrap(originalHandler, app) + w := wrapped.(*wrappedHandler) + w.functionName = "functionName" + buf := &bytes.Buffer{} + w.writer = buf + + req := events.APIGatewayProxyRequest{ + Headers: map[string]string{ + "X-Forwarded-Port": "4000", + "X-Forwarded-Proto": "HTTPS", + newrelic.DistributedTracePayloadHeader: makePayload(app), + }, + } + reqbytes, err := json.Marshal(req) + if err != nil { + t.Error("unable to marshal json", err) + } + + resp, err := wrapped.Invoke(context.Background(), reqbytes) + if err != nil { + t.Error(err, string(resp)) + } + app.(internal.Expect).ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/functionName", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/1/1/HTTPS/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/1/1/HTTPS/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "Supportability/DistributedTrace/AcceptPayload/Success", Scope: "", Forced: true, Data: nil}, + {Name: "TransportDuration/App/1/1/HTTPS/all", Scope: "", Forced: false, Data: nil}, + {Name: "TransportDuration/App/1/1/HTTPS/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/functionName", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/functionName", Scope: "", Forced: false, Data: nil}, + }) + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/functionName", + "nr.apdexPerfZone": "S", + "parent.account": "1", + "parent.app": "1", + "parent.transportType": "HTTPS", + "parent.type": "App", + "guid": internal.MatchAnything, + 
"parent.transportDuration": internal.MatchAnything, + "parentId": internal.MatchAnything, + "parentSpanId": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.lambda.coldStart": true, + "request.uri": "//:4000", + }, + }}) + _, data, err := internal.ParseServerlessPayload(buf.Bytes()) + if err != nil { + t.Error(err) + } + dataShouldContain(t, data, "metric_data", "analytic_event_data", "span_event_data") +} + +func TestEventARN(t *testing.T) { + originalHandler := func(events.DynamoDBEvent) {} + app := testApp(nil, t) + wrapped := Wrap(originalHandler, app) + w := wrapped.(*wrappedHandler) + w.functionName = "functionName" + buf := &bytes.Buffer{} + w.writer = buf + + req := events.DynamoDBEvent{ + Records: []events.DynamoDBEventRecord{{ + EventSourceArn: "ARN", + }}, + } + + reqbytes, err := json.Marshal(req) + if err != nil { + t.Error("unable to marshal json", err) + } + + resp, err := wrapped.Invoke(context.Background(), reqbytes) + if err != nil { + t.Error(err, string(resp)) + } + app.(internal.Expect).ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/Go/functionName", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/functionName", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + }) + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/functionName", + "guid": internal.MatchAnything, + "priority": 
internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.lambda.coldStart": true, + "aws.lambda.eventSource.arn": "ARN", + }, + }}) + _, data, err := internal.ParseServerlessPayload(buf.Bytes()) + if err != nil { + t.Error(err) + } + dataShouldContain(t, data, "metric_data", "analytic_event_data", "span_event_data") +} + +func TestAPIGatewayProxyResponse(t *testing.T) { + originalHandler := func() (events.APIGatewayProxyResponse, error) { + return events.APIGatewayProxyResponse{ + Body: "Hello World", + StatusCode: 200, + Headers: map[string]string{ + "Content-Type": "text/html", + }, + }, nil + } + + app := testApp(nil, t) + wrapped := Wrap(originalHandler, app) + w := wrapped.(*wrappedHandler) + w.functionName = "functionName" + buf := &bytes.Buffer{} + w.writer = buf + + resp, err := wrapped.Invoke(context.Background(), nil) + if nil != err { + t.Error("unexpected err", err) + } + if !strings.Contains(string(resp), "Hello World") { + t.Error("unexpected response", string(resp)) + } + + app.(internal.Expect).ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/functionName", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "aws.lambda.coldStart": true, + "httpResponseCode": "200", + "response.headers.contentType": "text/html", + }, + }}) + _, data, err := internal.ParseServerlessPayload(buf.Bytes()) + if err != nil { + t.Error(err) + } + dataShouldContain(t, data, "metric_data", "analytic_event_data", "span_event_data") +} + +func TestCustomEvent(t *testing.T) { + originalHandler := func(c context.Context) { + if txn := newrelic.FromContext(c); nil != txn { + 
txn.Application().RecordCustomEvent("myEvent", map[string]interface{}{ + "zip": "zap", + }) + } + } + app := testApp(nil, t) + wrapped := Wrap(originalHandler, app) + w := wrapped.(*wrappedHandler) + w.functionName = "functionName" + buf := &bytes.Buffer{} + w.writer = buf + + resp, err := wrapped.Invoke(context.Background(), nil) + if nil != err || string(resp) != "null" { + t.Error("unexpected response", err, string(resp)) + } + app.(internal.Expect).ExpectCustomEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "type": "myEvent", + "timestamp": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{ + "zip": "zap", + }, + AgentAttributes: map[string]interface{}{}, + }}) + _, data, err := internal.ParseServerlessPayload(buf.Bytes()) + if err != nil { + t.Error(err) + } + dataShouldContain(t, data, "metric_data", "analytic_event_data", "span_event_data", "custom_event_data") +} diff --git a/_integrations/nrlogrus/README.md b/_integrations/nrlogrus/README.md new file mode 100644 index 000000000..cfa195a99 --- /dev/null +++ b/_integrations/nrlogrus/README.md @@ -0,0 +1,10 @@ +# _integrations/nrlogrus [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrlogrus) + +Package `nrlogrus` sends go-agent log messages to https://github.com/sirupsen/logrus. + +```go +import "github.com/newrelic/go-agent/_integrations/nrlogrus" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrlogrus). 
diff --git a/_integrations/nrlogrus/example/main.go b/_integrations/nrlogrus/example/main.go new file mode 100644 index 000000000..79825698d --- /dev/null +++ b/_integrations/nrlogrus/example/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "fmt" + "io" + "net/http" + "os" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrlogrus" + "github.com/sirupsen/logrus" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Logrus App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + logrus.SetLevel(logrus.DebugLevel) + cfg.Logger = nrlogrus.StandardLogger() + + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + http.HandleFunc(newrelic.WrapHandleFunc(app, "/", func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hello world") + })) + + http.ListenAndServe(":8000", nil) +} diff --git a/_integrations/nrlogrus/nrlogrus.go b/_integrations/nrlogrus/nrlogrus.go new file mode 100644 index 000000000..dd578cd0b --- /dev/null +++ b/_integrations/nrlogrus/nrlogrus.go @@ -0,0 +1,67 @@ +// Package nrlogrus sends go-agent log messages to +// https://github.com/sirupsen/logrus. +// +// Use this package if you are using logrus in your application and would like +// the go-agent log messages to end up in the same place. 
If you are using +// the logrus standard logger, assign the newrelic.Config.Logger field to +// nrlogrus.StandardLogger(): +// +// cfg := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__") +// cfg.Logger = nrlogrus.StandardLogger() +// +// If you are using a particular logrus Logger instance, assign the +// newrelic.Config.Logger field to the output of nrlogrus.Transform: +// +// l := logrus.New() +// l.SetLevel(logrus.DebugLevel) +// cfg := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__") +// cfg.Logger = nrlogrus.Transform(l) +// +// This package requires logrus version v1.1.0 and above. +package nrlogrus + +import ( + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/sirupsen/logrus" +) + +func init() { internal.TrackUsage("integration", "logging", "logrus") } + +type shim struct { + e *logrus.Entry + l *logrus.Logger +} + +func (s *shim) Error(msg string, c map[string]interface{}) { + s.e.WithFields(c).Error(msg) +} +func (s *shim) Warn(msg string, c map[string]interface{}) { + s.e.WithFields(c).Warn(msg) +} +func (s *shim) Info(msg string, c map[string]interface{}) { + s.e.WithFields(c).Info(msg) +} +func (s *shim) Debug(msg string, c map[string]interface{}) { + s.e.WithFields(c).Debug(msg) +} +func (s *shim) DebugEnabled() bool { + lvl := s.l.GetLevel() + return lvl >= logrus.DebugLevel +} + +// StandardLogger returns a newrelic.Logger which forwards agent log messages to +// the logrus package-level exported logger. +func StandardLogger() newrelic.Logger { + return Transform(logrus.StandardLogger()) +} + +// Transform turns a *logrus.Logger into a newrelic.Logger. 
+func Transform(l *logrus.Logger) newrelic.Logger { + return &shim{ + l: l, + e: l.WithFields(logrus.Fields{ + "component": "newrelic", + }), + } +} diff --git a/_integrations/nrlogrus/nrlogrus_test.go b/_integrations/nrlogrus/nrlogrus_test.go new file mode 100644 index 000000000..a23f60d6a --- /dev/null +++ b/_integrations/nrlogrus/nrlogrus_test.go @@ -0,0 +1,46 @@ +package nrlogrus + +import ( + "bytes" + "strings" + "testing" + + "github.com/sirupsen/logrus" +) + +func bufferToStringAndReset(buf *bytes.Buffer) string { + s := buf.String() + buf.Reset() + return s +} + +func TestLogrus(t *testing.T) { + buf := &bytes.Buffer{} + l := logrus.New() + l.SetOutput(buf) + l.SetLevel(logrus.DebugLevel) + lg := Transform(l) + lg.Debug("elephant", map[string]interface{}{"color": "gray"}) + s := bufferToStringAndReset(buf) + if !strings.Contains(s, "elephant") || !strings.Contains(s, "gray") { + t.Error(s) + } + if enabled := lg.DebugEnabled(); !enabled { + t.Error(enabled) + } + // Now switch the level and test that debug is no longer enabled. + l.SetLevel(logrus.InfoLevel) + lg.Debug("lion", map[string]interface{}{"color": "yellow"}) + s = bufferToStringAndReset(buf) + if strings.Contains(s, "lion") || strings.Contains(s, "yellow") { + t.Error(s) + } + if enabled := lg.DebugEnabled(); enabled { + t.Error(enabled) + } + lg.Info("tiger", map[string]interface{}{"color": "orange"}) + s = bufferToStringAndReset(buf) + if !strings.Contains(s, "tiger") || !strings.Contains(s, "orange") { + t.Error(s) + } +} diff --git a/_integrations/nrlogxi/v1/README.md b/_integrations/nrlogxi/v1/README.md new file mode 100644 index 000000000..54244d0cc --- /dev/null +++ b/_integrations/nrlogxi/v1/README.md @@ -0,0 +1,10 @@ +# _integrations/nrlogxi/v1 [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrlogxi/v1) + +Package `nrlogxi` supports https://github.com/mgutz/logxi. 
+ +```go +import "github.com/newrelic/go-agent/_integrations/nrlogxi/v1" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrlogxi/v1). diff --git a/_integrations/nrlogxi/v1/example_test.go b/_integrations/nrlogxi/v1/example_test.go new file mode 100644 index 000000000..f09a82602 --- /dev/null +++ b/_integrations/nrlogxi/v1/example_test.go @@ -0,0 +1,20 @@ +package nrlogxi_test + +import ( + log "github.com/mgutz/logxi/v1" + newrelic "github.com/newrelic/go-agent" + nrlogxi "github.com/newrelic/go-agent/_integrations/nrlogxi/v1" +) + +func Example() { + cfg := newrelic.NewConfig("Example App", "__YOUR_NEWRELIC_LICENSE_KEY__") + + // Create a new logxi logger: + l := log.New("newrelic") + l.SetLevel(log.LevelInfo) + + // Use nrlogxi to register the logger with the agent: + cfg.Logger = nrlogxi.New(l) + + newrelic.NewApplication(cfg) +} diff --git a/_integrations/nrlogxi/v1/nrlogxi.go b/_integrations/nrlogxi/v1/nrlogxi.go new file mode 100644 index 000000000..0df49eb20 --- /dev/null +++ b/_integrations/nrlogxi/v1/nrlogxi.go @@ -0,0 +1,49 @@ +// Package nrlogxi supports https://github.com/mgutz/logxi. +// +// Wrap your logxi Logger using nrlogxi.New to send agent log messages through +// logxi. +package nrlogxi + +import ( + "github.com/mgutz/logxi/v1" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "logging", "logxi", "v1") } + +type shim struct { + e log.Logger +} + +func (l *shim) Error(msg string, context map[string]interface{}) { + l.e.Error(msg, convert(context)...) +} +func (l *shim) Warn(msg string, context map[string]interface{}) { + l.e.Warn(msg, convert(context)...) +} +func (l *shim) Info(msg string, context map[string]interface{}) { + l.e.Info(msg, convert(context)...) +} +func (l *shim) Debug(msg string, context map[string]interface{}) { + l.e.Debug(msg, convert(context)...) 
+} +func (l *shim) DebugEnabled() bool { + return l.e.IsDebug() +} + +func convert(c map[string]interface{}) []interface{} { + output := make([]interface{}, 0, 2*len(c)) + for k, v := range c { + output = append(output, k, v) + } + return output +} + +// New returns a newrelic.Logger which forwards agent log messages to the +// provided logxi Logger. +func New(l log.Logger) newrelic.Logger { + return &shim{ + e: l, + } +} diff --git a/_integrations/nrmicro/README.md b/_integrations/nrmicro/README.md new file mode 100644 index 000000000..371bb12b1 --- /dev/null +++ b/_integrations/nrmicro/README.md @@ -0,0 +1,10 @@ +# _integrations/nrmicro [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrmicro) + +Package `nrmicro` instruments https://github.com/micro/go-micro. + +```go +import "github.com/newrelic/go-agent/_integrations/nrmicro" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrmicro). diff --git a/_integrations/nrmicro/example/README.md b/_integrations/nrmicro/example/README.md new file mode 100644 index 000000000..f2981c866 --- /dev/null +++ b/_integrations/nrmicro/example/README.md @@ -0,0 +1,9 @@ +# Example Go Micro apps +In this directory you will find several example Go Micro apps that are instrumented using the New Relic agent. All of the apps assume that your New Relic license key is available as an environment variable named `NEW_RELIC_LICENSE_KEY` + +They can be run the standard way: +* The sample Pub/Sub app: `go run pubsub/main.go` instruments both a publish and a subscribe method +* The sample Server app: `go run server/server.go` instruments a handler method +* The sample Client app: `go run client/client.go` instruments the client. + * Note that in order for this to function, the server app must also be running. 
+ \ No newline at end of file diff --git a/_integrations/nrmicro/example/client/client.go b/_integrations/nrmicro/example/client/client.go new file mode 100644 index 000000000..b08d5f016 --- /dev/null +++ b/_integrations/nrmicro/example/client/client.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/micro/go-micro" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrmicro" + proto "github.com/newrelic/go-agent/_integrations/nrmicro/example/proto" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Micro Client", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + err = app.WaitForConnection(10 * time.Second) + if nil != err { + panic(err) + } + defer app.Shutdown(10 * time.Second) + + txn := app.StartTransaction("client", nil, nil) + defer txn.End() + + service := micro.NewService( + // Add the New Relic wrapper to the client which will create External + // segments for each out going call. + micro.WrapClient(nrmicro.ClientWrapper()), + ) + service.Init() + ctx := newrelic.NewContext(context.Background(), txn) + c := proto.NewGreeterService("greeter", service.Client()) + + rsp, err := c.Hello(ctx, &proto.HelloRequest{ + Name: "John", + }) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(rsp.Greeting) +} diff --git a/_integrations/nrmicro/example/proto/greeter.micro.go b/_integrations/nrmicro/example/proto/greeter.micro.go new file mode 100644 index 000000000..34f47eebe --- /dev/null +++ b/_integrations/nrmicro/example/proto/greeter.micro.go @@ -0,0 +1,91 @@ +// Code generated by protoc-gen-micro. DO NOT EDIT. 
+// source: greeter.proto + +package greeter + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +import ( + context "context" + client "github.com/micro/go-micro/client" + server "github.com/micro/go-micro/server" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ client.Option +var _ server.Option + +// Client API for Greeter service + +type GreeterService interface { + Hello(ctx context.Context, in *HelloRequest, opts ...client.CallOption) (*HelloResponse, error) +} + +type greeterService struct { + c client.Client + name string +} + +func NewGreeterService(name string, c client.Client) GreeterService { + if c == nil { + c = client.NewClient() + } + if len(name) == 0 { + name = "greeter" + } + return &greeterService{ + c: c, + name: name, + } +} + +func (c *greeterService) Hello(ctx context.Context, in *HelloRequest, opts ...client.CallOption) (*HelloResponse, error) { + req := c.c.NewRequest(c.name, "Greeter.Hello", in) + out := new(HelloResponse) + err := c.c.Call(ctx, req, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Greeter service + +type GreeterHandler interface { + Hello(context.Context, *HelloRequest, *HelloResponse) error +} + +func RegisterGreeterHandler(s server.Server, hdlr GreeterHandler, opts ...server.HandlerOption) error { + type greeter interface { + Hello(ctx context.Context, in *HelloRequest, out *HelloResponse) error + } + type Greeter struct { + greeter + } + h := &greeterHandler{hdlr} + return s.Handle(s.NewHandler(&Greeter{h}, opts...)) +} + +type greeterHandler struct { + GreeterHandler +} + +func (h *greeterHandler) Hello(ctx context.Context, in *HelloRequest, out *HelloResponse) error { + return h.GreeterHandler.Hello(ctx, in, out) +} diff --git a/_integrations/nrmicro/example/proto/greeter.pb.go b/_integrations/nrmicro/example/proto/greeter.pb.go new file mode 100644 index 000000000..ebd2dc6f5 --- /dev/null +++ b/_integrations/nrmicro/example/proto/greeter.pb.go @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: greeter.proto + +package greeter + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} +func (*HelloRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e585294ab3f34af5, []int{0} +} + +func (m *HelloRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HelloRequest.Unmarshal(m, b) +} +func (m *HelloRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HelloRequest.Marshal(b, m, deterministic) +} +func (m *HelloRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HelloRequest.Merge(m, src) +} +func (m *HelloRequest) XXX_Size() int { + return xxx_messageInfo_HelloRequest.Size(m) +} +func (m *HelloRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HelloRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HelloRequest proto.InternalMessageInfo + +func (m *HelloRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type HelloResponse struct { + Greeting string `protobuf:"bytes,2,opt,name=greeting,proto3" json:"greeting,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HelloResponse) Reset() { *m = HelloResponse{} } +func (m *HelloResponse) String() string { return proto.CompactTextString(m) } +func (*HelloResponse) ProtoMessage() {} +func (*HelloResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e585294ab3f34af5, []int{1} +} + +func (m *HelloResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HelloResponse.Unmarshal(m, b) +} +func (m *HelloResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + return xxx_messageInfo_HelloResponse.Marshal(b, m, deterministic) +} +func (m *HelloResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HelloResponse.Merge(m, src) +} +func (m *HelloResponse) XXX_Size() int { + return xxx_messageInfo_HelloResponse.Size(m) +} +func (m *HelloResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HelloResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HelloResponse proto.InternalMessageInfo + +func (m *HelloResponse) GetGreeting() string { + if m != nil { + return m.Greeting + } + return "" +} + +func init() { + proto.RegisterType((*HelloRequest)(nil), "HelloRequest") + proto.RegisterType((*HelloResponse)(nil), "HelloResponse") +} + +func init() { proto.RegisterFile("greeter.proto", fileDescriptor_e585294ab3f34af5) } + +var fileDescriptor_e585294ab3f34af5 = []byte{ + // 130 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x2f, 0x4a, 0x4d, + 0x2d, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x52, 0xe2, 0xe2, 0xf1, 0x48, 0xcd, + 0xc9, 0xc9, 0x0f, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, + 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x95, 0xb4, 0xb9, 0x78, 0xa1, 0x6a, + 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xa4, 0xb8, 0x38, 0xc0, 0xa6, 0x64, 0xe6, 0xa5, 0x4b, + 0x30, 0x81, 0x15, 0xc2, 0xf9, 0x46, 0xc6, 0x5c, 0xec, 0xee, 0x10, 0x1b, 0x84, 0x34, 0xb8, 0x58, + 0xc1, 0xfa, 0x84, 0x78, 0xf5, 0x90, 0xed, 0x90, 0xe2, 0xd3, 0x43, 0x31, 0x4e, 0x89, 0x21, 0x89, + 0x0d, 0xec, 0x18, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbd, 0xe0, 0x75, 0x0a, 0x9d, 0x00, + 0x00, 0x00, +} diff --git a/_integrations/nrmicro/example/proto/greeter.proto b/_integrations/nrmicro/example/proto/greeter.proto new file mode 100644 index 000000000..4ff347afa --- /dev/null +++ b/_integrations/nrmicro/example/proto/greeter.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +service Greeter { + rpc 
Hello(HelloRequest) returns (HelloResponse) {} +} + +message HelloRequest { + string name = 1; +} + +message HelloResponse { + string greeting = 2; +} diff --git a/_integrations/nrmicro/example/pubsub/main.go b/_integrations/nrmicro/example/pubsub/main.go new file mode 100644 index 000000000..2feb27395 --- /dev/null +++ b/_integrations/nrmicro/example/pubsub/main.go @@ -0,0 +1,74 @@ +package main + +import ( + "context" + "fmt" + "log" + "os" + "time" + + "github.com/micro/go-micro" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrmicro" + proto "github.com/newrelic/go-agent/_integrations/nrmicro/example/proto" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func subEv(ctx context.Context, msg *proto.HelloRequest) error { + fmt.Println("Message received from", msg.GetName()) + return nil +} + +func publish(s micro.Service, app newrelic.Application) { + c := s.Client() + + for range time.NewTicker(time.Second).C { + txn := app.StartTransaction("publish", nil, nil) + msg := c.NewMessage("example.topic.pubsub", &proto.HelloRequest{Name: "Sally"}) + ctx := newrelic.NewContext(context.Background(), txn) + fmt.Println("Sending message") + if err := c.Publish(ctx, msg); nil != err { + log.Fatal(err) + } + txn.End() + } +} + +func main() { + cfg := newrelic.NewConfig("Micro Pub/Sub", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + err = app.WaitForConnection(10 * time.Second) + if nil != err { + panic(err) + } + defer app.Shutdown(10 * time.Second) + + s := micro.NewService( + micro.Name("go.micro.srv.pubsub"), + // Add the New Relic wrapper to the client which will create + // MessageProducerSegments for each Publish call. 
+ micro.WrapClient(nrmicro.ClientWrapper()), + // Add the New Relic wrapper to the subscriber which will start a new + // transaction for each Subscriber invocation. + micro.WrapSubscriber(nrmicro.SubscriberWrapper(app)), + ) + s.Init() + + go publish(s, app) + + micro.RegisterSubscriber("example.topic.pubsub", s.Server(), subEv) + + if err := s.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/_integrations/nrmicro/example/server/server.go b/_integrations/nrmicro/example/server/server.go new file mode 100644 index 000000000..4b2b7add2 --- /dev/null +++ b/_integrations/nrmicro/example/server/server.go @@ -0,0 +1,64 @@ +package main + +import ( + "context" + "fmt" + "log" + "os" + "time" + + "github.com/micro/go-micro" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrmicro" + proto "github.com/newrelic/go-agent/_integrations/nrmicro/example/proto" +) + +// Greeter is the server struct +type Greeter struct{} + +// Hello is the method on the server being called +func (g *Greeter) Hello(ctx context.Context, req *proto.HelloRequest, rsp *proto.HelloResponse) error { + name := req.GetName() + if txn := newrelic.FromContext(ctx); nil != txn { + txn.AddAttribute("Name", name) + } + fmt.Println("Request received from", name) + rsp.Greeting = "Hello " + name + return nil +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Micro Server", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + err = app.WaitForConnection(10 * time.Second) + if nil != err { + panic(err) + } + defer app.Shutdown(10 * time.Second) + + service := micro.NewService( + micro.Name("greeter"), + // Add the New Relic middleware which will start a new transaction for + // each Handler 
invocation. + micro.WrapHandler(nrmicro.HandlerWrapper(app)), + ) + + service.Init() + + proto.RegisterGreeterHandler(service.Server(), new(Greeter)) + + if err := service.Run(); err != nil { + log.Fatal(err) + } +} diff --git a/_integrations/nrmicro/nrmicro.go b/_integrations/nrmicro/nrmicro.go new file mode 100644 index 000000000..ea0113d9a --- /dev/null +++ b/_integrations/nrmicro/nrmicro.go @@ -0,0 +1,235 @@ +package nrmicro + +import ( + "context" + "net/http" + "net/url" + "strings" + + "github.com/micro/go-micro/client" + "github.com/micro/go-micro/errors" + "github.com/micro/go-micro/metadata" + "github.com/micro/go-micro/registry" + "github.com/micro/go-micro/server" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +type nrWrapper struct { + client.Client +} + +var addrMap = make(map[string]string) + +func startExternal(ctx context.Context, procedure, host string) (context.Context, newrelic.ExternalSegment) { + var seg newrelic.ExternalSegment + if txn := newrelic.FromContext(ctx); nil != txn { + seg = newrelic.ExternalSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Procedure: procedure, + Library: "Micro", + Host: host, + } + ctx = addDTPayloadToContext(ctx, txn) + } + return ctx, seg +} + +func startMessage(ctx context.Context, topic string) (context.Context, *newrelic.MessageProducerSegment) { + var seg *newrelic.MessageProducerSegment + if txn := newrelic.FromContext(ctx); nil != txn { + seg = &newrelic.MessageProducerSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Library: "Micro", + DestinationType: newrelic.MessageTopic, + DestinationName: topic, + } + ctx = addDTPayloadToContext(ctx, txn) + } + return ctx, seg +} + +func addDTPayloadToContext(ctx context.Context, txn newrelic.Transaction) context.Context { + payload := txn.CreateDistributedTracePayload() + if txt := payload.Text(); "" != txt { + md, _ := metadata.FromContext(ctx) + md 
= metadata.Copy(md) + md[newrelic.DistributedTracePayloadHeader] = txt + ctx = metadata.NewContext(ctx, md) + } + return ctx +} + +func extractHost(addr string) string { + if host, ok := addrMap[addr]; ok { + return host + } + + host := addr + if strings.HasPrefix(host, "unix://") { + host = "localhost" + } else if u, err := url.Parse(host); nil == err { + if "" != u.Host { + host = u.Host + } else { + host = u.Path + } + } + + addrMap[addr] = host + return host +} + +func (n *nrWrapper) Publish(ctx context.Context, msg client.Message, opts ...client.PublishOption) error { + ctx, seg := startMessage(ctx, msg.Topic()) + defer seg.End() + return n.Client.Publish(ctx, msg, opts...) +} + +func (n *nrWrapper) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) { + ctx, seg := startExternal(ctx, req.Endpoint(), req.Service()) + defer seg.End() + return n.Client.Stream(ctx, req, opts...) +} + +func (n *nrWrapper) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error { + ctx, seg := startExternal(ctx, req.Endpoint(), req.Service()) + defer seg.End() + return n.Client.Call(ctx, req, rsp, opts...) +} + +// ClientWrapper wraps a Micro `client.Client` +// (https://godoc.org/github.com/micro/go-micro/client#Client) instance. External +// segments will be created for each call to the client's `Call`, `Publish`, or +// `Stream` methods. The `newrelic.Transaction` must be put into the context +// using `newrelic.NewContext` +// (https://godoc.org/github.com/newrelic/go-agent#NewContext) when calling one +// of those methods. +func ClientWrapper() client.Wrapper { + return func(c client.Client) client.Client { + return &nrWrapper{c} + } +} + +// CallWrapper wraps the `Call` method of a Micro `client.Client` +// (https://godoc.org/github.com/micro/go-micro/client#Client) instance. +// External segments will be created for each call to the client's `Call` +// method. 
The `newrelic.Transaction` must be put into the context using +// `newrelic.NewContext` +// (https://godoc.org/github.com/newrelic/go-agent#NewContext) when calling +// `Call`. +func CallWrapper() client.CallWrapper { + return func(cf client.CallFunc) client.CallFunc { + return func(ctx context.Context, node *registry.Node, req client.Request, rsp interface{}, opts client.CallOptions) error { + ctx, seg := startExternal(ctx, req.Endpoint(), req.Service()) + defer seg.End() + return cf(ctx, node, req, rsp, opts) + } + } +} + +// HandlerWrapper wraps a Micro `server.Server` +// (https://godoc.org/github.com/micro/go-micro/server#Server) handler. +// +// This wrapper creates transactions for inbound calls. The transaction is +// added to the call context and can be accessed in your method handlers using +// `newrelic.FromContext` +// (https://godoc.org/github.com/newrelic/go-agent#FromContext). +// +// When an error is returned and it is of type Micro `errors.Error` +// (https://godoc.org/github.com/micro/go-micro/errors#Error), the error that +// is recorded is based on the HTTP response code (found in the Code field). +// Values above 400 or below 100 that are not in the IgnoreStatusCodes +// (https://godoc.org/github.com/newrelic/go-agent#Config) configuration list +// are recorded as errors. A 500 response code and corresponding error is +// recorded when the error is of any other type. A 200 response code is +// recorded if no error is returned. 
+func HandlerWrapper(app newrelic.Application) server.HandlerWrapper { + return func(fn server.HandlerFunc) server.HandlerFunc { + if app == nil { + return fn + } + return func(ctx context.Context, req server.Request, rsp interface{}) error { + txn := startWebTransaction(ctx, app, req) + defer txn.End() + err := fn(newrelic.NewContext(ctx, txn), req, rsp) + var code int + if err != nil { + if t, ok := err.(*errors.Error); ok { + code = int(t.Code) + } else { + code = 500 + } + } else { + code = 200 + } + txn.WriteHeader(code) + return err + } + } +} + +// SubscriberWrapper wraps a Micro `server.Subscriber` +// (https://godoc.org/github.com/micro/go-micro/server#Subscriber) instance. +// +// This wrapper creates background transactions for inbound calls. The +// transaction is added to the subscriber context and can be accessed in your +// subscriber handlers using `newrelic.FromContext` +// (https://godoc.org/github.com/newrelic/go-agent#FromContext). +// +// The attribute `"message.routingKey"` is added to the transaction and will +// appear on transaction events, transaction traces, error events, and error +// traces. It corresponds to the `server.Message`'s Topic +// (https://godoc.org/github.com/micro/go-micro/server#Message). +// +// If a Subscriber returns an error, it will be recorded and reported. 
+func SubscriberWrapper(app newrelic.Application) server.SubscriberWrapper { + return func(fn server.SubscriberFunc) server.SubscriberFunc { + if app == nil { + return fn + } + return func(ctx context.Context, m server.Message) (err error) { + namer := internal.MessageMetricKey{ + Library: "Micro", + DestinationType: string(newrelic.MessageTopic), + DestinationName: m.Topic(), + Consumer: true, + } + txn := app.StartTransaction(namer.Name(), nil, nil) + defer txn.End() + integrationsupport.AddAgentAttribute(txn, internal.AttributeMessageRoutingKey, m.Topic(), nil) + md, ok := metadata.FromContext(ctx) + if ok { + txn.AcceptDistributedTracePayload(newrelic.TransportHTTP, md[newrelic.DistributedTracePayloadHeader]) + } + ctx = newrelic.NewContext(ctx, txn) + err = fn(ctx, m) + if err != nil { + txn.NoticeError(err) + } + return err + } + } +} + +func startWebTransaction(ctx context.Context, app newrelic.Application, req server.Request) newrelic.Transaction { + var hdrs http.Header + if md, ok := metadata.FromContext(ctx); ok { + hdrs = make(http.Header, len(md)) + for k, v := range md { + hdrs.Add(k, v) + } + } + txn := app.StartTransaction(req.Endpoint(), nil, nil) + u := &url.URL{ + Scheme: "micro", + Host: req.Service(), + Path: req.Endpoint(), + } + + webReq := newrelic.NewStaticWebRequest(hdrs, u, req.Method(), newrelic.TransportHTTP) + txn.SetWebRequest(webReq) + + return txn +} diff --git a/_integrations/nrmicro/nrmicro_doc.go b/_integrations/nrmicro/nrmicro_doc.go new file mode 100644 index 000000000..049f7dc0e --- /dev/null +++ b/_integrations/nrmicro/nrmicro_doc.go @@ -0,0 +1,181 @@ +// Package nrmicro instruments https://github.com/micro/go-micro. +// +// This package can be used to instrument Micro Servers, Clients, Producers, +// and Subscribers. 
+// +// Micro Servers +// +// To instrument a Micro Server, use the `micro.WrapHandler` +// (https://godoc.org/github.com/micro/go-micro#WrapHandler) option with +// `nrmicro.HandlerWrapper` and your `newrelic.Application` and pass it to the +// `micro.NewService` method. Example: +// +// cfg := newrelic.NewConfig("Micro Server", os.Getenv("NEW_RELIC_LICENSE_KEY")) +// app, _ := newrelic.NewApplication(cfg) +// service := micro.NewService( +// micro.WrapHandler(nrmicro.HandlerWrapper(app)), +// ) +// +// Alternatively, use the `server.WrapHandler` +// (https://godoc.org/github.com/micro/go-micro/server#WrapHandler) option with +// `nrmicro.HandlerWrapper` and your `newrelic.Application` and pass it to the +// `server.NewServer` method. Example: +// +// cfg := newrelic.NewConfig("Micro Server", os.Getenv("NEW_RELIC_LICENSE_KEY")) +// app, _ := newrelic.NewApplication(cfg) +// svr := server.NewServer( +// server.WrapHandler(nrmicro.HandlerWrapper(app)), +// ) +// +// If more than one wrapper is passed to `micro.WrapHandler` or +// `server.WrapHandler` as a list, be sure that the `nrmicro.HandlerWrapper` is +// first in this list. +// +// This wrapper creates transactions for inbound calls. The transaction is +// added to the call context and can be accessed in your method handlers using +// `newrelic.FromContext` +// (https://godoc.org/github.com/newrelic/go-agent#FromContext). +// +// When an error is returned and it is of type Micro `errors.Error` +// (https://godoc.org/github.com/micro/go-micro/errors#Error), the error that +// is recorded is based on the HTTP response code (found in the Code field). +// Values above 400 or below 100 that are not in the IgnoreStatusCodes +// (https://godoc.org/github.com/newrelic/go-agent#Config) configuration list +// are recorded as errors. A 500 response code and corresponding error is +// recorded when the error is of any other type. A 200 response code is +// recorded if no error is returned. 
+// +// Full server example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrmicro/example/server/server.go +// +// Micro Clients +// +// There are three different ways to instrument a Micro Client and create +// External segments for `Call`, `Publish`, and `Stream` methods. +// +// No matter which way the Micro `client.Client` is wrapped, all calls to +// `Client.Call`, `Client.Publish`, or `Client.Stream` must be done with a +// context which contains a `newrelic.Transaction`. +// +// ctx = newrelic.NewContext(ctx, txn) +// err := cli.Call(ctx, req, &rsp) +// +// 1. The first option is to wrap the `Call`, `Publish`, and `Stream` methods +// on a client using the `micro.WrapClient` +// (https://godoc.org/github.com/micro/go-micro#WrapClient) option with +// `nrmicro.ClientWrapper` and pass it to the `micro.NewService` method. If +// more than one wrapper is passed to `micro.WrapClient`, ensure that the +// `nrmicro.ClientWrapper` is the first in the list. `ExternalSegment`s will be +// created each time a `Call` or `Stream` method is called on the +// client. `MessageProducerSegment`s will be created each time a `Publish` +// method is called on the client. Example: +// +// service := micro.NewService( +// micro.WrapClient(nrmicro.ClientWrapper()), +// ) +// cli := service.Client() +// +// It is also possible to use the `client.Wrap` +// (https://godoc.org/github.com/micro/go-micro/client#Wrap) option with +// `nrmicro.ClientWrapper` and pass it to the `client.NewClient` method to +// achieve the same result. +// +// cli := client.NewClient( +// client.Wrap(nrmicro.ClientWrapper()), +// ) +// +// 2. The second option is to wrap just the `Call` method on a client using the +// `micro.WrapCall` (https://godoc.org/github.com/micro/go-micro#WrapCall) +// option with `nrmicro.CallWrapper` and pass it to the `micro.NewService` +// method. 
If more than one wrapper is passed to `micro.WrapCall`, ensure that
+// the `nrmicro.CallWrapper` is the first in the list. External segments will
+// be created each time a `Call` method is called on the client. Example:
+//
+//	service := micro.NewService(
+//		micro.WrapCall(nrmicro.CallWrapper()),
+//	)
+//	cli := service.Client()
+//
+// It is also possible to use the `client.WrapCall`
+// (https://godoc.org/github.com/micro/go-micro/client#WrapCall) option with
+// `nrmicro.CallWrapper` and pass it to the `client.NewClient` method to
+// achieve the same result.
+//
+//	cli := client.NewClient(
+//		client.WrapCall(nrmicro.CallWrapper()),
+//	)
+//
+// 3. The third option is to wrap the Micro Client directly using
+// `nrmicro.ClientWrapper`. `ExternalSegment`s will be created each time a
+// `Call` or `Stream` method is called on the client.
+// `MessageProducerSegment`s will be created each time a `Publish` method is
+// called on the client. Example:
+//
+//	cli := client.NewClient()
+//	cli = nrmicro.ClientWrapper()(cli)
+//
+// Full client example:
+// https://github.com/newrelic/go-agent/blob/master/_integrations/nrmicro/example/client/client.go
+//
+// Micro Producers
+//
+// To instrument a Micro Producer, wrap the Micro Client using the
+// `nrmicro.ClientWrapper` as described in option 1 or 3 above.
+// `MessageProducerSegment`s will be created each time a `Publish` method is
+// called on the client. Be sure the context passed to the `Publish` method
+// contains a `newrelic.Transaction`.
+// +// service := micro.NewService( +// micro.WrapClient(nrmicro.ClientWrapper()), +// ) +// cli := service.Client() +// +// // Add the transaction to the context +// ctx := newrelic.NewContext(context.Background(), txn) +// msg := cli.NewMessage("my.example.topic", "hello world") +// err := cli.Publish(ctx, msg) +// +// Full Publisher/Subscriber example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrmicro/example/pubsub/main.go +// +// Micro Subscribers +// +// To instrument a Micro Subscriber use the `micro.WrapSubscriber` +// (https://godoc.org/github.com/micro/go-micro#WrapSubscriber) option with +// `nrmicro.SubscriberWrapper` and your `newrelic.Application` and pass it to +// the `micro.NewService` method. Example: +// +// cfg := newrelic.NewConfig("Micro Subscriber", os.Getenv("NEW_RELIC_LICENSE_KEY")) +// app, _ := newrelic.NewApplication(cfg) +// service := micro.NewService( +// micro.WrapSubscriber(nrmicro.SubscriberWrapper(app)), +// ) +// +// Alternatively, use the `server.WrapSubscriber` +// (https://godoc.org/github.com/micro/go-micro/server#WrapSubscriber) option +// with `nrmicro.SubscriberWrapper` and your `newrelic.Application` and pass it +// to the `server.NewServer` method. Example: +// +// cfg := newrelic.NewConfig("Micro Subscriber", os.Getenv("NEW_RELIC_LICENSE_KEY")) +// app, _ := newrelic.NewApplication(cfg) +// svr := server.NewServer( +// server.WrapSubscriber(nrmicro.SubscriberWrapper(app)), +// ) +// +// If more than one wrapper is passed to `micro.WrapSubscriber` or +// `server.WrapSubscriber` as a list, be sure that the `nrmicro.SubscriberWrapper` is +// first in this list. +// +// This wrapper creates background transactions for inbound calls. The +// transaction is added to the subscriber context and can be accessed in your +// subscriber handlers using `newrelic.FromContext`. +// +// If a Subscriber returns an error, it will be recorded and reported. 
+//
+// Full Publisher/Subscriber example:
+// https://github.com/newrelic/go-agent/blob/master/_integrations/nrmicro/example/pubsub/main.go
package nrmicro

import "github.com/newrelic/go-agent/internal"

// init records a supportability metric indicating this integration is in use.
func init() { internal.TrackUsage("integration", "framework", "micro") }
diff --git a/_integrations/nrmicro/nrmicro_test.go b/_integrations/nrmicro/nrmicro_test.go
new file mode 100644
index 000000000..7b33a7e57
--- /dev/null
+++ b/_integrations/nrmicro/nrmicro_test.go
@@ -0,0 +1,1031 @@
+package nrmicro
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/micro/go-micro"
+	"github.com/micro/go-micro/broker"
+	bmemory "github.com/micro/go-micro/broker/memory"
+	"github.com/micro/go-micro/client"
+	"github.com/micro/go-micro/client/selector"
+	microerrors "github.com/micro/go-micro/errors"
+	"github.com/micro/go-micro/metadata"
+	rmemory "github.com/micro/go-micro/registry/memory"
+	"github.com/micro/go-micro/server"
+	newrelic "github.com/newrelic/go-agent"
+	proto "github.com/newrelic/go-agent/_integrations/nrmicro/example/proto"
+	"github.com/newrelic/go-agent/internal"
+	"github.com/newrelic/go-agent/internal/integrationsupport"
+)
+
+// Sentinel values returned by getDTRequestHeaderVal when the distributed
+// trace header (or the metadata map itself) is absent from the context.
+const (
+	missingHeaders  = "HEADERS NOT FOUND"
+	missingMetadata = "METADATA NOT FOUND"
+	serverName      = "testing"
+	topic           = "topic"
+)
+
+type TestRequest struct{}
+
+// TestResponse echoes the inbound distributed trace header back to the
+// caller so tests can assert on what the server received.
+type TestResponse struct {
+	RequestHeaders string
+}
+
+// dtHeadersFound reports whether hdr holds an actual distributed trace
+// payload rather than one of the "not found" sentinel values.
+func dtHeadersFound(hdr string) bool {
+	return hdr != "" && hdr != missingMetadata && hdr != missingHeaders
+}
+
+// TestHandler is a working RPC handler used by the happy-path tests.
+type TestHandler struct{}
+
+// Method records the DT header it received and starts a segment so tests
+// can assert on both the response and the segment metrics.
+func (t *TestHandler) Method(ctx context.Context, req *TestRequest, rsp *TestResponse) error {
+	rsp.RequestHeaders = getDTRequestHeaderVal(ctx)
+	defer newrelic.StartSegment(newrelic.FromContext(ctx), "Method").End()
+	return nil
+}
+
+// StreamingMethod sends the received DT header back over the stream.
+func (t *TestHandler) StreamingMethod(ctx context.Context, stream server.Stream) error {
+	if err := stream.Send(getDTRequestHeaderVal(ctx)); nil != err {
+		return err
+	}
+	return nil
+}
+
+type 
TestHandlerWithError struct{}

// Method returns a Micro errors.Error (401 Unauthorized) so tests can check
// status-code-based error recording.
+func (t *TestHandlerWithError) Method(ctx context.Context, req *TestRequest, rsp *TestResponse) error {
+	rsp.RequestHeaders = getDTRequestHeaderVal(ctx)
+	return microerrors.Unauthorized("id", "format")
+}
+
+type TestHandlerWithNonMicroError struct{}
+
+// Method returns a plain error (not a Micro errors.Error) so tests can check
+// the 500 fallback behavior.
+func (t *TestHandlerWithNonMicroError) Method(ctx context.Context, req *TestRequest, rsp *TestResponse) error {
+	rsp.RequestHeaders = getDTRequestHeaderVal(ctx)
+	return errors.New("Non-Micro Error")
+}
+
+// getDTRequestHeaderVal extracts the distributed trace payload header from
+// the context metadata, returning a sentinel when the header or the metadata
+// is missing.
+func getDTRequestHeaderVal(ctx context.Context) string {
+	if md, ok := metadata.FromContext(ctx); ok {
+		if dtHeader, ok := md[newrelic.DistributedTracePayloadHeader]; ok {
+			return dtHeader
+		}
+		return missingHeaders
+	}
+	return missingMetadata
+}
+
+// createTestApp returns an ExpectApp configured by replyFn and cfgFn below.
+func createTestApp() integrationsupport.ExpectApp {
+	return integrationsupport.NewTestApp(replyFn, cfgFn)
+}
+
+// replyFn fakes a collector connect reply: sample everything and use fixed
+// account/app identifiers so distributed trace assertions are deterministic.
+var replyFn = func(reply *internal.ConnectReply) {
+	reply.AdaptiveSampler = internal.SampleEverything{}
+	reply.AccountID = "123"
+	reply.TrustedAccountKey = "123"
+	reply.PrimaryAppID = "456"
+}
+
+// cfgFn enables distributed tracing, forces transaction traces to always be
+// collected, and includes the message attributes the tests assert on.
+var cfgFn = func(cfg *newrelic.Config) {
+	cfg.Enabled = false
+	cfg.DistributedTracer.Enabled = true
+	cfg.TransactionTracer.SegmentThreshold = 0
+	cfg.TransactionTracer.Threshold.IsApdexFailing = false
+	cfg.TransactionTracer.Threshold.Duration = 0
+	cfg.Attributes.Include = append(cfg.Attributes.Include,
+		newrelic.AttributeMessageRoutingKey,
+		newrelic.AttributeMessageQueueName,
+		newrelic.AttributeMessageExchangeType,
+		newrelic.AttributeMessageReplyTo,
+		newrelic.AttributeMessageCorrelationID,
+	)
+}
+
+// newTestWrappedClientAndServer builds an in-memory-registry client/server
+// pair: the client is wrapped by wrapperOption, the server by
+// HandlerWrapper(app), and all three test handlers are registered.  The
+// server is started; callers must Stop it.
+func newTestWrappedClientAndServer(app newrelic.Application, wrapperOption client.Option, t *testing.T) (client.Client, server.Server) {
+	registry := rmemory.NewRegistry()
+	sel := selector.NewSelector(selector.Registry(registry))
+	c := client.NewClient(
+		client.Selector(sel),
+		wrapperOption,
+	)
+	s := server.NewServer(
+		server.Name(serverName),
+		server.Registry(registry),
+		
server.WrapHandler(HandlerWrapper(app)), + ) + s.Handle(s.NewHandler(new(TestHandler))) + s.Handle(s.NewHandler(new(TestHandlerWithError))) + s.Handle(s.NewHandler(new(TestHandlerWithNonMicroError))) + + if err := s.Start(); nil != err { + t.Fatal(err) + } + return c, s +} + +func TestClientCallWithNoTransaction(t *testing.T) { + c, s := newTestWrappedClientAndServer(createTestApp(), client.Wrap(ClientWrapper()), t) + defer s.Stop() + testClientCallWithNoTransaction(c, t) +} + +func TestClientCallWrapperWithNoTransaction(t *testing.T) { + c, s := newTestWrappedClientAndServer(createTestApp(), client.WrapCall(CallWrapper()), t) + defer s.Stop() + testClientCallWithNoTransaction(c, t) +} + +func testClientCallWithNoTransaction(c client.Client, t *testing.T) { + + ctx := context.Background() + req := c.NewRequest(serverName, "TestHandler.Method", &TestRequest{}, client.WithContentType("application/json")) + rsp := TestResponse{} + if err := c.Call(ctx, req, &rsp); nil != err { + t.Fatal("Error calling test client:", err) + } + if rsp.RequestHeaders != missingHeaders { + t.Error("Header should not be here", rsp.RequestHeaders) + } +} + +func TestClientCallWithTransaction(t *testing.T) { + c, s := newTestWrappedClientAndServer(createTestApp(), client.Wrap(ClientWrapper()), t) + defer s.Stop() + testClientCallWithTransaction(c, t) +} + +func TestClientCallWrapperWithTransaction(t *testing.T) { + c, s := newTestWrappedClientAndServer(createTestApp(), client.WrapCall(CallWrapper()), t) + defer s.Stop() + testClientCallWithTransaction(c, t) +} + +func testClientCallWithTransaction(c client.Client, t *testing.T) { + + req := c.NewRequest(serverName, "TestHandler.Method", &TestRequest{}, client.WithContentType("application/json")) + rsp := TestResponse{} + app := createTestApp() + txn := app.StartTransaction("name", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + if err := c.Call(ctx, req, &rsp); nil != err { + t.Fatal("Error calling test client:", err) + 
} + if !dtHeadersFound(rsp.RequestHeaders) { + t.Error("Incorrect header:", rsp.RequestHeaders) + } + + txn.End() + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/name", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/name", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/testing/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/testing/Micro/TestHandler.Method", Scope: "OtherTransaction/Go/name", Forced: false, Data: nil}, + {Name: "Supportability/DistributedTrace/CreatePayload/Success", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/name", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "http", + "component": "Micro", + "name": "External/testing/Micro/TestHandler.Method", + "parentId": internal.MatchAnything, + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/name", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/name", + Attributes: 
map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + SegmentName: "External/testing/Micro/TestHandler.Method", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }}) +} + +func TestClientCallMetadata(t *testing.T) { + c, s := newTestWrappedClientAndServer(createTestApp(), client.Wrap(ClientWrapper()), t) + defer s.Stop() + testClientCallMetadata(c, t) +} + +func TestCallMetadata(t *testing.T) { + c, s := newTestWrappedClientAndServer(createTestApp(), client.WrapCall(CallWrapper()), t) + defer s.Stop() + testClientCallMetadata(c, t) +} + +func testClientCallMetadata(c client.Client, t *testing.T) { + // test that context metadata is not changed by the newrelic wrapper + req := c.NewRequest(serverName, "TestHandler.Method", &TestRequest{}, client.WithContentType("application/json")) + rsp := TestResponse{} + app := createTestApp() + txn := app.StartTransaction("name", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + md := metadata.Metadata{ + "zip": "zap", + } + ctx = metadata.NewContext(ctx, md) + if err := c.Call(ctx, req, &rsp); nil != err { + t.Fatal("Error calling test client:", err) + } + if len(md) != 1 || md["zip"] != "zap" { + t.Error("metadata changed:", md) + } +} + +func waitOrTimeout(t *testing.T, wg *sync.WaitGroup) { + ch := make(chan struct{}) + go func() { + defer close(ch) + wg.Wait() + }() + select { + case <-ch: + case <-time.After(time.Second): + t.Fatal("timeout waiting for message") + } +} + +func TestClientPublishWithNoTransaction(t *testing.T) { + c, _, b := newTestClientServerAndBroker(createTestApp(), t) + + var wg sync.WaitGroup + if err := b.Connect(); nil != err { + t.Fatal("broker connect error:", err) + } + defer b.Disconnect() + if _, err := b.Subscribe(topic, func(e broker.Event) error { + defer wg.Done() + h := e.Message().Header + if _, ok := h[newrelic.DistributedTracePayloadHeader]; ok { + t.Error("Distributed tracing headers 
found", h) + } + return nil + }); nil != err { + t.Fatal("Failure to subscribe to broker:", err) + } + + ctx := context.Background() + msg := c.NewMessage(topic, "hello world") + wg.Add(1) + if err := c.Publish(ctx, msg); nil != err { + t.Fatal("Error calling test client:", err) + } + waitOrTimeout(t, &wg) +} + +func TestClientPublishWithTransaction(t *testing.T) { + c, _, b := newTestClientServerAndBroker(createTestApp(), t) + + var wg sync.WaitGroup + if err := b.Connect(); nil != err { + t.Fatal("broker connect error:", err) + } + defer b.Disconnect() + if _, err := b.Subscribe(topic, func(e broker.Event) error { + defer wg.Done() + h := e.Message().Header + if _, ok := h[newrelic.DistributedTracePayloadHeader]; !ok { + t.Error("Distributed tracing headers not found", h) + } + return nil + }); nil != err { + t.Fatal("Failure to subscribe to broker:", err) + } + + app := createTestApp() + txn := app.StartTransaction("name", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + msg := c.NewMessage(topic, "hello world") + wg.Add(1) + if err := c.Publish(ctx, msg); nil != err { + t.Fatal("Error calling test client:", err) + } + waitOrTimeout(t, &wg) + + txn.End() + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "MessageBroker/Micro/Topic/Produce/Named/topic", Scope: "", Forced: false, Data: nil}, + {Name: "MessageBroker/Micro/Topic/Produce/Named/topic", Scope: "OtherTransaction/Go/name", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/name", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/name", Scope: "", Forced: false, Data: nil}, + {Name: 
"Supportability/DistributedTrace/CreatePayload/Success", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/name", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "MessageBroker/Micro/Topic/Produce/Named/topic", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/name", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/name", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + SegmentName: "MessageBroker/Micro/Topic/Produce/Named/topic", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }}) +} + +func TestExtractHost(t *testing.T) { + testcases := []struct { + input string + expect string + }{ + { + input: "192.168.0.10", + expect: "192.168.0.10", + }, + { + input: "192.168.0.10:1234", + expect: "192.168.0.10:1234", + }, + { + input: "unix:///path/to/file", + expect: "localhost", + }, + { + input: "nats://127.0.0.1:4222", + expect: "127.0.0.1:4222", + }, + { + input: "scheme://user:pass@host.com:5432/path?k=v#f", + expect: "host.com:5432", + }, + } + + for _, test := range testcases { + if actual := extractHost(test.input); actual != test.expect { + t.Errorf("incorrect host value extracted: actual=%s expected=%s", actual, test.expect) + } + } +} + +func TestClientStreamWrapperWithNoTransaction(t *testing.T) { + c, s := newTestWrappedClientAndServer(createTestApp(), 
client.Wrap(ClientWrapper()), t) + defer s.Stop() + + ctx := context.Background() + req := c.NewRequest( + serverName, + "TestHandler.StreamingMethod", + &TestRequest{}, + client.WithContentType("application/json"), + client.StreamingRequest(), + ) + stream, err := c.Stream(ctx, req) + defer stream.Close() + if nil != err { + t.Fatal("Error calling test client:", err) + } + + var resp string + err = stream.Recv(&resp) + if nil != err { + t.Fatal(err) + } + if dtHeadersFound(resp) { + t.Error("dt headers found:", resp) + } + + err = stream.Recv(&resp) + if nil == err { + t.Fatal("should have received EOF error from server") + } +} + +func TestClientStreamWrapperWithTransaction(t *testing.T) { + c, s := newTestWrappedClientAndServer(createTestApp(), client.Wrap(ClientWrapper()), t) + defer s.Stop() + + app := createTestApp() + txn := app.StartTransaction("name", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + req := c.NewRequest( + serverName, + "TestHandler.StreamingMethod", + &TestRequest{}, + client.WithContentType("application/json"), + client.StreamingRequest(), + ) + stream, err := c.Stream(ctx, req) + defer stream.Close() + if nil != err { + t.Fatal("Error calling test client:", err) + } + + var resp string + // second outgoing request to server, ensures we only create a single + // metric for the entire streaming cycle + if err := stream.Send(&resp); nil != err { + t.Fatal(err) + } + + // receive the distributed trace headers from the server + if err := stream.Recv(&resp); nil != err { + t.Fatal(err) + } + if !dtHeadersFound(resp) { + t.Error("dt headers not found:", resp) + } + + // exhaust the stream + if err := stream.Recv(&resp); nil == err { + t.Fatal("should have received EOF error from server") + } + + txn.End() + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/name", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: 
"OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/name", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/testing/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/testing/Micro/TestHandler.StreamingMethod", Scope: "OtherTransaction/Go/name", Forced: false, Data: []float64{1}}, + {Name: "Supportability/DistributedTrace/CreatePayload/Success", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/name", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "http", + "component": "Micro", + "name": "External/testing/Micro/TestHandler.StreamingMethod", + "parentId": internal.MatchAnything, + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/name", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/name", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + SegmentName: "External/testing/Micro/TestHandler.StreamingMethod", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }}) +} + +func TestServerWrapperWithNoApp(t *testing.T) 
{ + c, s := newTestWrappedClientAndServer(nil, client.Wrap(ClientWrapper()), t) + defer s.Stop() + ctx := context.Background() + req := c.NewRequest(serverName, "TestHandler.Method", &TestRequest{}, client.WithContentType("application/json")) + rsp := TestResponse{} + if err := c.Call(ctx, req, &rsp); nil != err { + t.Fatal("Error calling test client:", err) + } + if rsp.RequestHeaders != missingHeaders { + t.Error("Header should not be here", rsp.RequestHeaders) + } +} + +func TestServerWrapperWithApp(t *testing.T) { + app := createTestApp() + c, s := newTestWrappedClientAndServer(app, client.Wrap(ClientWrapper()), t) + defer s.Stop() + ctx := context.Background() + txn := app.StartTransaction("txn", nil, nil) + defer txn.End() + ctx = newrelic.NewContext(ctx, txn) + req := c.NewRequest(serverName, "TestHandler.Method", &TestRequest{}, client.WithContentType("application/json")) + rsp := TestResponse{} + if err := c.Call(ctx, req, &rsp); nil != err { + t.Fatal("Error calling test client:", err) + } + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "Supportability/DistributedTrace/AcceptPayload/Success", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/TestHandler.Method", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction/Go/TestHandler.Method", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: 
"WebTransactionTotalTime/Go/TestHandler.Method", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/Method", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/Method", Scope: "WebTransaction/Go/TestHandler.Method", Forced: false, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "WebTransaction/Go/TestHandler.Method", + "nr.entryPoint": true, + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "Custom/Method", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "WebTransaction/Go/TestHandler.Method", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "WebTransaction/Go/TestHandler.Method", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + SegmentName: "Custom/Method", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/TestHandler.Method", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + "nr.apdexPerfZone": "S", + "parent.account": 123, + "parent.transportType": "HTTP", + "parent.app": 456, + "parentId": internal.MatchAnything, + "parent.type": "App", + "parent.transportDuration": internal.MatchAnything, + "parentSpanId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + 
"request.method": "TestHandler.Method", + "request.uri": "micro://testing/TestHandler.Method", + "request.headers.accept": "application/json", + "request.headers.contentType": "application/json", + "request.headers.contentLength": 3, + "httpResponseCode": "200", + }, + }}) +} + +func TestServerWrapperWithAppReturnsError(t *testing.T) { + app := createTestApp() + c, s := newTestWrappedClientAndServer(app, client.Wrap(ClientWrapper()), t) + defer s.Stop() + ctx := context.Background() + req := c.NewRequest(serverName, "TestHandlerWithError.Method", &TestRequest{}, client.WithContentType("application/json")) + rsp := TestResponse{} + if err := c.Call(ctx, req, &rsp); nil == err { + t.Fatal("Expected an error but did not get one") + } + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex/Go/TestHandlerWithError.Method", Scope: "", Forced: false, Data: nil}, + {Name: "Errors/all", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/WebTransaction/Go/TestHandlerWithError.Method", Scope: "", Forced: true, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "WebTransaction/Go/TestHandlerWithError.Method", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/TestHandlerWithError.Method", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, 
[]internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "WebTransaction/Go/TestHandlerWithError.Method", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "WebTransaction/Go/TestHandlerWithError.Method", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "WebTransaction/Go/TestHandlerWithError.Method", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{}, + }}, + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/TestHandlerWithError.Method", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + "nr.apdexPerfZone": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "request.method": "TestHandlerWithError.Method", + "request.uri": "micro://testing/TestHandlerWithError.Method", + "request.headers.accept": "application/json", + "request.headers.contentType": "application/json", + "request.headers.contentLength": 3, + "httpResponseCode": 401, + }, + }}) + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "WebTransaction/Go/TestHandlerWithError.Method", + Msg: "Unauthorized", + Klass: "401", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.message": "Unauthorized", + "error.class": "401", + "transactionName": "WebTransaction/Go/TestHandlerWithError.Method", + "traceId": internal.MatchAnything, + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "sampled": "true", + }, + }}) +} + +func 
TestServerWrapperWithAppReturnsNonMicroError(t *testing.T) { + app := createTestApp() + c, s := newTestWrappedClientAndServer(app, client.Wrap(ClientWrapper()), t) + defer s.Stop() + ctx := context.Background() + req := c.NewRequest("testing", "TestHandlerWithNonMicroError.Method", &TestRequest{}, client.WithContentType("application/json")) + rsp := TestResponse{} + if err := c.Call(ctx, req, &rsp); nil == err { + t.Fatal("Expected an error but did not get one") + } + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "Apdex/Go/TestHandlerWithNonMicroError.Method", Scope: "", Forced: false, Data: nil}, + {Name: "Errors/all", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/WebTransaction/Go/TestHandlerWithNonMicroError.Method", Scope: "", Forced: true, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "WebTransaction/Go/TestHandlerWithNonMicroError.Method", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/TestHandlerWithNonMicroError.Method", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", Scope: "", Forced: false, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/TestHandlerWithNonMicroError.Method", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + 
"traceId": internal.MatchAnything, + "nr.apdexPerfZone": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "request.method": "TestHandlerWithNonMicroError.Method", + "request.uri": "micro://testing/TestHandlerWithNonMicroError.Method", + "request.headers.accept": "application/json", + "request.headers.contentType": "application/json", + "request.headers.contentLength": 3, + "httpResponseCode": 500, + }, + }}) + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "WebTransaction/Go/TestHandlerWithNonMicroError.Method", + Msg: "Internal Server Error", + Klass: "500", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.message": "Internal Server Error", + "error.class": "500", + "transactionName": "WebTransaction/Go/TestHandlerWithNonMicroError.Method", + "traceId": internal.MatchAnything, + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "sampled": "true", + }, + }}) +} + +func TestServerSubscribeNoApp(t *testing.T) { + c, s, b := newTestClientServerAndBroker(nil, t) + defer s.Stop() + + var wg sync.WaitGroup + if err := b.Connect(); nil != err { + t.Fatal("broker connect error:", err) + } + defer b.Disconnect() + err := micro.RegisterSubscriber(topic, s, func(ctx context.Context, msg *proto.HelloRequest) error { + defer wg.Done() + return nil + }) + if err != nil { + t.Fatal("error registering subscriber", err) + } + if err := s.Start(); nil != err { + t.Fatal(err) + } + + ctx := context.Background() + msg := c.NewMessage(topic, &proto.HelloRequest{Name: "test"}) + wg.Add(1) + if err := c.Publish(ctx, msg); nil != err { + t.Fatal("Error calling publish:", err) + } + waitOrTimeout(t, &wg) +} + +func TestServerSubscribe(t *testing.T) { + app := createTestApp() + c, s, _ := newTestClientServerAndBroker(app, t) + + var wg sync.WaitGroup + err := micro.RegisterSubscriber(topic, s, func(ctx context.Context, msg 
*proto.HelloRequest) error { + txn := newrelic.FromContext(ctx) + defer newrelic.StartSegment(txn, "segment").End() + defer wg.Done() + return nil + }) + if err != nil { + t.Fatal("error registering subscriber", err) + } + if err := s.Start(); nil != err { + t.Fatal(err) + } + + ctx := context.Background() + msg := c.NewMessage(topic, &proto.HelloRequest{Name: "test"}) + wg.Add(1) + txn := app.StartTransaction("pub", nil, nil) + ctx = newrelic.NewContext(ctx, txn) + if err := c.Publish(ctx, msg); nil != err { + t.Fatal("Error calling publish:", err) + } + defer txn.End() + waitOrTimeout(t, &wg) + s.Stop() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/Message/Micro/Topic/Named/topic", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/Message/Micro/Topic/Named/topic", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/segment", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/segment", Scope: "OtherTransaction/Go/Message/Micro/Topic/Named/topic", Forced: false, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "Supportability/DistributedTrace/AcceptPayload/Success", Scope: "", Forced: true, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/App/123/456/HTTP/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "TransportDuration/App/123/456/HTTP/all", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + "nr.entryPoint": true, + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: 
map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "Custom/segment", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "guid": internal.MatchAnything, + "name": "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + "parent.account": 123, + "parent.app": 456, + "parent.transportDuration": internal.MatchAnything, + "parent.transportType": "HTTP", + "parent.type": "App", + "parentId": internal.MatchAnything, + "parentSpanId": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + AgentAttributes: map[string]interface{}{ + "message.routingKey": "topic", + }, + UserAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{{ + SegmentName: "Custom/segment", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{}}, + }, + }}, + }, + }}) +} + +func TestServerSubscribeWithError(t *testing.T) { + app := createTestApp() + c, s, _ := newTestClientServerAndBroker(app, t) + + var wg sync.WaitGroup + err := micro.RegisterSubscriber(topic, s, func(ctx context.Context, msg *proto.HelloRequest) error { + defer wg.Done() + return errors.New("subscriber error") + }) + if err != nil { + t.Fatal("error registering subscriber", err) + } + if err := s.Start(); nil != err { + t.Fatal(err) + } + + 
ctx := context.Background() + msg := c.NewMessage(topic, &proto.HelloRequest{Name: "test"}) + wg.Add(1) + if err := c.Publish(ctx, msg); nil == err { + t.Fatal("Expected error but didn't get one") + } + waitOrTimeout(t, &wg) + s.Stop() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/Go/Message/Micro/Topic/Named/topic", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/Message/Micro/Topic/Named/topic", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "Errors/all", Scope: "", Forced: true, Data: nil}, + {Name: "Errors/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "Errors/OtherTransaction/Go/Message/Micro/Topic/Named/topic", Scope: "", Forced: true, Data: nil}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + Attributes: 
map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{}, + }}, + }, + }}) + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + Msg: "subscriber error", + Klass: "*errors.errorString", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.message": "subscriber error", + "error.class": "*errors.errorString", + "transactionName": "OtherTransaction/Go/Message/Micro/Topic/Named/topic", + "traceId": internal.MatchAnything, + "priority": internal.MatchAnything, + "guid": internal.MatchAnything, + "sampled": "true", + }, + }}) +} + +func newTestClientServerAndBroker(app newrelic.Application, t *testing.T) (client.Client, server.Server, broker.Broker) { + b := bmemory.NewBroker() + c := client.NewClient( + client.Broker(b), + client.Wrap(ClientWrapper()), + ) + s := server.NewServer( + server.Name(serverName), + server.Broker(b), + server.WrapSubscriber(SubscriberWrapper(app)), + ) + return c, s, b +} diff --git a/_integrations/nrmongo/README.md b/_integrations/nrmongo/README.md new file mode 100644 index 000000000..03296bd06 --- /dev/null +++ b/_integrations/nrmongo/README.md @@ -0,0 +1,10 @@ +# _integrations/nrmongo [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrmongo) + +Package `nrmongo` instruments https://github.com/mongodb/mongo-go-driver + +```go +import "github.com/newrelic/go-agent/_integrations/nrmongo" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrmongo). 
diff --git a/_integrations/nrmongo/example/main.go b/_integrations/nrmongo/example/main.go new file mode 100644 index 000000000..bb8bf305e --- /dev/null +++ b/_integrations/nrmongo/example/main.go @@ -0,0 +1,48 @@ +package main + +import ( + "context" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrmongo" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func main() { + config := newrelic.NewConfig("Basic Mongo Example", os.Getenv("NEW_RELIC_LICENSE_KEY")) + config.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(config) + if nil != err { + panic(err) + } + app.WaitForConnection(10 * time.Second) + + // If you have another CommandMonitor, you can pass it to NewCommandMonitor and it will get called along + // with the NR monitor + nrMon := nrmongo.NewCommandMonitor(nil) + ctx := context.Background() + + // nrMon must be added after any other monitors are added, as previous options get overwritten. 
+ // This example assumes Mongo is running locally on port 27017 + client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017").SetMonitor(nrMon)) + if err != nil { + panic(err) + } + defer client.Disconnect(ctx) + + txn := app.StartTransaction("Mongo txn", nil, nil) + // Make sure to add the newrelic.Transaction to the context + nrCtx := newrelic.NewContext(context.Background(), txn) + collection := client.Database("testing").Collection("numbers") + _, err = collection.InsertOne(nrCtx, bson.M{"name": "exampleName", "value": "exampleValue"}) + if err != nil { + panic(err) + } + txn.End() + app.Shutdown(10 * time.Second) + +} diff --git a/_integrations/nrmongo/nrmongo.go b/_integrations/nrmongo/nrmongo.go new file mode 100644 index 000000000..48bc2a87d --- /dev/null +++ b/_integrations/nrmongo/nrmongo.go @@ -0,0 +1,156 @@ +// Package nrmongo instruments https://github.com/mongodb/mongo-go-driver +// +// Use this package to instrument your MongoDB calls without having to manually +// create DatastoreSegments. To do so, first set the monitor in the connect +// options using `SetMonitor` +// (https://godoc.org/go.mongodb.org/mongo-driver/mongo/options#ClientOptions.SetMonitor): +// +// nrMon := nrmongo.NewCommandMonitor(nil) +// client, err := mongo.Connect(ctx, options.Client().SetMonitor(nrMon)) +// +// Note that it is important that this `nrmongo` monitor is the last monitor +// set, otherwise it will be overwritten. 
If needing to use more than one +// `event.CommandMonitor`, pass the original monitor to the +// `nrmongo.NewCommandMonitor` function: +// +// origMon := &event.CommandMonitor{ +// Started: origStarted, +// Succeeded: origSucceeded, +// Failed: origFailed, +// } +// nrMon := nrmongo.NewCommandMonitor(origMon) +// client, err := mongo.Connect(ctx, options.Client().SetMonitor(nrMon)) +// +// Then add the current transaction to the context used in any MongoDB call: +// +// ctx = newrelic.NewContext(context.Background(), txn) +// resp, err := collection.InsertOne(ctx, bson.M{"name": "pi", "value": 3.14159}) +package nrmongo + +import ( + "context" + "regexp" + "sync" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "go.mongodb.org/mongo-driver/event" +) + +func init() { internal.TrackUsage("integration", "datastore", "mongo") } + +type mongoMonitor struct { + segmentMap map[int64]*newrelic.DatastoreSegment + origCommMon *event.CommandMonitor + sync.Mutex +} + +// The Mongo connection ID is constructed as: `fmt.Sprintf("%s[-%d]", addr, nextConnectionID())`, +// where addr is of the form `host:port` (or `a.sock` for unix sockets) +// See https://github.com/mongodb/mongo-go-driver/blob/b39cd78ce7021252efee2fb44aa6e492d67680ef/x/mongo/driver/topology/connection.go#L68 +// and https://github.com/mongodb/mongo-go-driver/blob/b39cd78ce7021252efee2fb44aa6e492d67680ef/x/mongo/driver/address/addr.go +var connIDPattern = regexp.MustCompile(`([^:\[]+)(?::(\d+))?\[-\d+]`) + +// NewCommandMonitor returns a new `*event.CommandMonitor` +// (https://godoc.org/go.mongodb.org/mongo-driver/event#CommandMonitor). If +// provided, the original `*event.CommandMonitor` will be called as well. The +// returned `*event.CommandMonitor` creates `newrelic.DatastoreSegment`s +// (https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment) for each +// database call. +// +// // Use `SetMonitor` to register the CommandMonitor. 
+// client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017").SetMonitor(nrmongo.NewCommandMonitor(nil))) +// if err != nil { +// log.Fatal(err) +// } +// +// // Add transaction to the context. This step is required. +// ctx = newrelic.NewContext(ctx, txn) +// +// collection := client.Database("testing").Collection("numbers") +// resp, err := collection.InsertOne(ctx, bson.M{"name": "pi", "value": 3.14159}) +// if err != nil { +// log.Fatal(err) +// } +func NewCommandMonitor(original *event.CommandMonitor) *event.CommandMonitor { + m := mongoMonitor{ + segmentMap: make(map[int64]*newrelic.DatastoreSegment), + origCommMon: original, + } + return &event.CommandMonitor{ + Started: m.started, + Succeeded: m.succeeded, + Failed: m.failed, + } +} + +func (m *mongoMonitor) started(ctx context.Context, e *event.CommandStartedEvent) { + if m.origCommMon != nil && m.origCommMon.Started != nil { + m.origCommMon.Started(ctx, e) + } + txn := newrelic.FromContext(ctx) + if txn == nil { + return + } + host, port := calcHostAndPort(e.ConnectionID) + sgmt := newrelic.DatastoreSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Product: newrelic.DatastoreMongoDB, + Collection: collName(e), + Operation: e.CommandName, + Host: host, + PortPathOrID: port, + DatabaseName: e.DatabaseName, + } + m.addSgmt(e, &sgmt) +} + +func collName(e *event.CommandStartedEvent) string { + coll := e.Command.Lookup(e.CommandName) + collName, _ := coll.StringValueOK() + return collName +} + +func (m *mongoMonitor) addSgmt(e *event.CommandStartedEvent, sgmt *newrelic.DatastoreSegment) { + m.Lock() + defer m.Unlock() + m.segmentMap[e.RequestID] = sgmt +} + +func (m *mongoMonitor) succeeded(ctx context.Context, e *event.CommandSucceededEvent) { + m.endSgmtIfExists(e.RequestID) + if m.origCommMon != nil && m.origCommMon.Succeeded != nil { + m.origCommMon.Succeeded(ctx, e) + } +} + +func (m *mongoMonitor) failed(ctx context.Context, e *event.CommandFailedEvent) { + 
m.endSgmtIfExists(e.RequestID) + if m.origCommMon != nil && m.origCommMon.Failed != nil { + m.origCommMon.Failed(ctx, e) + } +} + +func (m *mongoMonitor) endSgmtIfExists(id int64) { + m.getAndRemoveSgmt(id).End() +} + +func (m *mongoMonitor) getAndRemoveSgmt(id int64) *newrelic.DatastoreSegment { + m.Lock() + defer m.Unlock() + sgmt := m.segmentMap[id] + if sgmt != nil { + delete(m.segmentMap, id) + } + return sgmt +} + +func calcHostAndPort(connID string) (host string, port string) { + // FindStringSubmatch either returns nil or an array of the size # of submatches + 1 (in this case 3) + addressParts := connIDPattern.FindStringSubmatch(connID) + if len(addressParts) == 3 { + host = addressParts[1] + port = addressParts[2] + } + return +} diff --git a/_integrations/nrmongo/nrmongo_test.go b/_integrations/nrmongo/nrmongo_test.go new file mode 100644 index 000000000..fbc7b9d65 --- /dev/null +++ b/_integrations/nrmongo/nrmongo_test.go @@ -0,0 +1,245 @@ +package nrmongo + +import ( + "context" + "testing" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/event" +) + +var ( + connID = "localhost:27017[-1]" + reqID int64 = 10 + raw, _ = bson.Marshal(bson.D{primitive.E{Key: "commName", Value: "collName"}, {Key: "$db", Value: "testing"}}) + ste = &event.CommandStartedEvent{ + Command: raw, + DatabaseName: "testdb", + CommandName: "commName", + RequestID: reqID, + ConnectionID: connID, + } + finishedEvent = event.CommandFinishedEvent{ + DurationNanos: 5, + CommandName: "name", + RequestID: reqID, + ConnectionID: connID, + } + se = &event.CommandSucceededEvent{ + CommandFinishedEvent: finishedEvent, + Reply: nil, + } + fe = &event.CommandFailedEvent{ + CommandFinishedEvent: finishedEvent, + Failure: "failureCause", + } +) + +func TestOrigMonitorsAreCalled(t 
*testing.T) { + var started, succeeded, failed bool + origMonitor := &event.CommandMonitor{ + Started: func(ctx context.Context, e *event.CommandStartedEvent) { started = true }, + Succeeded: func(ctx context.Context, e *event.CommandSucceededEvent) { succeeded = true }, + Failed: func(ctx context.Context, e *event.CommandFailedEvent) { failed = true }, + } + ctx := context.Background() + nrMonitor := NewCommandMonitor(origMonitor) + + nrMonitor.Started(ctx, ste) + if !started { + t.Error("started not called") + } + nrMonitor.Succeeded(ctx, se) + if !succeeded { + t.Error("succeeded not called") + } + nrMonitor.Failed(ctx, fe) + if !failed { + t.Error("failed not called") + } +} + +func TestClientOptsWithNullFunctions(t *testing.T) { + origMonitor := &event.CommandMonitor{} // the monitor isn't nil, but its functions are. + ctx := context.Background() + nrMonitor := NewCommandMonitor(origMonitor) + + // Verifying no nil pointer dereferences + nrMonitor.Started(ctx, ste) + nrMonitor.Succeeded(ctx, se) + nrMonitor.Failed(ctx, fe) +} + +func TestHostAndPort(t *testing.T) { + type hostAndPort struct { + host string + port string + } + testCases := map[string]hostAndPort{ + "localhost:8080[-1]": {host: "localhost", port: "8080"}, + "something.com:987[-789]": {host: "something.com", port: "987"}, + "thisformatiswrong": {host: "", port: ""}, + "somethingunix.sock[-876]": {host: "somethingunix.sock", port: ""}, + "/var/dir/path/somethingunix.sock[-876]": {host: "/var/dir/path/somethingunix.sock", port: ""}, + } + for test, expected := range testCases { + h, p := calcHostAndPort(test) + if expected.host != h { + t.Errorf("unexpected host - expected %s, got %s", expected.host, h) + } + if expected.port != p { + t.Errorf("unexpected port - expected %s, got %s", expected.port, p) + } + } +} + +func TestMonitor(t *testing.T) { + var started, succeeded, failed bool + origMonitor := &event.CommandMonitor{ + Started: func(ctx context.Context, e *event.CommandStartedEvent) { 
started = true }, + Succeeded: func(ctx context.Context, e *event.CommandSucceededEvent) { succeeded = true }, + Failed: func(ctx context.Context, e *event.CommandFailedEvent) { failed = true }, + } + nrMonitor := mongoMonitor{ + segmentMap: make(map[int64]*newrelic.DatastoreSegment), + origCommMon: origMonitor, + } + app := createTestApp() + txn := app.StartTransaction("txnName", nil, nil) + ctx := newrelic.NewContext(context.Background(), txn) + nrMonitor.started(ctx, ste) + if !started { + t.Error("Original monitor not started") + } + if len(nrMonitor.segmentMap) != 1 { + t.Errorf("Wrong number of segments, expected 1 but got %d", len(nrMonitor.segmentMap)) + } + nrMonitor.succeeded(ctx, se) + if !succeeded { + t.Error("Original monitor not succeeded") + } + if len(nrMonitor.segmentMap) != 0 { + t.Errorf("Wrong number of segments, expected 0 but got %d", len(nrMonitor.segmentMap)) + } + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransactionTotalTime/Go/txnName", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/instance/MongoDB/" + internal.ThisHost + "/27017", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/operation/MongoDB/commName", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/txnName", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allOther", Scope: "", Forced: true, Data: []float64{1.0}}, + {Name: "Datastore/MongoDB/all", Scope: "", Forced: true, Data: []float64{1.0}}, + {Name: "Datastore/MongoDB/allOther", Scope: "", Forced: true, Data: []float64{1.0}}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: 
"Datastore/statement/MongoDB/collName/commName", Scope: "", Forced: false, Data: []float64{1.0}}, + {Name: "Datastore/statement/MongoDB/collName/commName", Scope: "OtherTransaction/Go/txnName", Forced: false, Data: []float64{1.0}}, + }) + app.ExpectSpanEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/txnName", + "sampled": true, + "category": "generic", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "name": "Datastore/statement/MongoDB/collName/commName", + "sampled": true, + "category": "datastore", + "component": "MongoDB", + "span.kind": "client", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "peer.address": internal.ThisHost + ":27017", + "peer.hostname": internal.ThisHost, + "db.statement": "'commName' on 'collName' using 'MongoDB'", + "db.instance": "testdb", + "db.collection": "collName", + }, + }, + }) + + txn = app.StartTransaction("txnName", nil, nil) + ctx = newrelic.NewContext(context.Background(), txn) + nrMonitor.started(ctx, ste) + if len(nrMonitor.segmentMap) != 1 { + t.Errorf("Wrong number of segments, expected 1 but got %d", len(nrMonitor.segmentMap)) + } + nrMonitor.failed(ctx, fe) + if !failed { + t.Error("Original monitor not failed") + } + if len(nrMonitor.segmentMap) != 0 { + t.Errorf("Wrong number of segments, expected 0 but got %d", len(nrMonitor.segmentMap)) + } + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransactionTotalTime/Go/txnName", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/instance/MongoDB/" + internal.ThisHost + "/27017", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/operation/MongoDB/commName", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/txnName", Scope: "", Forced: true, Data: nil}, + 
{Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allOther", Scope: "", Forced: true, Data: []float64{2.0}}, + {Name: "Datastore/MongoDB/all", Scope: "", Forced: true, Data: []float64{2.0}}, + {Name: "Datastore/MongoDB/allOther", Scope: "", Forced: true, Data: []float64{2.0}}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MongoDB/collName/commName", Scope: "", Forced: false, Data: []float64{2.0}}, + {Name: "Datastore/statement/MongoDB/collName/commName", Scope: "OtherTransaction/Go/txnName", Forced: false, Data: []float64{2.0}}, + }) +} + +func TestCollName(t *testing.T) { + command := "find" + ex1, _ := bson.Marshal(bson.D{{Key: command, Value: "numbers"}, {Key: "$db", Value: "testing"}}) + ex2, _ := bson.Marshal(bson.D{{Key: "filter", Value: ""}}) + testCases := map[string]bson.Raw{ + "numbers": ex1, + "": ex2, + } + for name, raw := range testCases { + e := event.CommandStartedEvent{ + Command: raw, + CommandName: command, + } + result := collName(&e) + if result != name { + t.Errorf("Wrong collection name: %s", result) + } + } + +} + +func createTestApp() integrationsupport.ExpectApp { + return integrationsupport.NewTestApp(replyFn, cfgFn) +} + +var cfgFn = func(cfg *newrelic.Config) { + cfg.Enabled = false + cfg.DistributedTracer.Enabled = true + cfg.TransactionTracer.SegmentThreshold = 0 + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 +} + +var replyFn = func(reply *internal.ConnectReply) { + reply.AdaptiveSampler = internal.SampleEverything{} +} diff --git a/_integrations/nrmysql/README.md b/_integrations/nrmysql/README.md new file mode 
100644 index 000000000..c1f8ed267 --- /dev/null +++ b/_integrations/nrmysql/README.md @@ -0,0 +1,10 @@ +# _integrations/nrmysql [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrmysql) + +Package `nrmysql` instruments https://github.com/go-sql-driver/mysql. + +```go +import "github.com/newrelic/go-agent/_integrations/nrmysql" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrmysql). diff --git a/_integrations/nrmysql/example/main.go b/_integrations/nrmysql/example/main.go new file mode 100644 index 000000000..7371555a8 --- /dev/null +++ b/_integrations/nrmysql/example/main.go @@ -0,0 +1,48 @@ +package main + +import ( + "context" + "database/sql" + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent" + _ "github.com/newrelic/go-agent/_integrations/nrmysql" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + // Set up a local mysql docker container with: + // docker run -it -p 3306:3306 --net "bridge" -e MYSQL_ALLOW_EMPTY_PASSWORD=true mysql + + db, err := sql.Open("nrmysql", "root@/information_schema") + if nil != err { + panic(err) + } + + cfg := newrelic.NewConfig("MySQL App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + app.WaitForConnection(5 * time.Second) + txn := app.StartTransaction("mysqlQuery", nil, nil) + + ctx := newrelic.NewContext(context.Background(), txn) + row := db.QueryRowContext(ctx, "SELECT count(*) from tables") + var count int + row.Scan(&count) + + txn.End() + app.Shutdown(5 * time.Second) + + fmt.Println("number of tables in information_schema", count) +} diff --git a/_integrations/nrmysql/nrmysql.go b/_integrations/nrmysql/nrmysql.go new file mode 100644 index 000000000..2b05b7028 --- /dev/null +++ 
b/_integrations/nrmysql/nrmysql.go @@ -0,0 +1,102 @@ +// +build go1.10 + +// Package nrmysql instruments https://github.com/go-sql-driver/mysql. +// +// Use this package to instrument your MySQL calls without having to manually +// create DatastoreSegments. This is done in a two step process: +// +// 1. Use this package's driver in place of the mysql driver. +// +// If your code is using sql.Open like this: +// +// import ( +// _ "github.com/go-sql-driver/mysql" +// ) +// +// func main() { +// db, err := sql.Open("mysql", "user@unix(/path/to/socket)/dbname") +// } +// +// Then change the side-effect import to this package, and open "nrmysql" instead: +// +// import ( +// _ "github.com/newrelic/go-agent/_integrations/nrmysql" +// ) +// +// func main() { +// db, err := sql.Open("nrmysql", "user@unix(/path/to/socket)/dbname") +// } +// +// 2. Provide a context containing a newrelic.Transaction to all exec and query +// methods on sql.DB, sql.Conn, sql.Tx, and sql.Stmt. This requires using the +// context methods ExecContext, QueryContext, and QueryRowContext in place of +// Exec, Query, and QueryRow respectively. 
For example, instead of the +// following: +// +// row := db.QueryRow("SELECT count(*) from tables") +// +// Do this: +// +// ctx := newrelic.NewContext(context.Background(), txn) +// row := db.QueryRowContext(ctx, "SELECT count(*) from tables") +// +// A working example is shown here: +// https://github.com/newrelic/go-agent/tree/master/_integrations/nrmysql/example/main.go +package nrmysql + +import ( + "database/sql" + "net" + + "github.com/go-sql-driver/mysql" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/sqlparse" +) + +var ( + baseBuilder = newrelic.SQLDriverSegmentBuilder{ + BaseSegment: newrelic.DatastoreSegment{ + Product: newrelic.DatastoreMySQL, + }, + ParseQuery: sqlparse.ParseQuery, + ParseDSN: parseDSN, + } +) + +func init() { + sql.Register("nrmysql", newrelic.InstrumentSQLDriver(mysql.MySQLDriver{}, baseBuilder)) + internal.TrackUsage("integration", "driver", "mysql") +} + +func parseDSN(s *newrelic.DatastoreSegment, dsn string) { + cfg, err := mysql.ParseDSN(dsn) + if nil != err { + return + } + parseConfig(s, cfg) +} + +func parseConfig(s *newrelic.DatastoreSegment, cfg *mysql.Config) { + s.DatabaseName = cfg.DBName + + var host, ppoid string + switch cfg.Net { + case "unix", "unixgram", "unixpacket": + host = "localhost" + ppoid = cfg.Addr + case "cloudsql": + host = cfg.Addr + default: + var err error + host, ppoid, err = net.SplitHostPort(cfg.Addr) + if nil != err { + host = cfg.Addr + } else if host == "" { + host = "localhost" + } + } + + s.Host = host + s.PortPathOrID = ppoid +} diff --git a/_integrations/nrmysql/nrmysql_test.go b/_integrations/nrmysql/nrmysql_test.go new file mode 100644 index 000000000..bdcd2feb1 --- /dev/null +++ b/_integrations/nrmysql/nrmysql_test.go @@ -0,0 +1,182 @@ +package nrmysql + +import ( + "testing" + + "github.com/go-sql-driver/mysql" + newrelic "github.com/newrelic/go-agent" +) + +func TestParseDSN(t *testing.T) { + testcases := 
[]struct { + dsn string + expHost string + expPortPathOrID string + expDatabaseName string + }{ + // examples from https://github.com/go-sql-driver/mysql README + { + dsn: "user@unix(/path/to/socket)/dbname", + expHost: "localhost", + expPortPathOrID: "/path/to/socket", + expDatabaseName: "dbname", + }, + { + dsn: "root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local", + expHost: "localhost", + expPortPathOrID: "/tmp/mysql.sock", + expDatabaseName: "myDatabase", + }, + { + dsn: "user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true", + expHost: "localhost", + expPortPathOrID: "5555", + expDatabaseName: "dbname", + }, + { + dsn: "user:password@/dbname?sql_mode=TRADITIONAL", + expHost: "127.0.0.1", + expPortPathOrID: "3306", + expDatabaseName: "dbname", + }, + { + dsn: "user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci", + expHost: "de:ad:be:ef::ca:fe", + expPortPathOrID: "80", + expDatabaseName: "dbname", + }, + { + dsn: "id:password@tcp(your-amazonaws-uri.com:3306)/dbname", + expHost: "your-amazonaws-uri.com", + expPortPathOrID: "3306", + expDatabaseName: "dbname", + }, + { + dsn: "user@cloudsql(project-id:instance-name)/dbname", + expHost: "project-id:instance-name", + expPortPathOrID: "", + expDatabaseName: "dbname", + }, + { + dsn: "user@cloudsql(project-id:regionname:instance-name)/dbname", + expHost: "project-id:regionname:instance-name", + expPortPathOrID: "", + expDatabaseName: "dbname", + }, + { + dsn: "user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped", + expHost: "127.0.0.1", + expPortPathOrID: "3306", + expDatabaseName: "dbname", + }, + { + dsn: "user:password@/dbname", + expHost: "127.0.0.1", + expPortPathOrID: "3306", + expDatabaseName: "dbname", + }, + { + dsn: "user:password@/", + expHost: "127.0.0.1", + expPortPathOrID: "3306", + expDatabaseName: "", + }, + { + dsn: "this is not a dsn", + expHost: "", + expPortPathOrID: "", + expDatabaseName: "", + }, + } + + for _, test := 
range testcases { + s := &newrelic.DatastoreSegment{} + parseDSN(s, test.dsn) + if test.expHost != s.Host { + t.Errorf(`incorrect host, expected="%s", actual="%s"`, test.expHost, s.Host) + } + if test.expPortPathOrID != s.PortPathOrID { + t.Errorf(`incorrect port path or id, expected="%s", actual="%s"`, test.expPortPathOrID, s.PortPathOrID) + } + if test.expDatabaseName != s.DatabaseName { + t.Errorf(`incorrect database name, expected="%s", actual="%s"`, test.expDatabaseName, s.DatabaseName) + } + } +} + +func TestParseConfig(t *testing.T) { + testcases := []struct { + cfgNet string + cfgAddr string + cfgDBName string + expHost string + expPortPathOrID string + expDatabaseName string + }{ + { + cfgDBName: "mydb", + expDatabaseName: "mydb", + }, + { + cfgNet: "unixgram", + cfgAddr: "/path/to/my/sock", + expHost: "localhost", + expPortPathOrID: "/path/to/my/sock", + }, + { + cfgNet: "unixpacket", + cfgAddr: "/path/to/my/sock", + expHost: "localhost", + expPortPathOrID: "/path/to/my/sock", + }, + { + cfgNet: "udp", + cfgAddr: "[fe80::1%lo0]:53", + expHost: "fe80::1%lo0", + expPortPathOrID: "53", + }, + { + cfgNet: "tcp", + cfgAddr: ":80", + expHost: "localhost", + expPortPathOrID: "80", + }, + { + cfgNet: "ip4:1", + cfgAddr: "192.0.2.1", + expHost: "192.0.2.1", + expPortPathOrID: "", + }, + { + cfgNet: "tcp6", + cfgAddr: "golang.org:http", + expHost: "golang.org", + expPortPathOrID: "http", + }, + { + cfgNet: "ip6:ipv6-icmp", + cfgAddr: "2001:db8::1", + expHost: "2001:db8::1", + expPortPathOrID: "", + }, + } + + for _, test := range testcases { + s := &newrelic.DatastoreSegment{} + cfg := &mysql.Config{ + Net: test.cfgNet, + Addr: test.cfgAddr, + DBName: test.cfgDBName, + } + parseConfig(s, cfg) + if test.expHost != s.Host { + t.Errorf(`incorrect host, expected="%s", actual="%s"`, test.expHost, s.Host) + } + if test.expPortPathOrID != s.PortPathOrID { + t.Errorf(`incorrect port path or id, expected="%s", actual="%s"`, test.expPortPathOrID, s.PortPathOrID) + } + if 
test.expDatabaseName != s.DatabaseName { + t.Errorf(`incorrect database name, expected="%s", actual="%s"`, test.expDatabaseName, s.DatabaseName) + } + } +} diff --git a/_integrations/nrnats/README.md b/_integrations/nrnats/README.md new file mode 100644 index 000000000..be66253f2 --- /dev/null +++ b/_integrations/nrnats/README.md @@ -0,0 +1,10 @@ +# _integrations/nrnats [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrnats) + +Package `nrnats` instruments https://github.com/nats-io/nats.go. + +```go +import "github.com/newrelic/go-agent/_integrations/nrnats" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrnats). diff --git a/_integrations/nrnats/example_test.go b/_integrations/nrnats/example_test.go new file mode 100644 index 000000000..65e4e80f7 --- /dev/null +++ b/_integrations/nrnats/example_test.go @@ -0,0 +1,56 @@ +package nrnats + +import ( + "fmt" + "time" + + "github.com/nats-io/nats.go" + "github.com/nats-io/stan.go" + newrelic "github.com/newrelic/go-agent" +) + +func currentTransaction() newrelic.Transaction { return nil } + +func ExampleStartPublishSegment() { + nc, _ := nats.Connect(nats.DefaultURL) + txn := currentTransaction() + subject := "testing.subject" + + // Start the Publish segment + seg := StartPublishSegment(txn, nc, subject) + err := nc.Publish(subject, []byte("Hello World")) + if nil != err { + panic(err) + } + // Manually end the segment + seg.End() +} + +func ExampleStartPublishSegment_defer() { + nc, _ := nats.Connect(nats.DefaultURL) + txn := currentTransaction() + subject := "testing.subject" + + // Start the Publish segment and defer End till the func returns + defer StartPublishSegment(txn, nc, subject).End() + m, err := nc.Request(subject, []byte("request"), time.Second) + if nil != err { + panic(err) + } + fmt.Println("Received reply message:", string(m.Data)) +} + +var clusterID, clientID string + +// StartPublishSegment can be used with a NATS 
Streaming Connection as well
Publisher + wg.Add(1) + // Use nrnats.StartPublishSegment to create a + // newrelic.MessageProducerSegment for the call to nc.Publish + seg := nrnats.StartPublishSegment(txn, nc, subj) + err = nc.Publish(subj, []byte("Hello World")) + seg.End() + if nil != err { + panic(err) + } + + wg.Wait() +} + +func doQueue(nc *nats.Conn, txn newrelic.Transaction) { + wg := sync.WaitGroup{} + subj := "queue" + + // Queue Subscriber + // Use the nrnats.SubWrapper to wrap the nats.MsgHandler and create a + // newrelic.Transaction with each processed nats.Msg + _, err := nc.QueueSubscribe(subj, "myQueueName", nrnats.SubWrapper(app, func(m *nats.Msg) { + defer wg.Done() + fmt.Println("Received queue message:", string(m.Data)) + })) + if nil != err { + panic(err) + } + + wg.Add(1) + // Use nrnats.StartPublishSegment to create a + // newrelic.MessageProducerSegment for the call to nc.Publish + seg := nrnats.StartPublishSegment(txn, nc, subj) + err = nc.Publish(subj, []byte("Hello World")) + seg.End() + if nil != err { + panic(err) + } + + wg.Wait() +} + +func doSync(nc *nats.Conn, txn newrelic.Transaction) { + subj := "sync" + + // Simple Sync Subscriber + sub, err := nc.SubscribeSync(subj) + if nil != err { + panic(err) + } + // Use nrnats.StartPublishSegment to create a + // newrelic.MessageProducerSegment for the call to nc.Publish + seg := nrnats.StartPublishSegment(txn, nc, subj) + err = nc.Publish(subj, []byte("Hello World")) + seg.End() + if nil != err { + panic(err) + } + m, err := sub.NextMsg(time.Second) + if nil != err { + panic(err) + } + fmt.Println("Received sync message:", string(m.Data)) +} + +func doChan(nc *nats.Conn, txn newrelic.Transaction) { + subj := "chan" + + // Channel Subscriber + ch := make(chan *nats.Msg) + _, err := nc.ChanSubscribe(subj, ch) + if nil != err { + panic(err) + } + + // Use nrnats.StartPublishSegment to create a + // newrelic.MessageProducerSegment for the call to nc.Publish + seg := nrnats.StartPublishSegment(txn, nc, subj) + err = 
nc.Publish(subj, []byte("Hello World")) + seg.End() + if nil != err { + panic(err) + } + + m := <-ch + fmt.Println("Received chan message:", string(m.Data)) +} + +func doReply(nc *nats.Conn, txn newrelic.Transaction) { + subj := "reply" + + // Replies + nc.Subscribe(subj, func(m *nats.Msg) { + // Use nrnats.StartPublishSegment to create a + // newrelic.MessageProducerSegment for the call to nc.Publish + seg := nrnats.StartPublishSegment(txn, nc, m.Reply) + nc.Publish(m.Reply, []byte("Hello World")) + seg.End() + }) + + // Requests + // Use nrnats.StartPublishSegment to create a + // newrelic.MessageProducerSegment for the call to nc.Request + seg := nrnats.StartPublishSegment(txn, nc, subj) + m, err := nc.Request(subj, []byte("request"), time.Second) + seg.End() + if nil != err { + panic(err) + } + fmt.Println("Received reply message:", string(m.Data)) +} + +func doRespond(nc *nats.Conn, txn newrelic.Transaction) { + subj := "respond" + // Respond + nc.Subscribe(subj, func(m *nats.Msg) { + // Use nrnats.StartPublishSegment to create a + // newrelic.MessageProducerSegment for the call to m.Respond + seg := nrnats.StartPublishSegment(txn, nc, m.Reply) + m.Respond([]byte("Hello World")) + seg.End() + }) + + // Requests + // Use nrnats.StartPublishSegment to create a + // newrelic.MessageProducerSegment for the call to nc.Request + seg := nrnats.StartPublishSegment(txn, nc, subj) + m, err := nc.Request(subj, []byte("request"), time.Second) + seg.End() + if nil != err { + panic(err) + } + fmt.Println("Received respond message:", string(m.Data)) +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + // Initialize agent + cfg := newrelic.NewConfig("NATS App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + var err error + app, err = newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + defer 
app.Shutdown(10 * time.Second) + err = app.WaitForConnection(5 * time.Second) + if nil != err { + panic(err) + } + txn := app.StartTransaction("main", nil, nil) + defer txn.End() + + // Connect to a server + nc, err := nats.Connect(nats.DefaultURL) + if nil != err { + panic(err) + } + defer nc.Drain() + + doAsync(nc, txn) + doQueue(nc, txn) + doSync(nc, txn) + doChan(nc, txn) + doReply(nc, txn) + doRespond(nc, txn) +} diff --git a/_integrations/nrnats/nrnats.go b/_integrations/nrnats/nrnats.go new file mode 100644 index 000000000..7f10594eb --- /dev/null +++ b/_integrations/nrnats/nrnats.go @@ -0,0 +1,63 @@ +package nrnats + +import ( + "strings" + + nats "github.com/nats-io/nats.go" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +// StartPublishSegment creates and starts a `newrelic.MessageProducerSegment` +// (https://godoc.org/github.com/newrelic/go-agent#MessageProducerSegment) for NATS +// publishers. Call this function before calling any method that publishes or +// responds to a NATS message. Call `End()` +// (https://godoc.org/github.com/newrelic/go-agent#MessageProducerSegment.End) on the +// returned newrelic.MessageProducerSegment when the publish is complete. The +// `newrelic.Transaction` and `nats.Conn` parameters are required. The subject +// parameter is the subject of the publish call and is used in metric and span +// names. 
+func StartPublishSegment(txn newrelic.Transaction, nc *nats.Conn, subject string) *newrelic.MessageProducerSegment { + if nil == txn { + return nil + } + if nil == nc { + return nil + } + return &newrelic.MessageProducerSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Library: "NATS", + DestinationType: newrelic.MessageTopic, + DestinationName: subject, + DestinationTemporary: strings.HasPrefix(subject, "_INBOX"), + } +} + +// SubWrapper can be used to wrap the function for nats.Subscribe (https://godoc.org/github.com/nats-io/go-nats#Conn.Subscribe +// or https://godoc.org/github.com/nats-io/go-nats#EncodedConn.Subscribe) +// and nats.QueueSubscribe (https://godoc.org/github.com/nats-io/go-nats#Conn.QueueSubscribe or +// https://godoc.org/github.com/nats-io/go-nats#EncodedConn.QueueSubscribe) +// If the `newrelic.Application` parameter is non-nil, it will create a `newrelic.Transaction` and end the transaction +// when the passed function is complete. +func SubWrapper(app newrelic.Application, f func(msg *nats.Msg)) func(msg *nats.Msg) { + if app == nil { + return f + } + return func(msg *nats.Msg) { + namer := internal.MessageMetricKey{ + Library: "NATS", + DestinationType: string(newrelic.MessageTopic), + DestinationName: msg.Subject, + Consumer: true, + } + txn := app.StartTransaction(namer.Name(), nil, nil) + defer txn.End() + + integrationsupport.AddAgentAttribute(txn, internal.AttributeMessageRoutingKey, msg.Sub.Subject, nil) + integrationsupport.AddAgentAttribute(txn, internal.AttributeMessageQueueName, msg.Sub.Queue, nil) + integrationsupport.AddAgentAttribute(txn, internal.AttributeMessageReplyTo, msg.Reply, nil) + + f(msg) + } +} diff --git a/_integrations/nrnats/nrnats_doc.go b/_integrations/nrnats/nrnats_doc.go new file mode 100644 index 000000000..8b95d7aa4 --- /dev/null +++ b/_integrations/nrnats/nrnats_doc.go @@ -0,0 +1,52 @@ +// Package nrnats instruments https://github.com/nats-io/nats.go. 
+// +// This package can be used to simplify instrumenting NATS publishers and subscribers. Currently due to the nature of +// the NATS framework we are limited to two integration points: `StartPublishSegment` for publishers, and `SubWrapper` +// for subscribers. +// +// NATS publishers +// +// To generate an external segment for any method that publishes or responds to a NATS message, use the +// `StartPublishSegment` method. The resulting segment will also need to be ended. Example: +// +// nc, _ := nats.Connect(nats.DefaultURL) +// txn := currentTransaction() // current newrelic.Transaction +// subject := "testing.subject" +// seg := nrnats.StartPublishSegment(txn, nc, subject) +// err := nc.Publish(subject, []byte("Hello World")) +// if nil != err { +// panic(err) +// } +// seg.End() +// +// Or: +// +// nc, _ := nats.Connect(nats.DefaultURL) +// txn := currentTransaction() // current newrelic.Transaction +// subject := "testing.subject" +// defer nrnats.StartPublishSegment(txn, nc, subject).End() +// nc.Publish(subject, []byte("Hello World")) +// +// +// NATS subscribers +// +// The `nrnats.SubWrapper` function can be used to wrap the function for `nats.Subscribe` +// (https://godoc.org/github.com/nats-io/go-nats#Conn.Subscribe or +// https://godoc.org/github.com/nats-io/go-nats#EncodedConn.Subscribe) +// and `nats.QueueSubscribe` (https://godoc.org/github.com/nats-io/go-nats#Conn.QueueSubscribe or +// https://godoc.org/github.com/nats-io/go-nats#EncodedConn.QueueSubscribe) +// If the `newrelic.Application` parameter is non-nil, it will create a `newrelic.Transaction` and end the transaction +// when the passed function is complete. 
Example: +// +// nc, _ := nats.Connect(nats.DefaultURL) +// app := createNRApp() // newrelic.Application +// subject := "testing.subject" +// nc.Subscribe(subject, nrnats.SubWrapper(app, myMessageHandler)) +// +// Full Publisher/Subscriber example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrnats/examples/main.go +package nrnats + +import "github.com/newrelic/go-agent/internal" + +func init() { internal.TrackUsage("integration", "framework", "nats") } diff --git a/_integrations/nrnats/nrnats_test.go b/_integrations/nrnats/nrnats_test.go new file mode 100644 index 000000000..04ed34b8b --- /dev/null +++ b/_integrations/nrnats/nrnats_test.go @@ -0,0 +1,226 @@ +package nrnats + +import ( + "os" + "sync" + "testing" + "time" + + "github.com/nats-io/nats-server/test" + "github.com/nats-io/nats.go" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +func TestMain(m *testing.M) { + s := test.RunDefaultServer() + defer s.Shutdown() + os.Exit(m.Run()) +} + +func testApp() integrationsupport.ExpectApp { + return integrationsupport.NewTestApp(integrationsupport.SampleEverythingReplyFn, cfgFn) +} + +var cfgFn = func(cfg *newrelic.Config) { + cfg.Enabled = false + cfg.DistributedTracer.Enabled = true + cfg.TransactionTracer.SegmentThreshold = 0 + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + cfg.Attributes.Include = append(cfg.Attributes.Include, + newrelic.AttributeMessageRoutingKey, + newrelic.AttributeMessageQueueName, + newrelic.AttributeMessageExchangeType, + newrelic.AttributeMessageReplyTo, + newrelic.AttributeMessageCorrelationID, + ) +} + +func TestStartPublishSegmentNilTxn(t *testing.T) { + // Make sure that a nil transaction does not cause panics + nc, err := nats.Connect(nats.DefaultURL) + if nil != err { + t.Fatal(err) + } + defer nc.Close() + + StartPublishSegment(nil, nc, 
"mysubject").End() +} + +func TestStartPublishSegmentNilConn(t *testing.T) { + // Make sure that a nil nats.Conn does not cause panics and does not record + // metrics + app := testApp() + txn := app.StartTransaction("testing", nil, nil) + StartPublishSegment(txn, nil, "mysubject").End() + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/testing", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/testing", Scope: "", Forced: false, Data: nil}, + }) +} + +func TestStartPublishSegmentBasic(t *testing.T) { + app := testApp() + txn := app.StartTransaction("testing", nil, nil) + nc, err := nats.Connect(nats.DefaultURL) + if nil != err { + t.Fatal(err) + } + defer nc.Close() + + StartPublishSegment(txn, nc, "mysubject").End() + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "MessageBroker/NATS/Topic/Produce/Named/mysubject", Scope: "", Forced: false, Data: nil}, + {Name: "MessageBroker/NATS/Topic/Produce/Named/mysubject", Scope: "OtherTransaction/Go/testing", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/testing", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/testing", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectSpanEvents(t, 
[]internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "OtherTransaction/Go/testing", + "nr.entryPoint": true, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + { + Intrinsics: map[string]interface{}{ + "category": "generic", + "name": "MessageBroker/NATS/Topic/Produce/Named/mysubject", + "parentId": internal.MatchAnything, + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{}, + }, + }) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/testing", + Root: internal.WantTraceSegment{ + SegmentName: "ROOT", + Attributes: map[string]interface{}{}, + Children: []internal.WantTraceSegment{{ + SegmentName: "OtherTransaction/Go/testing", + Attributes: map[string]interface{}{"exclusive_duration_millis": internal.MatchAnything}, + Children: []internal.WantTraceSegment{ + { + SegmentName: "MessageBroker/NATS/Topic/Produce/Named/mysubject", + Attributes: map[string]interface{}{}, + }, + }, + }}, + }, + }, + }) +} + +func TestSubWrapperWithNilApp(t *testing.T) { + nc, err := nats.Connect(nats.DefaultURL) + if err != nil { + t.Fatal("Error connecting to NATS server", err) + } + wg := sync.WaitGroup{} + nc.Subscribe("subject1", SubWrapper(nil, func(msg *nats.Msg) { + wg.Done() + })) + wg.Add(1) + nc.Publish("subject1", []byte("data")) + wg.Wait() +} + +func TestSubWrapper(t *testing.T) { + nc, err := nats.Connect(nats.DefaultURL) + if err != nil { + t.Fatal("Error connecting to NATS server", err) + } + wg := sync.WaitGroup{} + app := testApp() + nc.QueueSubscribe("subject2", "queue1", WgWrapper(&wg, SubWrapper(app, func(msg *nats.Msg) {}))) + wg.Add(1) + nc.Request("subject2", []byte("data"), time.Second) + wg.Wait() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + 
{Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/Message/NATS/Topic/Named/subject2", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/Message/NATS/Topic/Named/subject2", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/Message/NATS/Topic/Named/subject2", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + AgentAttributes: map[string]interface{}{ + "message.replyTo": internal.MatchAnything, // starts with _INBOX + "message.routingKey": "subject2", + "message.queueName": "queue1", + }, + UserAttributes: map[string]interface{}{}, + }, + }) +} + +func TestStartPublishSegmentNaming(t *testing.T) { + testCases := []struct { + subject string + metric string + }{ + {subject: "", metric: "MessageBroker/NATS/Topic/Produce/Named/Unknown"}, + {subject: "mysubject", metric: "MessageBroker/NATS/Topic/Produce/Named/mysubject"}, + {subject: "_INBOX.asldfkjsldfjskd.ldskfjls", metric: "MessageBroker/NATS/Topic/Produce/Temp"}, + } + + nc, err := nats.Connect(nats.DefaultURL) + if nil != err { + t.Fatal(err) + } + defer nc.Close() + + for _, tc := range testCases { + app := testApp() + txn := app.StartTransaction("testing", nil, nil) + StartPublishSegment(txn, nc, tc.subject).End() + txn.End() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/testing", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", 
Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/testing", Scope: "", Forced: false, Data: nil}, + {Name: tc.metric, Scope: "", Forced: false, Data: nil}, + {Name: tc.metric, Scope: "OtherTransaction/Go/testing", Forced: false, Data: nil}, + }) + } +} + +// Wrapper function to ensure that the NR wrapper is done recording transaction data before wg.Done() is called +func WgWrapper(wg *sync.WaitGroup, nrWrap func(msg *nats.Msg)) func(msg *nats.Msg) { + return func(msg *nats.Msg) { + nrWrap(msg) + wg.Done() + } +} diff --git a/_integrations/nrpkgerrors/README.md b/_integrations/nrpkgerrors/README.md new file mode 100644 index 000000000..68f3c3cf5 --- /dev/null +++ b/_integrations/nrpkgerrors/README.md @@ -0,0 +1,10 @@ +# _integrations/nrpkgerrors [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrpkgerrors) + +Package `nrpkgerrors` introduces support for https://github.com/pkg/errors. + +```go +import "github.com/newrelic/go-agent/_integrations/nrpkgerrors" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrpkgerrors). 
diff --git a/_integrations/nrpkgerrors/example/main.go b/_integrations/nrpkgerrors/example/main.go new file mode 100644 index 000000000..8d9279ccf --- /dev/null +++ b/_integrations/nrpkgerrors/example/main.go @@ -0,0 +1,57 @@ +package main + +import ( + "fmt" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrpkgerrors" + "github.com/pkg/errors" +) + +type sampleError string + +func (e sampleError) Error() string { + return string(e) +} + +func alpha() error { + return errors.WithStack(sampleError("alpha is the cause")) +} + +func beta() error { + return errors.WithStack(alpha()) +} + +func gamma() error { + return errors.Wrap(beta(), "gamma was involved") +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("pkg/errors app", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + if err := app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + txn := app.StartTransaction("has-error", nil, nil) + e := gamma() + txn.NoticeError(nrpkgerrors.Wrap(e)) + txn.End() + + app.Shutdown(10 * time.Second) +} diff --git a/_integrations/nrpkgerrors/example_test.go b/_integrations/nrpkgerrors/example_test.go new file mode 100644 index 000000000..971ff92e4 --- /dev/null +++ b/_integrations/nrpkgerrors/example_test.go @@ -0,0 +1,27 @@ +package nrpkgerrors_test + +import ( + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrpkgerrors" + "github.com/pkg/errors" +) + +type rootError string + +func (e rootError) Error() string { return string(e) } + +func makeRootError() error { + return errors.WithStack(rootError("this is the original error")) +} + +func Example() { + var txn 
newrelic.Transaction + e := errors.Wrap(makeRootError(), "extra information") + // Wrap the error to record stack-trace and class type information from + // the error's root cause. Here, "rootError" will be recored as the + // class and top stack-trace frame will be inside makeRootError(). + // Without nrpkgerrors.Wrap, "*errors.withStack" would be recorded as + // the class and the top stack-trace frame would be site of the + // NoticeError call. + txn.NoticeError(nrpkgerrors.Wrap(e)) +} diff --git a/_integrations/nrpkgerrors/nrkpgerrors_test.go b/_integrations/nrpkgerrors/nrkpgerrors_test.go new file mode 100644 index 000000000..0e41fccb2 --- /dev/null +++ b/_integrations/nrpkgerrors/nrkpgerrors_test.go @@ -0,0 +1,99 @@ +package nrpkgerrors + +import ( + "runtime" + "strings" + "testing" + + newrelic "github.com/newrelic/go-agent" + "github.com/pkg/errors" +) + +func topFrameFunction(stack []uintptr) string { + var frame runtime.Frame + frames := runtime.CallersFrames(stack) + if nil != frames { + frame, _ = frames.Next() + } + return frame.Function +} + +type basicError struct{} + +func (e basicError) Error() string { return "something went wrong" } + +func alpha(e error) error { return errors.WithStack(e) } +func beta(e error) error { return errors.WithStack(e) } +func gamma(e error) error { return errors.WithStack(e) } + +func theta(e error) error { return errors.WithMessage(e, "theta") } + +func TestWrappedStackTrace(t *testing.T) { + testcases := []struct { + Error error + ExpectTopFrame string + }{ + {Error: basicError{}, ExpectTopFrame: ""}, + {Error: alpha(basicError{}), ExpectTopFrame: "alpha"}, + {Error: alpha(beta(gamma(basicError{}))), ExpectTopFrame: "gamma"}, + {Error: alpha(theta(basicError{})), ExpectTopFrame: "alpha"}, + {Error: alpha(theta(beta(basicError{}))), ExpectTopFrame: "beta"}, + {Error: alpha(theta(beta(theta(basicError{})))), ExpectTopFrame: "beta"}, + {Error: theta(basicError{}), ExpectTopFrame: ""}, + } + + for idx, tc := range 
testcases { + e := Wrap(tc.Error) + st := e.(newrelic.StackTracer).StackTrace() + fn := topFrameFunction(st) + if !strings.Contains(fn, tc.ExpectTopFrame) { + t.Errorf("testcase %d: expected %s got %s", + idx, tc.ExpectTopFrame, fn) + } + } +} + +type withClass struct{ class string } + +func errorWithClass(class string) error { return withClass{class: class} } + +func (e withClass) Error() string { return "something went wrong" } +func (e withClass) ErrorClass() string { return e.class } + +type classAndCause struct { + cause error + class string +} + +func wrapWithClass(e error, class string) error { return classAndCause{cause: e, class: class} } + +func (e classAndCause) Error() string { return e.cause.Error() } +func (e classAndCause) Cause() error { return e.cause } +func (e classAndCause) ErrorClass() string { return e.class } + +func TestWrappedErrorClass(t *testing.T) { + // First choice is any ErrorClass of the immediate error. + // Second choice is any ErrorClass of the error's cause. + // Final choice is the reflect type of the error's cause. 
+ testcases := []struct { + Error error + ExpectClass string + }{ + {Error: basicError{}, ExpectClass: "nrpkgerrors.basicError"}, + {Error: errorWithClass("zap"), ExpectClass: "zap"}, + {Error: wrapWithClass(errorWithClass("zap"), "zip"), ExpectClass: "zip"}, + {Error: theta(wrapWithClass(errorWithClass("zap"), "zip")), ExpectClass: "zap"}, + {Error: alpha(basicError{}), ExpectClass: "nrpkgerrors.basicError"}, + {Error: wrapWithClass(basicError{}, "zip"), ExpectClass: "zip"}, + {Error: alpha(wrapWithClass(basicError{}, "zip")), ExpectClass: "nrpkgerrors.basicError"}, + } + + for idx, tc := range testcases { + e := Wrap(tc.Error) + class := e.(newrelic.ErrorClasser).ErrorClass() + if class != tc.ExpectClass { + t.Errorf("testcase %d: expected %s got %s", + idx, tc.ExpectClass, class) + } + } +} diff --git a/_integrations/nrpkgerrors/nrpkgerrors.go b/_integrations/nrpkgerrors/nrpkgerrors.go new file mode 100644 index 000000000..acd610aba --- /dev/null +++ b/_integrations/nrpkgerrors/nrpkgerrors.go @@ -0,0 +1,81 @@ +// Package nrpkgerrors introduces support for https://github.com/pkg/errors. +// +// This package improves the class and stack-trace fields of pkg/error errors +// when they are recorded with Transaction.NoticeError. +// +package nrpkgerrors + +import ( + "fmt" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/pkg/errors" +) + +func init() { internal.TrackUsage("integration", "pkg-errors") } + +type nrpkgerror struct { + error +} + +// stackTracer is an error that also knows about its StackTrace. +// All wrapped errors from github.com/pkg/errors implement this interface. 
+type stackTracer interface { + StackTrace() errors.StackTrace +} + +func deepestStackTrace(err error) errors.StackTrace { + var last stackTracer + for err != nil { + if err, ok := err.(stackTracer); ok { + last = err + } + cause, ok := err.(interface { + Cause() error + }) + if !ok { + break + } + err = cause.Cause() + } + + if last == nil { + return nil + } + return last.StackTrace() +} + +func transformStackTrace(orig errors.StackTrace) []uintptr { + st := make([]uintptr, len(orig)) + for i, frame := range orig { + st[i] = uintptr(frame) + } + return st +} + +func (e nrpkgerror) StackTrace() []uintptr { + st := deepestStackTrace(e.error) + if nil == st { + return nil + } + return transformStackTrace(st) +} + +func (e nrpkgerror) ErrorClass() string { + if ec, ok := e.error.(newrelic.ErrorClasser); ok { + return ec.ErrorClass() + } + cause := errors.Cause(e.error) + if ec, ok := cause.(newrelic.ErrorClasser); ok { + return ec.ErrorClass() + } + return fmt.Sprintf("%T", cause) +} + +// Wrap wraps a pkg/errors error so that when noticed by +// newrelic.Transaction.NoticeError it gives an improved stacktrace and class +// type. +func Wrap(e error) error { + return nrpkgerror{e} +} diff --git a/_integrations/nrpq/README.md b/_integrations/nrpq/README.md new file mode 100644 index 000000000..1ab62055a --- /dev/null +++ b/_integrations/nrpq/README.md @@ -0,0 +1,10 @@ +# _integrations/nrpq [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrpq) + +Package `nrpq` instruments https://github.com/lib/pq. + +```go +import "github.com/newrelic/go-agent/_integrations/nrpq" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrpq). 
diff --git a/_integrations/nrpq/example/main.go b/_integrations/nrpq/example/main.go new file mode 100644 index 000000000..b44b777f8 --- /dev/null +++ b/_integrations/nrpq/example/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "context" + "database/sql" + "fmt" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" + _ "github.com/newrelic/go-agent/_integrations/nrpq" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + // docker run --rm -e POSTGRES_PASSWORD=docker -p 5432:5432 postgres + db, err := sql.Open("nrpostgres", "host=localhost port=5432 user=postgres dbname=postgres password=docker sslmode=disable") + if err != nil { + panic(err) + } + + cfg := newrelic.NewConfig("PostgreSQL App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + app.WaitForConnection(5 * time.Second) + txn := app.StartTransaction("postgresQuery", nil, nil) + + ctx := newrelic.NewContext(context.Background(), txn) + row := db.QueryRowContext(ctx, "SELECT count(*) FROM pg_catalog.pg_tables") + var count int + row.Scan(&count) + + txn.End() + app.Shutdown(5 * time.Second) + + fmt.Println("number of entries in pg_catalog.pg_tables", count) +} diff --git a/_integrations/nrpq/example/sqlx/main.go b/_integrations/nrpq/example/sqlx/main.go new file mode 100644 index 000000000..4d075822b --- /dev/null +++ b/_integrations/nrpq/example/sqlx/main.go @@ -0,0 +1,143 @@ +// An application that illustrates how to instrument jmoiron/sqlx with DatastoreSegments +// +// To run this example, be sure the environment variable NEW_RELIC_LICENSE_KEY +// is set to your license key. Postgres must be running on the default port +// 5432 and have a user "foo" and a database "bar". +// +// Adding instrumentation for the SQLx package is easy. 
It means you can +// make database calls without having to manually create DatastoreSegments. +// Setup can be done in two steps: +// +// Set up your driver +// +// If you are using one of our currently supported database drivers (see +// https://docs.newrelic.com/docs/agents/go-agent/get-started/go-agent-compatibility-requirements#frameworks), +// follow the instructions on installing the driver. +// +// As an example, for the `lib/pq` driver, you will use the newrelic +// integration's driver in place of the postgres driver. If your code is using +// sqlx.Open with `lib/pq` like this: +// +// import ( +// "github.com/jmoiron/sqlx" +// _ "github.com/lib/pq" +// ) +// +// func main() { +// db, err := sqlx.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") +// } +// +// Then change the side-effect import to the integration package, and open +// "nrpostgres" instead: +// +// import ( +// "github.com/jmoiron/sqlx" +// _ "github.com/newrelic/go-agent/_integrations/nrpq" +// ) +// +// func main() { +// db, err := sqlx.Open("nrpostgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") +// } +// +// If you are not using one of the supported database drivers, use the +// `InstrumentSQLDriver` +// (https://godoc.org/github.com/newrelic/go-agent#InstrumentSQLDriver) API. +// See +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrmysql/nrmysql.go +// for a full example. +// +// Add context to your database calls +// +// Next, you must provide a context containing a newrelic.Transaction to all +// methods on sqlx.DB, sqlx.NamedStmt, sqlx.Stmt, and sqlx.Tx that make a +// database call. 
For example, instead of the following: +// +// err := db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") +// +// Do this: +// +// ctx := newrelic.NewContext(context.Background(), txn) +// err := db.GetContext(ctx, &jason, "SELECT * FROM person WHERE first_name=$1", "Jason") +// +package main + +import ( + "context" + "fmt" + "log" + "os" + "time" + + "github.com/jmoiron/sqlx" + newrelic "github.com/newrelic/go-agent" + _ "github.com/newrelic/go-agent/_integrations/nrpq" +) + +var schema = ` +CREATE TABLE person ( + first_name text, + last_name text, + email text +)` + +// Person is a person in the database +type Person struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func createApp() newrelic.Application { + cfg := newrelic.NewConfig("SQLx", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + log.Fatalln(err) + } + if err := app.WaitForConnection(5 * time.Second); nil != err { + log.Fatalln(err) + } + return app +} + +func main() { + // Create application + app := createApp() + defer app.Shutdown(10 * time.Second) + // Start a transaction + txn := app.StartTransaction("main", nil, nil) + defer txn.End() + // Add transaction to context + ctx := newrelic.NewContext(context.Background(), txn) + + // Connect to database using the "nrpostgres" driver + db, err := sqlx.Connect("nrpostgres", "user=foo dbname=bar sslmode=disable") + if err != nil { + log.Fatalln(err) + } + + // Create database table if it does not exist already + // When the context is passed, DatastoreSegments will be created + db.ExecContext(ctx, schema) + + // Add people to the database + // When the context is passed, DatastoreSegments will be created + tx := db.MustBegin() + 
tx.MustExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") + tx.MustExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") + tx.Commit() + + // Read from the database + // When the context is passed, DatastoreSegments will be created + people := []Person{} + db.SelectContext(ctx, &people, "SELECT * FROM person ORDER BY first_name ASC") + jason := Person{} + db.GetContext(ctx, &jason, "SELECT * FROM person WHERE first_name=$1", "Jason") +} diff --git a/_integrations/nrpq/nrpq.go b/_integrations/nrpq/nrpq.go new file mode 100644 index 000000000..e7de7d3e0 --- /dev/null +++ b/_integrations/nrpq/nrpq.go @@ -0,0 +1,157 @@ +// +build go1.10 + +// Package nrpq instruments https://github.com/lib/pq. +// +// Use this package to instrument your PostgreSQL calls without having to manually +// create DatastoreSegments. This is done in a two step process: +// +// 1. Use this package's driver in place of the postgres driver. +// +// If your code is using sql.Open like this: +// +// import ( +// _ "github.com/lib/pq" +// ) +// +// func main() { +// db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") +// } +// +// Then change the side-effect import to this package, and open "nrpostgres" instead: +// +// import ( +// _ "github.com/newrelic/go-agent/_integrations/nrpq" +// ) +// +// func main() { +// db, err := sql.Open("nrpostgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") +// } +// +// If your code is using pq.NewConnector, simply use nrpq.NewConnector +// instead. +// +// 2. Provide a context containing a newrelic.Transaction to all exec and query +// methods on sql.DB, sql.Conn, and sql.Tx. This requires using the +// context methods ExecContext, QueryContext, and QueryRowContext in place of +// Exec, Query, and QueryRow respectively. 
For example, instead of the +// following: +// +// row := db.QueryRow("SELECT count(*) FROM pg_catalog.pg_tables") +// +// Do this: +// +// ctx := newrelic.NewContext(context.Background(), txn) +// row := db.QueryRowContext(ctx, "SELECT count(*) FROM pg_catalog.pg_tables") +// +// Unfortunately, sql.Stmt exec and query calls are not supported since pq.stmt +// does not have ExecContext and QueryContext methods (as of June 2019, see +// https://github.com/lib/pq/pull/768). +// +// A working example is shown here: +// https://github.com/newrelic/go-agent/tree/master/_integrations/nrpq/example/main.go +package nrpq + +import ( + "database/sql" + "database/sql/driver" + "os" + "path" + "regexp" + "strings" + + "github.com/lib/pq" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/sqlparse" +) + +var ( + baseBuilder = newrelic.SQLDriverSegmentBuilder{ + BaseSegment: newrelic.DatastoreSegment{ + Product: newrelic.DatastorePostgres, + }, + ParseQuery: sqlparse.ParseQuery, + ParseDSN: parseDSN(os.Getenv), + } +) + +// NewConnector can be used in place of pq.NewConnector to get an instrumented +// PostgreSQL connector. +func NewConnector(dsn string) (driver.Connector, error) { + connector, err := pq.NewConnector(dsn) + if nil != err || nil == connector { + // Return nil rather than 'connector' since a nil pointer would + // be returned as a non-nil driver.Connector. 
+ return nil, err + } + bld := baseBuilder + bld.ParseDSN(&bld.BaseSegment, dsn) + return newrelic.InstrumentSQLConnector(connector, bld), nil +} + +func init() { + sql.Register("nrpostgres", newrelic.InstrumentSQLDriver(&pq.Driver{}, baseBuilder)) + internal.TrackUsage("integration", "driver", "postgres") +} + +var dsnSplit = regexp.MustCompile(`(\w+)\s*=\s*('[^=]*'|[^'\s]+)`) + +func getFirstHost(value string) string { + host := strings.SplitN(value, ",", 2)[0] + host = strings.Trim(host, "[]") + return host +} + +func parseDSN(getenv func(string) string) func(*newrelic.DatastoreSegment, string) { + return func(s *newrelic.DatastoreSegment, dsn string) { + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + var err error + dsn, err = pq.ParseURL(dsn) + if nil != err { + return + } + } + + host := getenv("PGHOST") + hostaddr := "" + ppoid := getenv("PGPORT") + dbname := getenv("PGDATABASE") + + for _, split := range dsnSplit.FindAllStringSubmatch(dsn, -1) { + if len(split) != 3 { + continue + } + key := split[1] + value := strings.Trim(split[2], `'`) + + switch key { + case "dbname": + dbname = value + case "host": + host = getFirstHost(value) + case "hostaddr": + hostaddr = getFirstHost(value) + case "port": + ppoid = strings.SplitN(value, ",", 2)[0] + } + } + + if "" != hostaddr { + host = hostaddr + } else if "" == host { + host = "localhost" + } + if "" == ppoid { + ppoid = "5432" + } + if strings.HasPrefix(host, "/") { + // this is a unix socket + ppoid = path.Join(host, ".s.PGSQL."+ppoid) + host = "localhost" + } + + s.Host = host + s.PortPathOrID = ppoid + s.DatabaseName = dbname + } +} diff --git a/_integrations/nrpq/nrpq_test.go b/_integrations/nrpq/nrpq_test.go new file mode 100644 index 000000000..3bba3f8db --- /dev/null +++ b/_integrations/nrpq/nrpq_test.go @@ -0,0 +1,251 @@ +package nrpq + +import ( + "testing" + + newrelic "github.com/newrelic/go-agent" +) + +func TestParseDSN(t *testing.T) { + testcases := 
[]struct { + dsn string + expHost string + expPortPathOrID string + expDatabaseName string + env map[string]string + }{ + // urls + { + dsn: "postgresql://", + expHost: "localhost", + expPortPathOrID: "5432", + expDatabaseName: "", + }, + { + dsn: "postgresql://localhost", + expHost: "localhost", + expPortPathOrID: "5432", + expDatabaseName: "", + }, + { + dsn: "postgresql://localhost:5433", + expHost: "localhost", + expPortPathOrID: "5433", + expDatabaseName: "", + }, + { + dsn: "postgresql://localhost/mydb", + expHost: "localhost", + expPortPathOrID: "5432", + expDatabaseName: "mydb", + }, + { + dsn: "postgresql://user@localhost", + expHost: "localhost", + expPortPathOrID: "5432", + expDatabaseName: "", + }, + { + dsn: "postgresql://other@localhost/otherdb?connect_timeout=10&application_name=myapp", + expHost: "localhost", + expPortPathOrID: "5432", + expDatabaseName: "otherdb", + }, + { + dsn: "postgresql:///mydb?host=myhost.com&port=5433", + expHost: "myhost.com", + expPortPathOrID: "5433", + expDatabaseName: "mydb", + }, + { + dsn: "postgresql://[2001:db8::1234]/database", + expHost: "2001:db8::1234", + expPortPathOrID: "5432", + expDatabaseName: "database", + }, + { + dsn: "postgresql://[2001:db8::1234]:7890/database", + expHost: "2001:db8::1234", + expPortPathOrID: "7890", + expDatabaseName: "database", + }, + { + dsn: "postgresql:///dbname?host=/var/lib/postgresql", + expHost: "localhost", + expPortPathOrID: "/var/lib/postgresql/.s.PGSQL.5432", + expDatabaseName: "dbname", + }, + { + dsn: "postgresql://%2Fvar%2Flib%2Fpostgresql/dbname", + expHost: "", + expPortPathOrID: "", + expDatabaseName: "", + }, + + // key,value pairs + { + dsn: "host=1.2.3.4 port=1234 dbname=mydb", + expHost: "1.2.3.4", + expPortPathOrID: "1234", + expDatabaseName: "mydb", + }, + { + dsn: "host =1.2.3.4 port= 1234 dbname = mydb", + expHost: "1.2.3.4", + expPortPathOrID: "1234", + expDatabaseName: "mydb", + }, + { + dsn: "host = 1.2.3.4 port=\t\t1234 dbname =\n\t\t\tmydb", + expHost: 
"1.2.3.4", + expPortPathOrID: "1234", + expDatabaseName: "mydb", + }, + { + dsn: "host ='1.2.3.4' port= '1234' dbname = 'mydb'", + expHost: "1.2.3.4", + expPortPathOrID: "1234", + expDatabaseName: "mydb", + }, + { + dsn: `host='ain\'t_single_quote' port='port\\slash' dbname='my db spaced'`, + expHost: `ain\'t_single_quote`, + expPortPathOrID: `port\\slash`, + expDatabaseName: "my db spaced", + }, + { + dsn: `host=localhost port=so=does=this`, + expHost: "localhost", + expPortPathOrID: "so=does=this", + }, + { + dsn: "host=1.2.3.4 hostaddr=5.6.7.8", + expHost: "5.6.7.8", + expPortPathOrID: "5432", + }, + { + dsn: "hostaddr=5.6.7.8 host=1.2.3.4", + expHost: "5.6.7.8", + expPortPathOrID: "5432", + }, + { + dsn: "hostaddr=1.2.3.4", + expHost: "1.2.3.4", + expPortPathOrID: "5432", + }, + { + dsn: "host=example.com,example.org port=80,443", + expHost: "example.com", + expPortPathOrID: "80", + }, + { + dsn: "hostaddr=example.com,example.org port=80,443", + expHost: "example.com", + expPortPathOrID: "80", + }, + { + dsn: "hostaddr='' host='' port=80,", + expHost: "localhost", + expPortPathOrID: "80", + }, + { + dsn: "host=/path/to/socket", + expHost: "localhost", + expPortPathOrID: "/path/to/socket/.s.PGSQL.5432", + }, + { + dsn: "port=1234 host=/path/to/socket", + expHost: "localhost", + expPortPathOrID: "/path/to/socket/.s.PGSQL.1234", + }, + { + dsn: "host=/path/to/socket port=1234", + expHost: "localhost", + expPortPathOrID: "/path/to/socket/.s.PGSQL.1234", + }, + + // env vars + { + dsn: "host=host_string port=port_string dbname=dbname_string", + expHost: "host_string", + expPortPathOrID: "port_string", + expDatabaseName: "dbname_string", + env: map[string]string{ + "PGHOST": "host_env", + "PGPORT": "port_env", + "PGDATABASE": "dbname_env", + }, + }, + { + dsn: "", + expHost: "host_env", + expPortPathOrID: "port_env", + expDatabaseName: "dbname_env", + env: map[string]string{ + "PGHOST": "host_env", + "PGPORT": "port_env", + "PGDATABASE": "dbname_env", + }, + }, + { + 
dsn: "host=host_string", + expHost: "host_string", + expPortPathOrID: "5432", + env: map[string]string{ + "PGHOSTADDR": "hostaddr_env", + }, + }, + { + dsn: "hostaddr=hostaddr_string", + expHost: "hostaddr_string", + expPortPathOrID: "5432", + env: map[string]string{ + "PGHOST": "host_env", + }, + }, + { + dsn: "host=host_string hostaddr=hostaddr_string", + expHost: "hostaddr_string", + expPortPathOrID: "5432", + env: map[string]string{ + "PGHOST": "host_env", + }, + }, + } + + for _, test := range testcases { + getenv := func(env string) string { + return test.env[env] + } + + s := &newrelic.DatastoreSegment{} + parseDSN(getenv)(s, test.dsn) + + if test.expHost != s.Host { + t.Errorf(`incorrect host, expected="%s", actual="%s"`, test.expHost, s.Host) + } + if test.expPortPathOrID != s.PortPathOrID { + t.Errorf(`incorrect port path or id, expected="%s", actual="%s"`, test.expPortPathOrID, s.PortPathOrID) + } + if test.expDatabaseName != s.DatabaseName { + t.Errorf(`incorrect database name, expected="%s", actual="%s"`, test.expDatabaseName, s.DatabaseName) + } + } +} + +func TestNewConnector(t *testing.T) { + connector, err := NewConnector("client_encoding=") + if err == nil { + t.Error("error expected from invalid dsn") + } + if connector != nil { + t.Error("nil connector expected from invalid dsn") + } + connector, err = NewConnector("host=localhost port=5432 user=postgres dbname=postgres password=docker sslmode=disable") + if err != nil { + t.Error("nil error expected from valid dsn", err) + } + if connector == nil { + t.Error("non-nil connector expected from valid dsn") + } +} diff --git a/_integrations/nrsqlite3/README.md b/_integrations/nrsqlite3/README.md new file mode 100644 index 000000000..0bde474fd --- /dev/null +++ b/_integrations/nrsqlite3/README.md @@ -0,0 +1,10 @@ +# _integrations/nrsqlite3 [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrsqlite3) + +Package `nrsqlite3` instruments https://github.com/mattn/go-sqlite3. 
+ +```go +import "github.com/newrelic/go-agent/_integrations/nrsqlite3" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrsqlite3). diff --git a/_integrations/nrsqlite3/example/main.go b/_integrations/nrsqlite3/example/main.go new file mode 100644 index 000000000..390cf3b74 --- /dev/null +++ b/_integrations/nrsqlite3/example/main.go @@ -0,0 +1,49 @@ +package main + +import ( + "context" + "database/sql" + "fmt" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" + _ "github.com/newrelic/go-agent/_integrations/nrsqlite3" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + db, err := sql.Open("nrsqlite3", ":memory:") + if err != nil { + panic(err) + } + defer db.Close() + + db.Exec("CREATE TABLE zaps ( zap_num INTEGER )") + db.Exec("INSERT INTO zaps (zap_num) VALUES (22)") + + cfg := newrelic.NewConfig("SQLite App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + app.WaitForConnection(5 * time.Second) + txn := app.StartTransaction("sqliteQuery", nil, nil) + + ctx := newrelic.NewContext(context.Background(), txn) + row := db.QueryRowContext(ctx, "SELECT count(*) from zaps") + var count int + row.Scan(&count) + + txn.End() + app.Shutdown(5 * time.Second) + + fmt.Println("number of entries in table", count) +} diff --git a/_integrations/nrsqlite3/nrsqlite3.go b/_integrations/nrsqlite3/nrsqlite3.go new file mode 100644 index 000000000..fe535d346 --- /dev/null +++ b/_integrations/nrsqlite3/nrsqlite3.go @@ -0,0 +1,141 @@ +// +build go1.10 + +// Package nrsqlite3 instruments https://github.com/mattn/go-sqlite3. +// +// Use this package to instrument your SQLite calls without having to manually +// create DatastoreSegments. 
This is done in a two step process: +// +// 1. Use this package's driver in place of the sqlite3 driver. +// +// If your code is using sql.Open like this: +// +// import ( +// _ "github.com/mattn/go-sqlite3" +// ) +// +// func main() { +// db, err := sql.Open("sqlite3", "./foo.db") +// } +// +// Then change the side-effect import to this package, and open "nrsqlite3" instead: +// +// import ( +// _ "github.com/newrelic/go-agent/_integrations/nrsqlite3" +// ) +// +// func main() { +// db, err := sql.Open("nrsqlite3", "./foo.db") +// } +// +// If you are registering a custom sqlite3 driver with special behavior then +// you must wrap your driver instance using nrsqlite3.InstrumentSQLDriver. For +// example, if your code looks like this: +// +// func main() { +// sql.Register("sqlite3_with_extensions", &sqlite3.SQLiteDriver{ +// Extensions: []string{ +// "sqlite3_mod_regexp", +// }, +// }) +// db, err := sql.Open("sqlite3_with_extensions", ":memory:") +// } +// +// Then instrument the driver like this: +// +// func main() { +// sql.Register("sqlite3_with_extensions", nrsqlite3.InstrumentSQLDriver(&sqlite3.SQLiteDriver{ +// Extensions: []string{ +// "sqlite3_mod_regexp", +// }, +// })) +// db, err := sql.Open("sqlite3_with_extensions", ":memory:") +// } +// +// 2. Provide a context containing a newrelic.Transaction to all exec and query +// methods on sql.DB, sql.Conn, sql.Tx, and sql.Stmt. This requires using the +// context methods ExecContext, QueryContext, and QueryRowContext in place of +// Exec, Query, and QueryRow respectively. 
For example, instead of the +// following: +// +// row := db.QueryRow("SELECT count(*) from tables") +// +// Do this: +// +// ctx := newrelic.NewContext(context.Background(), txn) +// row := db.QueryRowContext(ctx, "SELECT count(*) from tables") +// +// A working example is shown here: +// https://github.com/newrelic/go-agent/tree/master/_integrations/nrsqlite3/example/main.go +package nrsqlite3 + +import ( + "database/sql" + "database/sql/driver" + "path/filepath" + "strings" + + sqlite3 "github.com/mattn/go-sqlite3" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/sqlparse" +) + +var ( + baseBuilder = newrelic.SQLDriverSegmentBuilder{ + BaseSegment: newrelic.DatastoreSegment{ + Product: newrelic.DatastoreSQLite, + }, + ParseQuery: sqlparse.ParseQuery, + ParseDSN: parseDSN, + } +) + +func init() { + sql.Register("nrsqlite3", InstrumentSQLDriver(&sqlite3.SQLiteDriver{})) + internal.TrackUsage("integration", "driver", "sqlite3") +} + +// InstrumentSQLDriver wraps an sqlite3.SQLiteDriver to add instrumentation. 
+// For example, if you are registering a custom SQLiteDriver like this: +// +// sql.Register("sqlite3_with_extensions", +// &sqlite3.SQLiteDriver{ +// Extensions: []string{ +// "sqlite3_mod_regexp", +// }, +// }) +// +// Then add instrumentation like this: +// +// sql.Register("sqlite3_with_extensions", +// nrsqlite3.InstrumentSQLDriver(&sqlite3.SQLiteDriver{ +// Extensions: []string{ +// "sqlite3_mod_regexp", +// }, +// })) +// +func InstrumentSQLDriver(d *sqlite3.SQLiteDriver) driver.Driver { + return newrelic.InstrumentSQLDriver(d, baseBuilder) +} + +func getPortPathOrID(dsn string) (ppoid string) { + ppoid = strings.Split(dsn, "?")[0] + ppoid = strings.TrimPrefix(ppoid, "file:") + + if ":memory:" != ppoid && "" != ppoid { + if abs, err := filepath.Abs(ppoid); nil == err { + ppoid = abs + } + } + + return +} + +// ParseDSN accepts a DSN string and sets the Host, PortPathOrID, and +// DatabaseName fields on a newrelic.DatastoreSegment. +func parseDSN(s *newrelic.DatastoreSegment, dsn string) { + // See https://godoc.org/github.com/mattn/go-sqlite3#SQLiteDriver.Open + s.Host = "localhost" + s.PortPathOrID = getPortPathOrID(dsn) + s.DatabaseName = "" +} diff --git a/_integrations/nrsqlite3/nrsqlite3_test.go b/_integrations/nrsqlite3/nrsqlite3_test.go new file mode 100644 index 000000000..cc653fa45 --- /dev/null +++ b/_integrations/nrsqlite3/nrsqlite3_test.go @@ -0,0 +1,29 @@ +package nrsqlite3 + +import ( + "path/filepath" + "runtime" + "testing" +) + +func TestGetPortPathOrID(t *testing.T) { + _, here, _, _ := runtime.Caller(0) + currentDir := filepath.Dir(here) + + testcases := []struct { + dsn string + expected string + }{ + {":memory:", ":memory:"}, + {"test.db", filepath.Join(currentDir, "test.db")}, + {"file:/test.db?cache=shared&mode=memory", "/test.db"}, + {"file::memory:", ":memory:"}, + {"", ""}, + } + + for _, test := range testcases { + if actual := getPortPathOrID(test.dsn); actual != test.expected { + t.Errorf(`incorrect port path or id: dsn="%s", 
actual="%s"`, test.dsn, actual) + } + } +} diff --git a/_integrations/nrstan/README.md b/_integrations/nrstan/README.md new file mode 100644 index 000000000..5886b8d43 --- /dev/null +++ b/_integrations/nrstan/README.md @@ -0,0 +1,10 @@ +# _integrations/nrstan [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrstan) + +Package `nrstan` instruments https://github.com/nats-io/stan.go. + +```go +import "github.com/newrelic/go-agent/_integrations/nrstan" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrstan). diff --git a/_integrations/nrstan/examples/README.md b/_integrations/nrstan/examples/README.md new file mode 100644 index 000000000..186846324 --- /dev/null +++ b/_integrations/nrstan/examples/README.md @@ -0,0 +1,4 @@ +# Example STAN app +In this example app you can find several different ways of instrumenting NATS Streaming functions using New Relic. In order to run the app, make sure the following assumptions are correct: +* Your New Relic license key is available as an environment variable named `NEW_RELIC_LICENSE_KEY` +* A NATS Streaming Server is running with the cluster id `test-cluster` \ No newline at end of file diff --git a/_integrations/nrstan/examples/main.go b/_integrations/nrstan/examples/main.go new file mode 100644 index 000000000..e0962a85f --- /dev/null +++ b/_integrations/nrstan/examples/main.go @@ -0,0 +1,107 @@ +package main + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/nats-io/stan.go" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrnats" + "github.com/newrelic/go-agent/_integrations/nrstan" +) + +var app newrelic.Application + +func doAsync(sc stan.Conn, txn newrelic.Transaction) { + wg := sync.WaitGroup{} + subj := "async" + + // Simple Async Subscriber + // Use the nrstan.StreamingSubWrapper to wrap the stan.MsgHandler and + // create a newrelic.Transaction with each processed stan.Msg + _, err := 
sc.Subscribe(subj, nrstan.StreamingSubWrapper(app, func(m *stan.Msg) { + defer wg.Done() + fmt.Println("Received async message:", string(m.Data)) + })) + if nil != err { + panic(err) + } + + // Simple Publisher + wg.Add(1) + // Use nrnats.StartPublishSegment to create a newrelic.ExternalSegment for + // the call to sc.Publish + seg := nrnats.StartPublishSegment(txn, sc.NatsConn(), subj) + err = sc.Publish(subj, []byte("Hello World")) + seg.End() + if nil != err { + panic(err) + } + + wg.Wait() +} + +func doQueue(sc stan.Conn, txn newrelic.Transaction) { + wg := sync.WaitGroup{} + subj := "queue" + + // Queue Subscriber + // Use the nrstan.StreamingSubWrapper to wrap the stan.MsgHandler and + // create a newrelic.Transaction with each processed stan.Msg + _, err := sc.QueueSubscribe(subj, "myqueue", nrstan.StreamingSubWrapper(app, func(m *stan.Msg) { + defer wg.Done() + fmt.Println("Received queue message:", string(m.Data)) + })) + if nil != err { + panic(err) + } + + wg.Add(1) + // Use nrnats.StartPublishSegment to create a newrelic.ExternalSegment for + // the call to sc.Publish + seg := nrnats.StartPublishSegment(txn, sc.NatsConn(), subj) + err = sc.Publish(subj, []byte("Hello World")) + seg.End() + if nil != err { + panic(err) + } + + wg.Wait() +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + // Initialize agent + cfg := newrelic.NewConfig("STAN App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + var err error + app, err = newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + defer app.Shutdown(10 * time.Second) + err = app.WaitForConnection(5 * time.Second) + if nil != err { + panic(err) + } + txn := app.StartTransaction("main", nil, nil) + defer txn.End() + + // Connect to a server + sc, err := stan.Connect("test-cluster", "clientid") + if nil != err { + panic(err) + } + defer 
sc.Close() + + doAsync(sc, txn) + doQueue(sc, txn) +} diff --git a/_integrations/nrstan/nrstan.go b/_integrations/nrstan/nrstan.go new file mode 100644 index 000000000..1422e837b --- /dev/null +++ b/_integrations/nrstan/nrstan.go @@ -0,0 +1,33 @@ +package nrstan + +import ( + stan "github.com/nats-io/stan.go" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +// StreamingSubWrapper can be used to wrap the function for STREAMING stan.Subscribe and stan.QueueSubscribe +// (https://godoc.org/github.com/nats-io/stan.go#Conn) +// If the `newrelic.Application` parameter is non-nil, it will create a `newrelic.Transaction` and end the transaction +// when the passed function is complete. +func StreamingSubWrapper(app newrelic.Application, f func(msg *stan.Msg)) func(msg *stan.Msg) { + if app == nil { + return f + } + return func(msg *stan.Msg) { + namer := internal.MessageMetricKey{ + Library: "STAN", + DestinationType: string(newrelic.MessageTopic), + DestinationName: msg.MsgProto.Subject, + Consumer: true, + } + txn := app.StartTransaction(namer.Name(), nil, nil) + defer txn.End() + + integrationsupport.AddAgentAttribute(txn, internal.AttributeMessageRoutingKey, msg.MsgProto.Subject, nil) + integrationsupport.AddAgentAttribute(txn, internal.AttributeMessageReplyTo, msg.MsgProto.Reply, nil) + + f(msg) + } +} diff --git a/_integrations/nrstan/nrstan_doc.go b/_integrations/nrstan/nrstan_doc.go new file mode 100644 index 000000000..7ff972398 --- /dev/null +++ b/_integrations/nrstan/nrstan_doc.go @@ -0,0 +1,45 @@ +// Package nrstan instruments https://github.com/nats-io/stan.go. +// +// This package can be used to simplify instrumenting NATS Streaming subscribers. Currently due to the nature of +// the NATS Streaming framework we are limited to two integration points: `StartPublishSegment` for publishers, and +// `StreamingSubWrapper` for subscribers. 
+// +// +// NATS Streaming subscribers +// +// `nrstan.StreamingSubWrapper` can be used to wrap the function for STREAMING stan.Subscribe and stan.QueueSubscribe +// (https://godoc.org/github.com/nats-io/stan.go#Conn) If the `newrelic.Application` parameter is non-nil, it will +// create a `newrelic.Transaction` and end the transaction when the passed function is complete. Example: +// +// sc, err := stan.Connect(clusterName, clientName) +// if err != nil { +// t.Fatal("Couldn't connect to server", err) +// } +// defer sc.Close() +// app := createTestApp(t) // newrelic.Application +// sc.Subscribe(subject, StreamingSubWrapper(app, myMessageHandler)) +// +// +// NATS Streaming publishers +// +// You can use `nrnats.StartPublishSegment` from the `nrnats` package +// (https://godoc.org/github.com/newrelic/go-agent/_integrations/nrnats/#StartPublishSegment) +// to start an external segment when doing a streaming publish, which must be ended after publishing is complete. +// Example: +// +// sc, err := stan.Connect(clusterName, clientName) +// if err != nil { +// t.Fatal("Couldn't connect to server", err) +// } +// txn := currentTransaction() // current newrelic.Transaction +// seg := nrnats.StartPublishSegment(txn, sc.NatsConn(), subj) +// sc.Publish(subj, []byte("Hello World")) +// seg.End() +// +// Full Publisher/Subscriber example: +// https://github.com/newrelic/go-agent/blob/master/_integrations/nrstan/examples/main.go +package nrstan + +import "github.com/newrelic/go-agent/internal" + +func init() { internal.TrackUsage("integration", "framework", "stan") } diff --git a/_integrations/nrstan/nrstan_test.go b/_integrations/nrstan/nrstan_test.go new file mode 100644 index 000000000..dd332b2d4 --- /dev/null +++ b/_integrations/nrstan/nrstan_test.go @@ -0,0 +1,112 @@ +package nrstan + +import ( + "os" + "sync" + "testing" + + "github.com/nats-io/nats-streaming-server/server" + "github.com/nats-io/stan.go" + newrelic "github.com/newrelic/go-agent" + 
"github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/integrationsupport" +) + +const ( + clusterName = "my_test_cluster" + clientName = "me" +) + +func TestMain(m *testing.M) { + s, err := server.RunServer(clusterName) + if err != nil { + panic(err) + } + defer s.Shutdown() + os.Exit(m.Run()) +} + +func createTestApp() integrationsupport.ExpectApp { + return integrationsupport.NewTestApp(integrationsupport.SampleEverythingReplyFn, cfgFn) +} + +var cfgFn = func(cfg *newrelic.Config) { + cfg.Enabled = false + cfg.DistributedTracer.Enabled = true + cfg.TransactionTracer.SegmentThreshold = 0 + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + cfg.Attributes.Include = append(cfg.Attributes.Include, + newrelic.AttributeMessageRoutingKey, + newrelic.AttributeMessageQueueName, + newrelic.AttributeMessageExchangeType, + newrelic.AttributeMessageReplyTo, + newrelic.AttributeMessageCorrelationID, + ) +} + +func TestSubWrapperWithNilApp(t *testing.T) { + subject := "sample.subject1" + sc, err := stan.Connect(clusterName, clientName) + if err != nil { + t.Fatal("Couldn't connect to server", err) + } + defer sc.Close() + + wg := sync.WaitGroup{} + sc.Subscribe(subject, StreamingSubWrapper(nil, func(msg *stan.Msg) { + defer wg.Done() + })) + wg.Add(1) + sc.Publish(subject, []byte("data")) + wg.Wait() +} + +func TestSubWrapper(t *testing.T) { + subject := "sample.subject2" + sc, err := stan.Connect(clusterName, clientName) + if err != nil { + t.Fatal("Couldn't connect to server", err) + } + defer sc.Close() + + wg := sync.WaitGroup{} + app := createTestApp() + sc.Subscribe(subject, WgWrapper(&wg, StreamingSubWrapper(app, func(msg *stan.Msg) {}))) + + wg.Add(1) + sc.Publish(subject, []byte("data")) + wg.Wait() + + app.ExpectMetrics(t, []internal.WantMetric{ + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + 
{Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", Scope: "", Forced: false, Data: nil}, + {Name: "OtherTransaction/Go/Message/STAN/Topic/Named/sample.subject2", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/Message/STAN/Topic/Named/sample.subject2", Scope: "", Forced: false, Data: nil}, + }) + app.ExpectTxnEvents(t, []internal.WantEvent{ + { + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/Message/STAN/Topic/Named/sample.subject2", + "guid": internal.MatchAnything, + "priority": internal.MatchAnything, + "sampled": internal.MatchAnything, + "traceId": internal.MatchAnything, + }, + AgentAttributes: map[string]interface{}{ + "message.routingKey": "sample.subject2", + }, + UserAttributes: map[string]interface{}{}, + }, + }) +} + +// Wrapper function to ensure that the NR wrapper is done recording transaction data before wg.Done() is called +func WgWrapper(wg *sync.WaitGroup, nrWrap func(msg *stan.Msg)) func(msg *stan.Msg) { + return func(msg *stan.Msg) { + nrWrap(msg) + wg.Done() + } +} diff --git a/_integrations/nrzap/README.md b/_integrations/nrzap/README.md new file mode 100644 index 000000000..0278aafce --- /dev/null +++ b/_integrations/nrzap/README.md @@ -0,0 +1,10 @@ +# _integrations/nrzap [](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrzap) + +Package `nrzap` supports https://github.com/uber-go/zap. + +```go +import "github.com/newrelic/go-agent/_integrations/nrzap" +``` + +For more information, see +[godocs](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrzap). 
diff --git a/_integrations/nrzap/example_test.go b/_integrations/nrzap/example_test.go new file mode 100644 index 000000000..1d4c6b1d5 --- /dev/null +++ b/_integrations/nrzap/example_test.go @@ -0,0 +1,18 @@ +package nrzap + +import ( + newrelic "github.com/newrelic/go-agent" + "go.uber.org/zap" +) + +func Example() { + cfg := newrelic.NewConfig("Example App", "__YOUR_NEWRELIC_LICENSE_KEY__") + + // Create a new zap logger: + z, _ := zap.NewProduction() + + // Use nrzap to register the logger with the agent: + cfg.Logger = Transform(z.Named("newrelic")) + + newrelic.NewApplication(cfg) +} diff --git a/_integrations/nrzap/nrzap.go b/_integrations/nrzap/nrzap.go new file mode 100644 index 000000000..e91ff03da --- /dev/null +++ b/_integrations/nrzap/nrzap.go @@ -0,0 +1,42 @@ +// Package nrzap supports https://github.com/uber-go/zap +// +// Wrap your zap Logger using nrzap.Transform to send agent log messages to zap. +package nrzap + +import ( + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "go.uber.org/zap" +) + +func init() { internal.TrackUsage("integration", "logging", "zap") } + +type shim struct{ logger *zap.Logger } + +func transformAttributes(atts map[string]interface{}) []zap.Field { + fs := make([]zap.Field, 0, len(atts)) + for key, val := range atts { + fs = append(fs, zap.Any(key, val)) + } + return fs +} + +func (s *shim) Error(msg string, c map[string]interface{}) { + s.logger.Error(msg, transformAttributes(c)...) +} +func (s *shim) Warn(msg string, c map[string]interface{}) { + s.logger.Warn(msg, transformAttributes(c)...) +} +func (s *shim) Info(msg string, c map[string]interface{}) { + s.logger.Info(msg, transformAttributes(c)...) +} +func (s *shim) Debug(msg string, c map[string]interface{}) { + s.logger.Debug(msg, transformAttributes(c)...) +} +func (s *shim) DebugEnabled() bool { + ce := s.logger.Check(zap.DebugLevel, "debugging") + return ce != nil +} + +// Transform turns a *zap.Logger into a newrelic.Logger. 
+func Transform(l *zap.Logger) newrelic.Logger { return &shim{logger: l} } diff --git a/app_run.go b/app_run.go new file mode 100644 index 000000000..f1972930d --- /dev/null +++ b/app_run.go @@ -0,0 +1,207 @@ +package newrelic + +import ( + "encoding/json" + "strings" + "time" + + "github.com/newrelic/go-agent/internal" +) + +// appRun contains information regarding a single connection session with the +// collector. It is immutable after creation at application connect. +type appRun struct { + Reply *internal.ConnectReply + + // AttributeConfig is calculated on every connect since it depends on + // the security policies. + AttributeConfig *internal.AttributeConfig + Config Config + + // firstAppName is the value of Config.AppName up to the first semicolon. + firstAppName string +} + +func newAppRun(config Config, reply *internal.ConnectReply) *appRun { + convertConfig := func(c AttributeDestinationConfig) internal.AttributeDestinationConfig { + return internal.AttributeDestinationConfig{ + Enabled: c.Enabled, + Include: c.Include, + Exclude: c.Exclude, + } + } + run := &appRun{ + Reply: reply, + AttributeConfig: internal.CreateAttributeConfig(internal.AttributeConfigInput{ + Attributes: convertConfig(config.Attributes), + ErrorCollector: convertConfig(config.ErrorCollector.Attributes), + TransactionEvents: convertConfig(config.TransactionEvents.Attributes), + TransactionTracer: convertConfig(config.TransactionTracer.Attributes), + BrowserMonitoring: convertConfig(config.BrowserMonitoring.Attributes), + SpanEvents: convertConfig(config.SpanEvents.Attributes), + TraceSegments: convertConfig(config.TransactionTracer.Segments.Attributes), + }, reply.SecurityPolicies.AttributesInclude.Enabled()), + Config: config, + } + + // Overwrite local settings with any server-side-config settings + // present. NOTE! 
This requires that the Config provided to this + // function is a value and not a pointer: We do not want to change the + // input Config with values particular to this connection. + + if v := run.Reply.ServerSideConfig.TransactionTracerEnabled; nil != v { + run.Config.TransactionTracer.Enabled = *v + } + if v := run.Reply.ServerSideConfig.ErrorCollectorEnabled; nil != v { + run.Config.ErrorCollector.Enabled = *v + } + if v := run.Reply.ServerSideConfig.CrossApplicationTracerEnabled; nil != v { + run.Config.CrossApplicationTracer.Enabled = *v + } + if v := run.Reply.ServerSideConfig.TransactionTracerThreshold; nil != v { + switch val := v.(type) { + case float64: + run.Config.TransactionTracer.Threshold.IsApdexFailing = false + run.Config.TransactionTracer.Threshold.Duration = internal.FloatSecondsToDuration(val) + case string: + if val == "apdex_f" { + run.Config.TransactionTracer.Threshold.IsApdexFailing = true + } + } + } + if v := run.Reply.ServerSideConfig.TransactionTracerStackTraceThreshold; nil != v { + run.Config.TransactionTracer.StackTraceThreshold = internal.FloatSecondsToDuration(*v) + } + if v := run.Reply.ServerSideConfig.ErrorCollectorIgnoreStatusCodes; nil != v { + run.Config.ErrorCollector.IgnoreStatusCodes = v + } + + if !run.Reply.CollectErrorEvents { + run.Config.ErrorCollector.CaptureEvents = false + } + if !run.Reply.CollectAnalyticsEvents { + run.Config.TransactionEvents.Enabled = false + } + if !run.Reply.CollectTraces { + run.Config.TransactionTracer.Enabled = false + run.Config.DatastoreTracer.SlowQuery.Enabled = false + } + if !run.Reply.CollectSpanEvents { + run.Config.SpanEvents.Enabled = false + } + + // Distributed tracing takes priority over cross-app-tracing per: + // https://source.datanerd.us/agents/agent-specs/blob/master/Distributed-Tracing.md#distributed-trace-payload + if run.Config.DistributedTracer.Enabled { + run.Config.CrossApplicationTracer.Enabled = false + } + + // Cache the first application name set on the config + 
run.firstAppName = strings.SplitN(config.AppName, ";", 2)[0] + + if "" != run.Reply.RunID { + js, _ := json.Marshal(settings(run.Config)) + run.Config.Logger.Debug("final configuration", map[string]interface{}{ + "config": internal.JSONString(js), + }) + } + + return run +} + +const ( + // https://source.datanerd.us/agents/agent-specs/blob/master/Lambda.md#distributed-tracing + serverlessDefaultPrimaryAppID = "Unknown" +) + +const ( + // https://source.datanerd.us/agents/agent-specs/blob/master/Lambda.md#adaptive-sampling + serverlessSamplerPeriod = 60 * time.Second + serverlessSamplerTarget = 10 +) + +func newServerlessConnectReply(config Config) *internal.ConnectReply { + reply := internal.ConnectReplyDefaults() + + reply.ApdexThresholdSeconds = config.ServerlessMode.ApdexThreshold.Seconds() + + reply.AccountID = config.ServerlessMode.AccountID + reply.TrustedAccountKey = config.ServerlessMode.TrustedAccountKey + reply.PrimaryAppID = config.ServerlessMode.PrimaryAppID + + if "" == reply.TrustedAccountKey { + // The trust key does not need to be provided by customers whose + // account ID is the same as the trust key. + reply.TrustedAccountKey = reply.AccountID + } + + if "" == reply.PrimaryAppID { + reply.PrimaryAppID = serverlessDefaultPrimaryAppID + } + + reply.AdaptiveSampler = internal.NewAdaptiveSampler(serverlessSamplerPeriod, + serverlessSamplerTarget, time.Now()) + + return reply +} + +func (run *appRun) responseCodeIsError(code int) bool { + // Response codes below 100 are allowed to be errors to support gRPC. 
+ if code < 400 && code >= 100 { + return false + } + for _, ignoreCode := range run.Config.ErrorCollector.IgnoreStatusCodes { + if code == ignoreCode { + return false + } + } + return true +} + +func (run *appRun) txnTraceThreshold(apdexThreshold time.Duration) time.Duration { + if run.Config.TransactionTracer.Threshold.IsApdexFailing { + return internal.ApdexFailingThreshold(apdexThreshold) + } + return run.Config.TransactionTracer.Threshold.Duration +} + +func (run *appRun) ptrTxnEvents() *uint { return run.Reply.EventData.Limits.TxnEvents } +func (run *appRun) ptrCustomEvents() *uint { return run.Reply.EventData.Limits.CustomEvents } +func (run *appRun) ptrErrorEvents() *uint { return run.Reply.EventData.Limits.ErrorEvents } +func (run *appRun) ptrSpanEvents() *uint { return run.Reply.EventData.Limits.SpanEvents } + +func (run *appRun) MaxTxnEvents() int { return run.limit(run.Config.MaxTxnEvents(), run.ptrTxnEvents) } +func (run *appRun) MaxCustomEvents() int { + return run.limit(internal.MaxCustomEvents, run.ptrCustomEvents) +} +func (run *appRun) MaxErrorEvents() int { return run.limit(internal.MaxErrorEvents, run.ptrErrorEvents) } +func (run *appRun) MaxSpanEvents() int { return run.limit(internal.MaxSpanEvents, run.ptrSpanEvents) } + +func (run *appRun) limit(dflt int, field func() *uint) int { + if nil != field() { + return int(*field()) + } + return dflt +} + +func (run *appRun) ReportPeriods() map[internal.HarvestTypes]time.Duration { + fixed := internal.HarvestMetricsTraces + configurable := internal.HarvestTypes(0) + + for tp, fn := range map[internal.HarvestTypes]func() *uint{ + internal.HarvestTxnEvents: run.ptrTxnEvents, + internal.HarvestCustomEvents: run.ptrCustomEvents, + internal.HarvestErrorEvents: run.ptrErrorEvents, + internal.HarvestSpanEvents: run.ptrSpanEvents, + } { + if nil != run && fn() != nil { + configurable |= tp + } else { + fixed |= tp + } + } + return map[internal.HarvestTypes]time.Duration{ + configurable: 
run.Reply.ConfigurablePeriod(), + fixed: internal.FixedHarvestPeriod, + } +} diff --git a/app_run_test.go b/app_run_test.go new file mode 100644 index 000000000..36e49f2a0 --- /dev/null +++ b/app_run_test.go @@ -0,0 +1,385 @@ +package newrelic + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" + + "github.com/newrelic/go-agent/internal" +) + +func TestResponseCodeIsError(t *testing.T) { + cfg := NewConfig("my app", "0123456789012345678901234567890123456789") + cfg.ErrorCollector.IgnoreStatusCodes = append(cfg.ErrorCollector.IgnoreStatusCodes, 504) + run := newAppRun(cfg, internal.ConnectReplyDefaults()) + + for _, tc := range []struct { + Code int + IsError bool + }{ + {Code: 0, IsError: false}, // gRPC + {Code: 1, IsError: true}, // gRPC + {Code: 5, IsError: false}, // gRPC + {Code: 6, IsError: true}, // gRPC + {Code: 99, IsError: true}, + {Code: 100, IsError: false}, + {Code: 199, IsError: false}, + {Code: 200, IsError: false}, + {Code: 300, IsError: false}, + {Code: 399, IsError: false}, + {Code: 400, IsError: true}, + {Code: 404, IsError: false}, + {Code: 503, IsError: true}, + {Code: 504, IsError: false}, + } { + if is := run.responseCodeIsError(tc.Code); is != tc.IsError { + t.Errorf("responseCodeIsError for %d, wanted=%v got=%v", + tc.Code, tc.IsError, is) + } + } + +} + +func TestCrossAppTracingEnabled(t *testing.T) { + // CAT should be enabled by default. + cfg := NewConfig("my app", "0123456789012345678901234567890123456789") + run := newAppRun(cfg, internal.ConnectReplyDefaults()) + if enabled := run.Config.CrossApplicationTracer.Enabled; !enabled { + t.Error(enabled) + } + + // DT gets priority over CAT. 
+ cfg = NewConfig("my app", "0123456789012345678901234567890123456789") + cfg.DistributedTracer.Enabled = true + cfg.CrossApplicationTracer.Enabled = true + run = newAppRun(cfg, internal.ConnectReplyDefaults()) + if enabled := run.Config.CrossApplicationTracer.Enabled; enabled { + t.Error(enabled) + } + + cfg = NewConfig("my app", "0123456789012345678901234567890123456789") + cfg.DistributedTracer.Enabled = false + cfg.CrossApplicationTracer.Enabled = false + run = newAppRun(cfg, internal.ConnectReplyDefaults()) + if enabled := run.Config.CrossApplicationTracer.Enabled; enabled { + t.Error(enabled) + } + + cfg = NewConfig("my app", "0123456789012345678901234567890123456789") + cfg.DistributedTracer.Enabled = false + cfg.CrossApplicationTracer.Enabled = true + run = newAppRun(cfg, internal.ConnectReplyDefaults()) + if enabled := run.Config.CrossApplicationTracer.Enabled; !enabled { + t.Error(enabled) + } +} + +func TestTxnTraceThreshold(t *testing.T) { + // Test that the default txn trace threshold is the failing apdex. + cfg := NewConfig("my app", "0123456789012345678901234567890123456789") + run := newAppRun(cfg, internal.ConnectReplyDefaults()) + threshold := run.txnTraceThreshold(1 * time.Second) + if threshold != 4*time.Second { + t.Error(threshold) + } + + // Test that the trace threshold can be assigned to a fixed value. + cfg = NewConfig("my app", "0123456789012345678901234567890123456789") + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 3 * time.Second + run = newAppRun(cfg, internal.ConnectReplyDefaults()) + threshold = run.txnTraceThreshold(1 * time.Second) + if threshold != 3*time.Second { + t.Error(threshold) + } + + // Test that the trace threshold can be overwritten by server-side-config. + // with "apdex_f". 
+ cfg = NewConfig("my app", "0123456789012345678901234567890123456789") + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 3 * time.Second + reply := internal.ConnectReplyDefaults() + json.Unmarshal([]byte(`{"agent_config":{"transaction_tracer.transaction_threshold":"apdex_f"}}`), &reply) + run = newAppRun(cfg, reply) + threshold = run.txnTraceThreshold(1 * time.Second) + if threshold != 4*time.Second { + t.Error(threshold) + } + + // Test that the trace threshold can be overwritten by server-side-config. + // with a numeric value. + cfg = NewConfig("my app", "0123456789012345678901234567890123456789") + reply = internal.ConnectReplyDefaults() + json.Unmarshal([]byte(`{"agent_config":{"transaction_tracer.transaction_threshold":3}}`), &reply) + run = newAppRun(cfg, reply) + threshold = run.txnTraceThreshold(1 * time.Second) + if threshold != 3*time.Second { + t.Error(threshold) + } +} + +var cfg = NewConfig("name", "license") + +func TestEmptyReplyEventHarvestDefaults(t *testing.T) { + var run internal.HarvestConfigurer = newAppRun(cfg, &internal.ConnectReply{}) + assertHarvestConfig(t, &run, expectHarvestConfig{ + maxTxnEvents: internal.MaxTxnEvents, + maxCustomEvents: internal.MaxCustomEvents, + maxErrorEvents: internal.MaxErrorEvents, + maxSpanEvents: internal.MaxSpanEvents, + periods: map[internal.HarvestTypes]time.Duration{ + internal.HarvestTypesAll: 60 * time.Second, + 0: 60 * time.Second, + }, + }) +} + +func TestEventHarvestFieldsAllPopulated(t *testing.T) { + reply, err := internal.ConstructConnectReply([]byte(`{"return_value":{ + "event_harvest_config": { + "report_period_ms": 5000, + "harvest_limits": { + "analytic_event_data": 1, + "custom_event_data": 2, + "span_event_data": 3, + "error_event_data": 4 + } + } + }}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + var run internal.HarvestConfigurer = newAppRun(cfg, reply) + assertHarvestConfig(t, &run, expectHarvestConfig{ + 
maxTxnEvents: 1, + maxCustomEvents: 2, + maxErrorEvents: 4, + maxSpanEvents: 3, + periods: map[internal.HarvestTypes]time.Duration{ + internal.HarvestMetricsTraces: 60 * time.Second, + internal.HarvestTypesEvents: 5 * time.Second, + }, + }) +} + +func TestZeroReportPeriod(t *testing.T) { + reply, err := internal.ConstructConnectReply([]byte(`{"return_value":{ + "event_harvest_config": { + "report_period_ms": 0 + } + }}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + var run internal.HarvestConfigurer = newAppRun(cfg, reply) + assertHarvestConfig(t, &run, expectHarvestConfig{ + maxTxnEvents: internal.MaxTxnEvents, + maxCustomEvents: internal.MaxCustomEvents, + maxErrorEvents: internal.MaxErrorEvents, + maxSpanEvents: internal.MaxSpanEvents, + periods: map[internal.HarvestTypes]time.Duration{ + internal.HarvestTypesAll: 60 * time.Second, + 0: 60 * time.Second, + }, + }) +} + +func TestEventHarvestFieldsOnlySpanEvents(t *testing.T) { + reply, err := internal.ConstructConnectReply([]byte(`{"return_value":{ + "event_harvest_config": { + "report_period_ms": 5000, + "harvest_limits": { "span_event_data": 3 } + }}}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + var run internal.HarvestConfigurer = newAppRun(cfg, reply) + assertHarvestConfig(t, &run, expectHarvestConfig{ + maxTxnEvents: internal.MaxTxnEvents, + maxCustomEvents: internal.MaxCustomEvents, + maxErrorEvents: internal.MaxErrorEvents, + maxSpanEvents: 3, + periods: map[internal.HarvestTypes]time.Duration{ + internal.HarvestTypesAll ^ internal.HarvestSpanEvents: 60 * time.Second, + internal.HarvestSpanEvents: 5 * time.Second, + }, + }) +} + +func TestEventHarvestFieldsOnlyTxnEvents(t *testing.T) { + reply, err := internal.ConstructConnectReply([]byte(`{"return_value":{ + "event_harvest_config": { + "report_period_ms": 5000, + "harvest_limits": { "analytic_event_data": 3 } + }}}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + var run 
internal.HarvestConfigurer = newAppRun(cfg, reply) + assertHarvestConfig(t, &run, expectHarvestConfig{ + maxTxnEvents: 3, + maxCustomEvents: internal.MaxCustomEvents, + maxErrorEvents: internal.MaxErrorEvents, + maxSpanEvents: internal.MaxSpanEvents, + periods: map[internal.HarvestTypes]time.Duration{ + internal.HarvestTypesAll ^ internal.HarvestTxnEvents: 60 * time.Second, + internal.HarvestTxnEvents: 5 * time.Second, + }, + }) +} + +func TestEventHarvestFieldsOnlyErrorEvents(t *testing.T) { + reply, err := internal.ConstructConnectReply([]byte(`{"return_value":{ + "event_harvest_config": { + "report_period_ms": 5000, + "harvest_limits": { "error_event_data": 3 } + }}}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + var run internal.HarvestConfigurer = newAppRun(cfg, reply) + assertHarvestConfig(t, &run, expectHarvestConfig{ + maxTxnEvents: internal.MaxTxnEvents, + maxCustomEvents: internal.MaxCustomEvents, + maxErrorEvents: 3, + maxSpanEvents: internal.MaxSpanEvents, + periods: map[internal.HarvestTypes]time.Duration{ + internal.HarvestTypesAll ^ internal.HarvestErrorEvents: 60 * time.Second, + internal.HarvestErrorEvents: 5 * time.Second, + }, + }) +} + +func TestEventHarvestFieldsOnlyCustomEvents(t *testing.T) { + reply, err := internal.ConstructConnectReply([]byte(`{"return_value":{ + "event_harvest_config": { + "report_period_ms": 5000, + "harvest_limits": { "custom_event_data": 3 } + }}}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + var run internal.HarvestConfigurer = newAppRun(cfg, reply) + assertHarvestConfig(t, &run, expectHarvestConfig{ + maxTxnEvents: internal.MaxTxnEvents, + maxCustomEvents: 3, + maxErrorEvents: internal.MaxErrorEvents, + maxSpanEvents: internal.MaxSpanEvents, + periods: map[internal.HarvestTypes]time.Duration{ + internal.HarvestTypesAll ^ internal.HarvestCustomEvents: 60 * time.Second, + internal.HarvestCustomEvents: 5 * time.Second, + }, + }) +} + +func 
TestConfigurableHarvestNegativeReportPeriod(t *testing.T) { + h, err := internal.ConstructConnectReply([]byte(`{"return_value":{ + "event_harvest_config": { + "report_period_ms": -1 + }}}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + expect := time.Duration(internal.DefaultConfigurableEventHarvestMs) * time.Millisecond + if period := h.ConfigurablePeriod(); period != expect { + t.Fatal(expect, period) + } +} + +func TestReplyTraceIDGenerator(t *testing.T) { + // Test that the default connect reply has a populated trace id + // generator that works. + reply := internal.ConnectReplyDefaults() + id1 := reply.TraceIDGenerator.GenerateTraceID() + id2 := reply.TraceIDGenerator.GenerateTraceID() + if len(id1) != 16 || len(id2) != 16 || id1 == id2 { + t.Error(id1, id2) + } +} + +func TestConfigurableTxnEvents_withCollResponse(t *testing.T) { + h, err := internal.ConstructConnectReply([]byte( + `{"return_value":{ + "event_harvest_config": { + "report_period_ms": 10000, + "harvest_limits": { + "analytic_event_data": 15 + } + } + }}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + result := newAppRun(cfg, h).MaxTxnEvents() + if result != 15 { + t.Error(fmt.Sprintf("Unexpected max number of txn events, expected %d but got %d", 15, result)) + } +} + +func TestConfigurableTxnEvents_notInCollResponse(t *testing.T) { + reply, err := internal.ConstructConnectReply([]byte( + `{"return_value":{ + "event_harvest_config": { + "report_period_ms": 10000 + } + }}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + expected := 10 + cfg.TransactionEvents.MaxSamplesStored = expected + result := newAppRun(cfg, reply).MaxTxnEvents() + if result != expected { + t.Error(fmt.Sprintf("Unexpected max number of txn events, expected %d but got %d", expected, result)) + } +} + +func TestConfigurableTxnEvents_configMoreThanMax(t *testing.T) { + h, err := internal.ConstructConnectReply([]byte( + `{"return_value":{ + "event_harvest_config": { 
+ "report_period_ms": 10000 + } + }}`), internal.PreconnectReply{}) + if nil != err { + t.Fatal(err) + } + cfg.TransactionEvents.MaxSamplesStored = internal.MaxTxnEvents + 100 + result := newAppRun(cfg, h).MaxTxnEvents() + if result != internal.MaxTxnEvents { + t.Error(fmt.Sprintf("Unexpected max number of txn events, expected %d but got %d", internal.MaxTxnEvents, result)) + } +} + +type expectHarvestConfig struct { + maxTxnEvents int + maxCustomEvents int + maxErrorEvents int + maxSpanEvents int + periods map[internal.HarvestTypes]time.Duration +} + +func assertHarvestConfig(t testing.TB, hc *internal.HarvestConfigurer, expect expectHarvestConfig) { + if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } + if max := (*hc).MaxTxnEvents(); max != expect.maxTxnEvents { + t.Error(max, expect.maxTxnEvents) + } + if max := (*hc).MaxCustomEvents(); max != expect.maxCustomEvents { + t.Error(max, expect.maxCustomEvents) + } + if max := (*hc).MaxSpanEvents(); max != expect.maxSpanEvents { + t.Error(max, expect.maxSpanEvents) + } + if max := (*hc).MaxErrorEvents(); max != expect.maxErrorEvents { + t.Error(max, expect.maxErrorEvents) + } + if periods := (*hc).ReportPeriods(); !reflect.DeepEqual(periods, expect.periods) { + t.Error(periods, expect.periods) + } +} diff --git a/application.go b/application.go new file mode 100644 index 000000000..61702adf0 --- /dev/null +++ b/application.go @@ -0,0 +1,67 @@ +package newrelic + +import ( + "net/http" + "time" +) + +// Application represents your application. +type Application interface { + // StartTransaction begins a Transaction. + // * Transaction.NewGoroutine() must be used to pass the Transaction + // between goroutines. + // * This method never returns nil. + // * The Transaction is considered a web transaction if an http.Request + // is provided. + // * The transaction returned implements the http.ResponseWriter + // interface. 
Provide your ResponseWriter as a parameter and + // then use the Transaction in its place to instrument the response + // code and response headers. + StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction + + // RecordCustomEvent adds a custom event. + // + // eventType must consist of alphanumeric characters, underscores, and + // colons, and must contain fewer than 255 bytes. + // + // Each value in the params map must be a number, string, or boolean. + // Keys must be less than 255 bytes. The params map may not contain + // more than 64 attributes. For more information, and a set of + // restricted keywords, see: + // + // https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents + // + // An error is returned if event type or params is invalid. + RecordCustomEvent(eventType string, params map[string]interface{}) error + + // RecordCustomMetric records a custom metric. The metric name you + // provide will be prefixed by "Custom/". Custom metrics are not + // currently supported in serverless mode. + // + // https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-data/collect-custom-metrics + RecordCustomMetric(name string, value float64) error + + // WaitForConnection blocks until the application is connected, is + // incapable of being connected, or the timeout has been reached. This + // method is useful for short-lived processes since the application will + // not gather data until it is connected. nil is returned if the + // application is connected successfully. + WaitForConnection(timeout time.Duration) error + + // Shutdown flushes data to New Relic's servers and stops all + // agent-related goroutines managing this application. After Shutdown + // is called, The application is disabled and will never collect data + // again. This method blocks until all final data is sent to New Relic + // or the timeout has elapsed. 
Increase the timeout and check debug + // logs if you aren't seeing data. + Shutdown(timeout time.Duration) +} + +// NewApplication creates an Application and spawns goroutines to manage the +// aggregation and harvesting of data. On success, a non-nil Application and a +// nil error are returned. On failure, a nil Application and a non-nil error +// are returned. Applications do not share global state, therefore it is safe +// to create multiple applications. +func NewApplication(c Config) (Application, error) { + return newApp(c) +} diff --git a/attributes.go b/attributes.go new file mode 100644 index 000000000..26c29c8e4 --- /dev/null +++ b/attributes.go @@ -0,0 +1,116 @@ +package newrelic + +// This file contains the names of the automatically captured attributes. +// Attributes are key value pairs attached to transaction events, error events, +// and traced errors. You may add your own attributes using the +// Transaction.AddAttribute method (see transaction.go). +// +// These attribute names are exposed here to facilitate configuration. +// +// For more information, see: +// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes + +// Attributes destined for Transaction Events, Errors, and Transaction Traces: +const ( + // AttributeResponseCode is the response status code for a web request. + AttributeResponseCode = "httpResponseCode" + // AttributeRequestMethod is the request's method. + AttributeRequestMethod = "request.method" + // AttributeRequestAccept is the request's "Accept" header. + AttributeRequestAccept = "request.headers.accept" + // AttributeRequestContentType is the request's "Content-Type" header. + AttributeRequestContentType = "request.headers.contentType" + // AttributeRequestContentLength is the request's "Content-Length" header. + AttributeRequestContentLength = "request.headers.contentLength" + // AttributeRequestHost is the request's "Host" header. 
+ AttributeRequestHost = "request.headers.host" + // AttributeRequestURI is the request's URL without query parameters, + // fragment, user, or password. + AttributeRequestURI = "request.uri" + // AttributeResponseContentType is the response "Content-Type" header. + AttributeResponseContentType = "response.headers.contentType" + // AttributeResponseContentLength is the response "Content-Length" header. + AttributeResponseContentLength = "response.headers.contentLength" + // AttributeHostDisplayName contains the value of Config.HostDisplayName. + AttributeHostDisplayName = "host.displayName" +) + +// Attributes destined for Errors and Transaction Traces: +const ( + // AttributeRequestUserAgent is the request's "User-Agent" header. + AttributeRequestUserAgent = "request.headers.User-Agent" + // AttributeRequestReferer is the request's "Referer" header. Query + // string parameters are removed. + AttributeRequestReferer = "request.headers.referer" +) + +// AWS Lambda specific attributes: +const ( + AttributeAWSRequestID = "aws.requestId" + AttributeAWSLambdaARN = "aws.lambda.arn" + AttributeAWSLambdaColdStart = "aws.lambda.coldStart" + AttributeAWSLambdaEventSourceARN = "aws.lambda.eventSource.arn" +) + +// Attributes for consumed message transactions: +// +// When a message is consumed (for example from Kafka or RabbitMQ), supported +// instrumentation packages -- i.e. those found in the _integrations +// (https://godoc.org/github.com/newrelic/go-agent/_integrations) directory -- +// will add these attributes automatically. `AttributeMessageExchangeType`, +// `AttributeMessageReplyTo`, and `AttributeMessageCorrelationID` are disabled +// by default. 
To see these attributes added to all destinations, you must +// include them in your config settings: +// +// cfg.Attributes.Include = append(cfg.Attributes.Include, +// AttributeMessageExchangeType, AttributeMessageReplyTo, +// AttributeMessageCorrelationID) +// +// When not using a supported instrumentation package, you can add these +// attributes manually using the `Transaction.AddAttribute` +// (https://godoc.org/github.com/newrelic/go-agent#Transaction) API. In this +// case, these attributes will be included on all destinations by default. +// +// txn := app.StartTransaction("Message/RabbitMQ/Exchange/Named/MyExchange", nil, nil) +// txn.AddAttribute(AttributeMessageRoutingKey, "myRoutingKey") +// txn.AddAttribute(AttributeMessageQueueName, "myQueueName") +// txn.AddAttribute(AttributeMessageExchangeType, "myExchangeType") +// txn.AddAttribute(AttributeMessageReplyTo, "myReplyTo") +// txn.AddAttribute(AttributeMessageCorrelationID, "myCorrelationID") +// // ... consume a message ... +// txn.End() +// +// It is recommended that at most one message is consumed per transaction. +const ( + // The routing key of the consumed message. + AttributeMessageRoutingKey = "message.routingKey" + // The name of the queue the message was consumed from. + AttributeMessageQueueName = "message.queueName" + // The type of exchange used for the consumed message (direct, fanout, + // topic, or headers). + AttributeMessageExchangeType = "message.exchangeType" + // The callback queue used in RPC configurations. + AttributeMessageReplyTo = "message.replyTo" + // The application-generated identifier used in RPC configurations. 
+ AttributeMessageCorrelationID = "message.correlationId" +) + +// Attributes destined for Span Events: +// +// To disable the capture of one of these span event attributes, db.statement +// for example, modify your Config like this: +// +// cfg.SpanEvents.Attributes.Exclude = append(cfg.SpanEvents.Attributes.Exclude, +// newrelic.SpanAttributeDBStatement) +const ( + SpanAttributeDBStatement = "db.statement" + SpanAttributeDBInstance = "db.instance" + SpanAttributeDBCollection = "db.collection" + SpanAttributePeerAddress = "peer.address" + SpanAttributePeerHostname = "peer.hostname" + SpanAttributeHTTPURL = "http.url" + SpanAttributeHTTPMethod = "http.method" + SpanAttributeAWSOperation = "aws.operation" + SpanAttributeAWSRequestID = "aws.requestId" + SpanAttributeAWSRegion = "aws.region" +) diff --git a/browser_header.go b/browser_header.go new file mode 100644 index 000000000..0df1d6f22 --- /dev/null +++ b/browser_header.go @@ -0,0 +1,80 @@ +package newrelic + +import ( + "encoding/json" +) + +var ( + browserStartTag = []byte(``) + browserInfoPrefix = []byte(`window.NREUM||(NREUM={});NREUM.info=`) +) + +// browserInfo contains the fields that are marshalled into the Browser agent's +// info hash. +// +// https://newrelic.atlassian.net/wiki/spaces/eng/pages/50299103/BAM+Agent+Auto-Instrumentation +type browserInfo struct { + Beacon string `json:"beacon"` + LicenseKey string `json:"licenseKey"` + ApplicationID string `json:"applicationID"` + TransactionName string `json:"transactionName"` + QueueTimeMillis int64 `json:"queueTime"` + ApplicationTimeMillis int64 `json:"applicationTime"` + ObfuscatedAttributes string `json:"atts"` + ErrorBeacon string `json:"errorBeacon"` + Agent string `json:"agent"` +} + +// BrowserTimingHeader encapsulates the JavaScript required to enable New +// Relic's Browser product. 
+type BrowserTimingHeader struct { + agentLoader string + info browserInfo +} + +func appendSlices(slices ...[]byte) []byte { + length := 0 + for _, s := range slices { + length += len(s) + } + combined := make([]byte, 0, length) + for _, s := range slices { + combined = append(combined, s...) + } + return combined +} + +// WithTags returns the browser timing JavaScript which includes the enclosing +// tags. This method returns nil if the receiver is +// nil, the feature is disabled, the application is not yet connected, or an +// error occurs. The byte slice returned is in UTF-8 format. +func (h *BrowserTimingHeader) WithTags() []byte { + withoutTags := h.WithoutTags() + if nil == withoutTags { + return nil + } + return appendSlices(browserStartTag, withoutTags, browserEndTag) +} + +// WithoutTags returns the browser timing JavaScript without any enclosing tags, +// which may then be embedded within any JavaScript code. This method returns +// nil if the receiver is nil, the feature is disabled, the application is not +// yet connected, or an error occurs. The byte slice returned is in UTF-8 +// format. +func (h *BrowserTimingHeader) WithoutTags() []byte { + if nil == h { + return nil + } + + // We could memoise this, but it seems unnecessary, since most users are + // going to call this zero or one times. + info, err := json.Marshal(h.info) + if err != nil { + // There's no way to log from here, but this also should be unreachable in + // practice. + return nil + } + + return appendSlices([]byte(h.agentLoader), browserInfoPrefix, info) +} diff --git a/browser_header_test.go b/browser_header_test.go new file mode 100644 index 000000000..7aa75562f --- /dev/null +++ b/browser_header_test.go @@ -0,0 +1,65 @@ +package newrelic + +import ( + "fmt" + "testing" + + "github.com/newrelic/go-agent/internal" +) + +func TestNilBrowserTimingHeader(t *testing.T) { + var h *BrowserTimingHeader + + // The methods on a nil BrowserTimingHeader pointer should not panic. 
+ + if out := h.WithTags(); out != nil { + t.Errorf("unexpected WithTags output for a disabled header: expected a blank string; got %s", out) + } + + if out := h.WithoutTags(); out != nil { + t.Errorf("unexpected WithoutTags output for a disabled header: expected a blank string; got %s", out) + } +} + +func TestEnabled(t *testing.T) { + // We're not trying to test Go's JSON marshalling here; we just want to + // ensure that we get the right fields out the other side. + expectInfo := internal.CompactJSONString(` + { + "beacon": "brecon", + "licenseKey": "12345", + "applicationID": "app", + "transactionName": "txn", + "queueTime": 1, + "applicationTime": 2, + "atts": "attrs", + "errorBeacon": "blah", + "agent": "bond" + } + `) + + h := &BrowserTimingHeader{ + agentLoader: "loader();", + info: browserInfo{ + Beacon: "brecon", + LicenseKey: "12345", + ApplicationID: "app", + TransactionName: "txn", + QueueTimeMillis: 1, + ApplicationTimeMillis: 2, + ObfuscatedAttributes: "attrs", + ErrorBeacon: "blah", + Agent: "bond", + }, + } + + expected := fmt.Sprintf("%s%s%s%s%s", browserStartTag, h.agentLoader, browserInfoPrefix, expectInfo, browserEndTag) + if actual := h.WithTags(); string(actual) != expected { + t.Errorf("unexpected WithTags output: expected %s; got %s", expected, string(actual)) + } + + expected = fmt.Sprintf("%s%s%s", h.agentLoader, browserInfoPrefix, expectInfo) + if actual := h.WithoutTags(); string(actual) != expected { + t.Errorf("unexpected WithoutTags output: expected %s; got %s", expected, string(actual)) + } +} diff --git a/config.go b/config.go new file mode 100644 index 000000000..437192d29 --- /dev/null +++ b/config.go @@ -0,0 +1,413 @@ +package newrelic + +import ( + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/newrelic/go-agent/internal" +) + +// Config contains Application and Transaction behavior settings. +// Use NewConfig to create a Config with proper defaults. 
+type Config struct { + // AppName is used by New Relic to link data across servers. + // + // https://docs.newrelic.com/docs/apm/new-relic-apm/installation-configuration/naming-your-application + AppName string + + // License is your New Relic license key. + // + // https://docs.newrelic.com/docs/accounts/install-new-relic/account-setup/license-key + License string + + // Logger controls go-agent logging. For info level logging to stdout: + // + // cfg.Logger = newrelic.NewLogger(os.Stdout) + // + // For debug level logging to stdout: + // + // cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + // + // See https://github.com/newrelic/go-agent/blob/master/GUIDE.md#logging + // for more examples and logging integrations. + Logger Logger + + // Enabled controls whether the agent will communicate with the New Relic + // servers and spawn goroutines. Setting this to be false is useful in + // testing and staging situations. + Enabled bool + + // Labels are key value pairs used to roll up applications into specific + // categories. + // + // https://docs.newrelic.com/docs/using-new-relic/user-interface-functions/organize-your-data/labels-categories-organize-apps-monitors + Labels map[string]string + + // HighSecurity guarantees that certain agent settings can not be made + // more permissive. This setting must match the corresponding account + // setting in the New Relic UI. + // + // https://docs.newrelic.com/docs/agents/manage-apm-agents/configuration/high-security-mode + HighSecurity bool + + // SecurityPoliciesToken enables security policies if set to a non-empty + // string. Only set this if security policies have been enabled on your + // account. This cannot be used in conjunction with HighSecurity. + // + // https://docs.newrelic.com/docs/agents/manage-apm-agents/configuration/enable-configurable-security-policies + SecurityPoliciesToken string + + // CustomInsightsEvents controls the behavior of + // Application.RecordCustomEvent. 
+ // + // https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents + CustomInsightsEvents struct { + // Enabled controls whether RecordCustomEvent will collect + // custom analytics events. High security mode overrides this + // setting. + Enabled bool + } + + // TransactionEvents controls the behavior of transaction analytics + // events. + TransactionEvents struct { + // Enabled controls whether transaction events are captured. + Enabled bool + // Attributes controls the attributes included with transaction + // events. + Attributes AttributeDestinationConfig + // MaxSamplesStored allows you to limit the number of Transaction + // Events stored/reported in a given 60-second period + MaxSamplesStored int + } + + // ErrorCollector controls the capture of errors. + ErrorCollector struct { + // Enabled controls whether errors are captured. This setting + // affects both traced errors and error analytics events. + Enabled bool + // CaptureEvents controls whether error analytics events are + // captured. + CaptureEvents bool + // IgnoreStatusCodes controls which http response codes are + // automatically turned into errors. By default, response codes + // greater than or equal to 400, with the exception of 404, are + // turned into errors. + IgnoreStatusCodes []int + // Attributes controls the attributes included with errors. + Attributes AttributeDestinationConfig + } + + // TransactionTracer controls the capture of transaction traces. + TransactionTracer struct { + // Enabled controls whether transaction traces are captured. + Enabled bool + // Threshold controls whether a transaction trace will be + // considered for capture. Of the traces exceeding the + // threshold, the slowest trace every minute is captured. + Threshold struct { + // If IsApdexFailing is true then the trace threshold is + // four times the apdex threshold. 
+ IsApdexFailing bool + // If IsApdexFailing is false then this field is the + // threshold, otherwise it is ignored. + Duration time.Duration + } + // SegmentThreshold is the threshold at which segments will be + // added to the trace. Lowering this setting may increase + // overhead. Decrease this duration if your Transaction Traces are + // missing segments. + SegmentThreshold time.Duration + // StackTraceThreshold is the threshold at which segments will + // be given a stack trace in the transaction trace. Lowering + // this setting will increase overhead. + StackTraceThreshold time.Duration + // Attributes controls the attributes included with transaction + // traces. + Attributes AttributeDestinationConfig + // Segments.Attributes controls the attributes included with + // each trace segment. + Segments struct { + Attributes AttributeDestinationConfig + } + } + + // BrowserMonitoring contains settings which control the behavior of + // Transaction.BrowserTimingHeader. + BrowserMonitoring struct { + // Enabled controls whether or not the Browser monitoring feature is + // enabled. + Enabled bool + // Attributes controls the attributes included with Browser monitoring. + // BrowserMonitoring.Attributes.Enabled is false by default, to include + // attributes in the Browser timing Javascript: + // + // cfg.BrowserMonitoring.Attributes.Enabled = true + Attributes AttributeDestinationConfig + } + + // HostDisplayName gives this server a recognizable name in the New + // Relic UI. This is an optional setting. + HostDisplayName string + + // Transport customizes communication with the New Relic servers. This may + // be used to configure a proxy. + Transport http.RoundTripper + + // Utilization controls the detection and gathering of system + // information. + Utilization struct { + // DetectAWS controls whether the Application attempts to detect + // AWS. + DetectAWS bool + // DetectAzure controls whether the Application attempts to detect + // Azure. 
+ DetectAzure bool + // DetectPCF controls whether the Application attempts to detect + // PCF. + DetectPCF bool + // DetectGCP controls whether the Application attempts to detect + // GCP. + DetectGCP bool + // DetectDocker controls whether the Application attempts to + // detect Docker. + DetectDocker bool + // DetectKubernetes controls whether the Application attempts to + // detect Kubernetes. + DetectKubernetes bool + + // These settings provide system information when custom values + // are required. + LogicalProcessors int + TotalRAMMIB int + BillingHostname string + } + + // CrossApplicationTracer controls behaviour relating to cross application + // tracing (CAT), available since Go Agent v0.11. The + // CrossApplicationTracer and the DistributedTracer cannot be + // simultaneously enabled. + // + // https://docs.newrelic.com/docs/apm/transactions/cross-application-traces/introduction-cross-application-traces + CrossApplicationTracer struct { + Enabled bool + } + + // DistributedTracer controls behaviour relating to Distributed Tracing, + // available since Go Agent v2.1. The DistributedTracer and the + // CrossApplicationTracer cannot be simultaneously enabled. + // + // https://docs.newrelic.com/docs/apm/distributed-tracing/getting-started/introduction-distributed-tracing + DistributedTracer struct { + Enabled bool + } + + // SpanEvents controls behavior relating to Span Events. Span Events + // require that DistributedTracer is enabled. + SpanEvents struct { + Enabled bool + Attributes AttributeDestinationConfig + } + + // DatastoreTracer controls behavior relating to datastore segments. + DatastoreTracer struct { + // InstanceReporting controls whether the host and port are collected + // for datastore segments. + InstanceReporting struct { + Enabled bool + } + // DatabaseNameReporting controls whether the database name is + // collected for datastore segments. 
+ DatabaseNameReporting struct { + Enabled bool + } + QueryParameters struct { + Enabled bool + } + // SlowQuery controls the capture of slow query traces. Slow + // query traces show you instances of your slowest datastore + // segments. + SlowQuery struct { + Enabled bool + Threshold time.Duration + } + } + + // Attributes controls which attributes are enabled and disabled globally. + // This setting affects all attribute destinations: Transaction Events, + // Error Events, Transaction Traces and segments, Traced Errors, Span + // Events, and Browser timing header. + Attributes AttributeDestinationConfig + + // RuntimeSampler controls the collection of runtime statistics like + // CPU/Memory usage, goroutine count, and GC pauses. + RuntimeSampler struct { + // Enabled controls whether runtime statistics are captured. + Enabled bool + } + + // ServerlessMode contains fields which control behavior when running in + // AWS Lambda. + // + // https://docs.newrelic.com/docs/serverless-function-monitoring/aws-lambda-monitoring/get-started/introduction-new-relic-monitoring-aws-lambda + ServerlessMode struct { + // Enabling ServerlessMode will print each transaction's data to + // stdout. No agent goroutines will be spawned in serverless mode, and + // no data will be sent directly to the New Relic backend. + // nrlambda.NewConfig sets Enabled to true. + Enabled bool + // ApdexThreshold sets the Apdex threshold when in ServerlessMode. The + // default is 500 milliseconds. nrlambda.NewConfig populates this + // field using the NEW_RELIC_APDEX_T environment variable. + // + // https://docs.newrelic.com/docs/apm/new-relic-apm/apdex/apdex-measure-user-satisfaction + ApdexThreshold time.Duration + // AccountID, TrustedAccountKey, and PrimaryAppID are used for + // distributed tracing in ServerlessMode. AccountID and + // TrustedAccountKey must be populated for distributed tracing to be + // enabled. 
nrlambda.NewConfig populates these fields using the + // NEW_RELIC_ACCOUNT_ID, NEW_RELIC_TRUSTED_ACCOUNT_KEY, and + // NEW_RELIC_PRIMARY_APPLICATION_ID environment variables. + AccountID string + TrustedAccountKey string + PrimaryAppID string + } +} + +// AttributeDestinationConfig controls the attributes sent to each destination. +// For more information, see: +// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-data/agent-attributes +type AttributeDestinationConfig struct { + // Enabled controls whether or not this destination will get any + // attributes at all. For example, to prevent any attributes from being + // added to errors, set: + // + // cfg.ErrorCollector.Attributes.Enabled = false + // + Enabled bool + Include []string + // Exclude allows you to prevent the capture of certain attributes. For + // example, to prevent the capture of the request URL attribute + // "request.uri", set: + // + // cfg.Attributes.Exclude = append(cfg.Attributes.Exclude, newrelic.AttributeRequestURI) + // + // The '*' character acts as a wildcard. For example, to prevent the + // capture of all request related attributes, set: + // + // cfg.Attributes.Exclude = append(cfg.Attributes.Exclude, "request.*") + // + Exclude []string +} + +// NewConfig creates a Config populated with default settings and the given +// appname and license. 
+func NewConfig(appname, license string) Config { + c := Config{} + + c.AppName = appname + c.License = license + c.Enabled = true + c.Labels = make(map[string]string) + c.CustomInsightsEvents.Enabled = true + c.TransactionEvents.Enabled = true + c.TransactionEvents.Attributes.Enabled = true + c.TransactionEvents.MaxSamplesStored = internal.MaxTxnEvents + c.HighSecurity = false + c.ErrorCollector.Enabled = true + c.ErrorCollector.CaptureEvents = true + c.ErrorCollector.IgnoreStatusCodes = []int{ + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md + 0, // gRPC OK + 5, // gRPC NOT_FOUND + http.StatusNotFound, // 404 + } + c.ErrorCollector.Attributes.Enabled = true + c.Utilization.DetectAWS = true + c.Utilization.DetectAzure = true + c.Utilization.DetectPCF = true + c.Utilization.DetectGCP = true + c.Utilization.DetectDocker = true + c.Utilization.DetectKubernetes = true + c.Attributes.Enabled = true + c.RuntimeSampler.Enabled = true + + c.TransactionTracer.Enabled = true + c.TransactionTracer.Threshold.IsApdexFailing = true + c.TransactionTracer.Threshold.Duration = 500 * time.Millisecond + c.TransactionTracer.SegmentThreshold = 2 * time.Millisecond + c.TransactionTracer.StackTraceThreshold = 500 * time.Millisecond + c.TransactionTracer.Attributes.Enabled = true + c.TransactionTracer.Segments.Attributes.Enabled = true + + c.BrowserMonitoring.Enabled = true + // browser monitoring attributes are disabled by default + c.BrowserMonitoring.Attributes.Enabled = false + + c.CrossApplicationTracer.Enabled = true + c.DistributedTracer.Enabled = false + c.SpanEvents.Enabled = true + c.SpanEvents.Attributes.Enabled = true + + c.DatastoreTracer.InstanceReporting.Enabled = true + c.DatastoreTracer.DatabaseNameReporting.Enabled = true + c.DatastoreTracer.QueryParameters.Enabled = true + c.DatastoreTracer.SlowQuery.Enabled = true + c.DatastoreTracer.SlowQuery.Threshold = 10 * time.Millisecond + + c.ServerlessMode.ApdexThreshold = 500 * time.Millisecond + 
c.ServerlessMode.Enabled = false + + return c +} + +const ( + licenseLength = 40 + appNameLimit = 3 +) + +// The following errors will be returned if your Config fails to validate. +var ( + errLicenseLen = fmt.Errorf("license length is not %d", licenseLength) + errAppNameMissing = errors.New("string AppName required") + errAppNameLimit = fmt.Errorf("max of %d rollup application names", appNameLimit) + errHighSecurityWithSecurityPolicies = errors.New("SecurityPoliciesToken and HighSecurity are incompatible; please ensure HighSecurity is set to false if SecurityPoliciesToken is a non-empty string and a security policy has been set for your account") +) + +// Validate checks the config for improper fields. If the config is invalid, +// newrelic.NewApplication returns an error. +func (c Config) Validate() error { + if c.Enabled && !c.ServerlessMode.Enabled { + if len(c.License) != licenseLength { + return errLicenseLen + } + } else { + // The License may be empty when the agent is not enabled. + if len(c.License) != licenseLength && len(c.License) != 0 { + return errLicenseLen + } + } + if "" == c.AppName && c.Enabled && !c.ServerlessMode.Enabled { + return errAppNameMissing + } + if c.HighSecurity && "" != c.SecurityPoliciesToken { + return errHighSecurityWithSecurityPolicies + } + if strings.Count(c.AppName, ";") >= appNameLimit { + return errAppNameLimit + } + return nil +} + +// MaxTxnEvents returns the configured maximum number of Transaction Events if it has been configured +// and is less than the default maximum; otherwise it returns the default max. 
+func (c Config) MaxTxnEvents() int { + configured := c.TransactionEvents.MaxSamplesStored + if configured < 0 || configured > internal.MaxTxnEvents { + return internal.MaxTxnEvents + } + return configured +} diff --git a/context.go b/context.go new file mode 100644 index 000000000..d0063135e --- /dev/null +++ b/context.go @@ -0,0 +1,49 @@ +// +build go1.7 + +package newrelic + +import ( + "context" + "net/http" + + "github.com/newrelic/go-agent/internal" +) + +// NewContext returns a new Context that carries the provided transaction. +func NewContext(ctx context.Context, txn Transaction) context.Context { + return context.WithValue(ctx, internal.TransactionContextKey, txn) +} + +// FromContext returns the Transaction from the context if present, and nil +// otherwise. +func FromContext(ctx context.Context) Transaction { + h, _ := ctx.Value(internal.TransactionContextKey).(Transaction) + if nil != h { + return h + } + // If we couldn't find a transaction using + // internal.TransactionContextKey, try with + // internal.GinTransactionContextKey. Unfortunately, gin.Context.Set + // requires a string key, so we cannot use + // internal.TransactionContextKey in nrgin.Middleware. We check for two + // keys (rather than turning internal.TransactionContextKey into a + // string key) because context.WithValue will cause golint to complain + // if used with a string key. + h, _ = ctx.Value(internal.GinTransactionContextKey).(Transaction) + return h +} + +// RequestWithTransactionContext adds the transaction to the request's context. 
+func RequestWithTransactionContext(req *http.Request, txn Transaction) *http.Request { + ctx := req.Context() + ctx = NewContext(ctx, txn) + return req.WithContext(ctx) +} + +func transactionFromRequestContext(req *http.Request) Transaction { + var txn Transaction + if nil != req { + txn = FromContext(req.Context()) + } + return txn +} diff --git a/context_stub.go b/context_stub.go new file mode 100644 index 000000000..03ea58da3 --- /dev/null +++ b/context_stub.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package newrelic + +import "net/http" + +// RequestWithTransactionContext adds the transaction to the request's context. +func RequestWithTransactionContext(req *http.Request, txn Transaction) *http.Request { + return req +} + +func transactionFromRequestContext(req *http.Request) Transaction { + return nil +} diff --git a/datastore.go b/datastore.go new file mode 100644 index 000000000..fd393114a --- /dev/null +++ b/datastore.go @@ -0,0 +1,31 @@ +package newrelic + +// DatastoreProduct is used to identify your datastore type in New Relic. It +// is used in the DatastoreSegment Product field. See +// https://github.com/newrelic/go-agent/blob/master/datastore.go for the full +// list of available DatastoreProducts. 
+type DatastoreProduct string + +// Datastore names used across New Relic agents: +const ( + DatastoreCassandra DatastoreProduct = "Cassandra" + DatastoreDerby DatastoreProduct = "Derby" + DatastoreElasticsearch DatastoreProduct = "Elasticsearch" + DatastoreFirebird DatastoreProduct = "Firebird" + DatastoreIBMDB2 DatastoreProduct = "IBMDB2" + DatastoreInformix DatastoreProduct = "Informix" + DatastoreMemcached DatastoreProduct = "Memcached" + DatastoreMongoDB DatastoreProduct = "MongoDB" + DatastoreMySQL DatastoreProduct = "MySQL" + DatastoreMSSQL DatastoreProduct = "MSSQL" + DatastoreNeptune DatastoreProduct = "Neptune" + DatastoreOracle DatastoreProduct = "Oracle" + DatastorePostgres DatastoreProduct = "Postgres" + DatastoreRedis DatastoreProduct = "Redis" + DatastoreSolr DatastoreProduct = "Solr" + DatastoreSQLite DatastoreProduct = "SQLite" + DatastoreCouchDB DatastoreProduct = "CouchDB" + DatastoreRiak DatastoreProduct = "Riak" + DatastoreVoltDB DatastoreProduct = "VoltDB" + DatastoreDynamoDB DatastoreProduct = "DynamoDB" +) diff --git a/errors.go b/errors.go new file mode 100644 index 000000000..1a2da0e78 --- /dev/null +++ b/errors.go @@ -0,0 +1,59 @@ +package newrelic + +import "github.com/newrelic/go-agent/internal" + +// StackTracer can be implemented by errors to provide a stack trace when using +// Transaction.NoticeError. +type StackTracer interface { + StackTrace() []uintptr +} + +// ErrorClasser can be implemented by errors to provide a custom class when +// using Transaction.NoticeError. +type ErrorClasser interface { + ErrorClass() string +} + +// ErrorAttributer can be implemented by errors to provide extra context when +// using Transaction.NoticeError. +type ErrorAttributer interface { + ErrorAttributes() map[string]interface{} +} + +// Error is an error that implements ErrorClasser, ErrorAttributer, and +// StackTracer. Use it with Transaction.NoticeError to directly control error +// message, class, stacktrace, and attributes. 
+type Error struct { + // Message is the error message which will be returned by the Error() + // method. + Message string + // Class indicates how the error may be aggregated. + Class string + // Attributes are attached to traced errors and error events for + // additional context. These attributes are validated just like those + // added to `Transaction.AddAttribute`. + Attributes map[string]interface{} + // Stack is the stack trace. Assign this field using NewStackTrace, + // or leave it nil to indicate that Transaction.NoticeError should + // generate one. + Stack []uintptr +} + +// NewStackTrace generates a stack trace which can be assigned to the Error +// struct's Stack field or returned by an error that implements the StackTracer +// interface. +func NewStackTrace() []uintptr { + st := internal.GetStackTrace() + return []uintptr(st) +} + +func (e Error) Error() string { return e.Message } + +// ErrorClass implements the ErrorClasser interface. +func (e Error) ErrorClass() string { return e.Class } + +// ErrorAttributes implements the ErrorAttributer interface. +func (e Error) ErrorAttributes() map[string]interface{} { return e.Attributes } + +// StackTrace implements the StackTracer interface. +func (e Error) StackTrace() []uintptr { return e.Stack } diff --git a/examples/client-round-tripper/main.go b/examples/client-round-tripper/main.go new file mode 100644 index 000000000..27cbcd0e8 --- /dev/null +++ b/examples/client-round-tripper/main.go @@ -0,0 +1,67 @@ +// An application that illustrates Distributed Tracing or Cross Application +// Tracing when using NewRoundTripper. 
+package main + +import ( + "fmt" + "net/http" + "os" + "time" + + "github.com/newrelic/go-agent" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func doRequest(txn newrelic.Transaction) error { + for _, addr := range []string{"segments", "mysql"} { + url := fmt.Sprintf("http://localhost:8000/%s", addr) + req, err := http.NewRequest("GET", url, nil) + if nil != err { + return err + } + client := &http.Client{} + + // Using NewRoundTripper automatically instruments all request + // for Distributed Tracing and Cross Application Tracing. + client.Transport = newrelic.NewRoundTripper(txn, nil) + + resp, err := client.Do(req) + if nil != err { + return err + } + fmt.Println("response code is", resp.StatusCode) + } + return nil +} + +func main() { + cfg := newrelic.NewConfig("Client App RoundTripper", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + cfg.DistributedTracer.Enabled = true + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // Wait for the application to connect. + if err = app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + txn := app.StartTransaction("client-txn", nil, nil) + err = doRequest(txn) + if nil != err { + txn.NoticeError(err) + } + txn.End() + + // Shut down the application to flush data to New Relic. 
+ app.Shutdown(10 * time.Second) +} diff --git a/examples/client/main.go b/examples/client/main.go new file mode 100644 index 000000000..5be57af9e --- /dev/null +++ b/examples/client/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "net/http" + "os" + "time" + + "github.com/newrelic/go-agent" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func doRequest(txn newrelic.Transaction) error { + req, err := http.NewRequest("GET", "http://localhost:8000/segments", nil) + if nil != err { + return err + } + client := &http.Client{} + seg := newrelic.StartExternalSegment(txn, req) + defer seg.End() + resp, err := client.Do(req) + if nil != err { + return err + } + fmt.Println("response code is", resp.StatusCode) + return nil +} + +func main() { + cfg := newrelic.NewConfig("Client App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // Wait for the application to connect. + if err = app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + txn := app.StartTransaction("client-txn", nil, nil) + err = doRequest(txn) + if nil != err { + txn.NoticeError(err) + } + txn.End() + + // Shut down the application to flush data to New Relic. + app.Shutdown(10 * time.Second) +} diff --git a/examples/custom-instrumentation/main.go b/examples/custom-instrumentation/main.go new file mode 100644 index 000000000..ee18c00f6 --- /dev/null +++ b/examples/custom-instrumentation/main.go @@ -0,0 +1,106 @@ +// An application that illustrates Distributed Tracing with custom +// instrumentation. +// +// This application simulates simple inter-process communication between a +// calling and a called process. 
+// +// Invoked without arguments, the application acts as a calling process; +// invoked with one argument representing a payload, it acts as a called +// process. The calling process creates a payload, starts a called process and +// passes on the payload. The calling process waits until the called process is +// done and then terminates. Thus to start both processes, only a single +// invocation of the application (without any arguments) is needed. +package main + +import ( + "fmt" + "os" + "os/exec" + "time" + + "github.com/newrelic/go-agent" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func called(app newrelic.Application, payload string) { + txn := app.StartTransaction("called-txn", nil, nil) + defer txn.End() + + // Accept the payload that was passed on the command line. + txn.AcceptDistributedTracePayload(newrelic.TransportOther, payload) + time.Sleep(1 * time.Second) +} + +func calling(app newrelic.Application) { + txn := app.StartTransaction("calling-txn", nil, nil) + defer txn.End() + + // Create a payload, start the called process and pass the payload. + payload := txn.CreateDistributedTracePayload() + cmd := exec.Command(os.Args[0], payload.Text()) + cmd.Start() + + // Wait until the called process is done, then exit. + cmd.Wait() + time.Sleep(1 * time.Second) +} + +func makeApplication(name string) (newrelic.Application, error) { + cfg := newrelic.NewConfig(name, mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + + // Distributed Tracing and Cross Application Tracing cannot both be + // enabled at the same time. + cfg.DistributedTracer.Enabled = true + + app, err := newrelic.NewApplication(cfg) + + if nil != err { + return nil, err + } + + // Wait for the application to connect. 
+ if err = app.WaitForConnection(5 * time.Second); nil != err { + return nil, err + } + + return app, nil +} + +func main() { + // Calling processes have no command line arguments, called processes + // have one command line argument (the payload). + isCalled := (len(os.Args) > 1) + + // Initialize the application name. + name := "Go Custom Instrumentation" + if isCalled { + name += " Called" + } else { + name += " Calling" + } + + // Initialize the application. + app, err := makeApplication(name) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // Run calling/called routines. + if isCalled { + payload := os.Args[1] + called(app, payload) + } else { + calling(app) + } + + // Shut down the application to flush data to New Relic. + app.Shutdown(10 * time.Second) +} diff --git a/examples/server-http/main.go b/examples/server-http/main.go new file mode 100644 index 000000000..49e140e7d --- /dev/null +++ b/examples/server-http/main.go @@ -0,0 +1,81 @@ +// An application that illustrates Distributed Tracing or Cross Application +// Tracing when using http.Server or similar frameworks. +package main + +import ( + "fmt" + "io" + "net/http" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" +) + +type handler struct { + App newrelic.Application +} + +func (h *handler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { + // The call to StartTransaction must include the response writer and the + // request. + txn := h.App.StartTransaction("server-txn", writer, req) + defer txn.End() + + if req.URL.String() == "/segments" { + defer newrelic.StartSegment(txn, "f1").End() + + func() { + defer newrelic.StartSegment(txn, "f2").End() + + io.WriteString(writer, "segments!") + time.Sleep(10 * time.Millisecond) + }() + time.Sleep(10 * time.Millisecond) + } else { + // Transaction.WriteHeader has to be used instead of invoking + // WriteHeader on the response writer. 
+ txn.WriteHeader(http.StatusNotFound) + } +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func makeApplication() (newrelic.Application, error) { + cfg := newrelic.NewConfig("HTTP Server App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + cfg.DistributedTracer.Enabled = true + app, err := newrelic.NewApplication(cfg) + + if nil != err { + return nil, err + } + + // Wait for the application to connect. + if err = app.WaitForConnection(5 * time.Second); nil != err { + return nil, err + } + + return app, nil +} + +func main() { + + app, err := makeApplication() + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + server := http.Server{ + Addr: ":8000", + Handler: &handler{App: app}, + } + + server.ListenAndServe() +} diff --git a/examples/server/main.go b/examples/server/main.go new file mode 100644 index 000000000..d6be7dced --- /dev/null +++ b/examples/server/main.go @@ -0,0 +1,296 @@ +// +build go1.7 + +package main + +import ( + "errors" + "fmt" + "io" + "log" + "math/rand" + "net/http" + "os" + "sync" + "time" + + newrelic "github.com/newrelic/go-agent" +) + +func index(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hello world") +} + +func versionHandler(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "New Relic Go Agent Version: "+newrelic.Version) +} + +func noticeError(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "noticing an error") + + if txn := newrelic.FromContext(r.Context()); txn != nil { + txn.NoticeError(errors.New("my error message")) + } +} + +func noticeErrorWithAttributes(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "noticing an error") + + if txn := newrelic.FromContext(r.Context()); txn != nil { + txn.NoticeError(newrelic.Error{ + Message: "uh oh. 
something went very wrong", + Class: "errors are aggregated by class", + Attributes: map[string]interface{}{ + "important_number": 97232, + "relevant_string": "zap", + }, + }) + } +} + +func customEvent(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + + io.WriteString(w, "recording a custom event") + + if nil != txn { + txn.Application().RecordCustomEvent("my_event_type", map[string]interface{}{ + "myString": "hello", + "myFloat": 0.603, + "myInt": 123, + "myBool": true, + }) + } +} + +func setName(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "changing the transaction's name") + + if txn := newrelic.FromContext(r.Context()); txn != nil { + txn.SetName("other-name") + } +} + +func addAttribute(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "adding attributes") + + if txn := newrelic.FromContext(r.Context()); txn != nil { + txn.AddAttribute("myString", "hello") + txn.AddAttribute("myInt", 123) + } +} + +func ignore(w http.ResponseWriter, r *http.Request) { + if coinFlip := (0 == rand.Intn(2)); coinFlip { + if txn := newrelic.FromContext(r.Context()); txn != nil { + txn.Ignore() + } + io.WriteString(w, "ignoring the transaction") + } else { + io.WriteString(w, "not ignoring the transaction") + } +} + +func segments(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + + func() { + defer newrelic.StartSegment(txn, "f1").End() + + func() { + defer newrelic.StartSegment(txn, "f2").End() + + io.WriteString(w, "segments!") + time.Sleep(10 * time.Millisecond) + }() + time.Sleep(15 * time.Millisecond) + }() + time.Sleep(20 * time.Millisecond) +} + +func mysql(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + s := newrelic.DatastoreSegment{ + StartTime: newrelic.StartSegmentNow(txn), + // Product, Collection, and Operation are the most important + // fields to populate because they are used in the breakdown + // metrics. 
+ Product: newrelic.DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + QueryParameters: map[string]interface{}{ + "name": "Dracula", + "age": 439, + }, + Host: "mysql-server-1", + PortPathOrID: "3306", + DatabaseName: "my_database", + } + defer s.End() + + time.Sleep(20 * time.Millisecond) + io.WriteString(w, `performing fake query "INSERT * from users"`) +} + +func message(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + s := newrelic.MessageProducerSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Library: "RabbitMQ", + DestinationType: newrelic.MessageQueue, + DestinationName: "myQueue", + } + defer s.End() + + time.Sleep(20 * time.Millisecond) + io.WriteString(w, `producing a message queue message`) +} + +func external(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + req, _ := http.NewRequest("GET", "http://example.com", nil) + + // Using StartExternalSegment is recommended because it does distributed + // tracing header setup, but if you don't have an *http.Request and + // instead only have a url string then you can start the external + // segment like this: + // + // es := newrelic.ExternalSegment{ + // StartTime: newrelic.StartSegmentNow(txn), + // URL: urlString, + // } + // + es := newrelic.StartExternalSegment(txn, req) + resp, err := http.DefaultClient.Do(req) + es.End() + + if nil != err { + io.WriteString(w, err.Error()) + return + } + defer resp.Body.Close() + io.Copy(w, resp.Body) +} + +func roundtripper(w http.ResponseWriter, r *http.Request) { + // NewRoundTripper allows you to instrument external calls without + // calling StartExternalSegment by modifying the http.Client's Transport + // field. If the Transaction parameter is nil, the RoundTripper + // returned will look for a Transaction in the request's context (using + // FromContext). 
This is recommended because it allows you to reuse the + // same client for multiple transactions. + client := &http.Client{} + client.Transport = newrelic.NewRoundTripper(nil, client.Transport) + + request, _ := http.NewRequest("GET", "http://example.com", nil) + // Since the transaction is already added to the inbound request's + // context by WrapHandleFunc, we just need to copy the context from the + // inbound request to the external request. + request = request.WithContext(r.Context()) + // Alternatively, if you don't want to copy entire context, and instead + // wanted just to add the transaction to the external request's context, + // you could do that like this: + // + // txn := newrelic.FromContext(r.Context()) + // request = newrelic.RequestWithTransactionContext(request, txn) + + resp, err := client.Do(request) + if nil != err { + io.WriteString(w, err.Error()) + return + } + defer resp.Body.Close() + io.Copy(w, resp.Body) +} + +func async(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + wg := &sync.WaitGroup{} + wg.Add(1) + go func(txn newrelic.Transaction) { + defer wg.Done() + defer newrelic.StartSegment(txn, "async").End() + time.Sleep(100 * time.Millisecond) + }(txn.NewGoroutine()) + + segment := newrelic.StartSegment(txn, "wg.Wait") + wg.Wait() + segment.End() + w.Write([]byte("done!")) +} + +func customMetric(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + for _, vals := range r.Header { + for _, v := range vals { + // This custom metric will have the name + // "Custom/HeaderLength" in the New Relic UI. 
+ if nil != txn { + txn.Application().RecordCustomMetric("HeaderLength", float64(len(v))) + } + } + } + io.WriteString(w, "custom metric recorded") +} + +func browser(w http.ResponseWriter, r *http.Request) { + txn := newrelic.FromContext(r.Context()) + hdr, err := txn.BrowserTimingHeader() + if nil != err { + log.Printf("unable to create browser timing header: %v", err) + } + // BrowserTimingHeader() will always return a header whose methods can + // be safely called. + if js := hdr.WithTags(); js != nil { + w.Write(js) + } + io.WriteString(w, "browser header page") +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Example App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + http.HandleFunc(newrelic.WrapHandleFunc(app, "/", index)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/version", versionHandler)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/notice_error", noticeError)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/notice_error_with_attributes", noticeErrorWithAttributes)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/custom_event", customEvent)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/set_name", setName)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/add_attribute", addAttribute)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/ignore", ignore)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/segments", segments)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/mysql", mysql)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/external", external)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/roundtripper", roundtripper)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/custommetric", customMetric)) + 
http.HandleFunc(newrelic.WrapHandleFunc(app, "/browser", browser)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/async", async)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/message", message)) + + http.HandleFunc("/background", func(w http.ResponseWriter, req *http.Request) { + // Transactions started without an http.Request are classified as + // background transactions. + txn := app.StartTransaction("background", nil, nil) + defer txn.End() + + io.WriteString(w, "background transaction") + time.Sleep(150 * time.Millisecond) + }) + + http.ListenAndServe(":8000", nil) +} diff --git a/examples/short-lived-process/main.go b/examples/short-lived-process/main.go new file mode 100644 index 000000000..ddf188024 --- /dev/null +++ b/examples/short-lived-process/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Short Lived App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // Wait for the application to connect. + if err := app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + // Do the tasks at hand. Perhaps record them using transactions and/or + // custom events. + tasks := []string{"white", "black", "red", "blue", "green", "yellow"} + for _, task := range tasks { + txn := app.StartTransaction("task", nil, nil) + time.Sleep(10 * time.Millisecond) + txn.End() + app.RecordCustomEvent("task", map[string]interface{}{ + "color": task, + }) + } + + // Shut down the application to flush data to New Relic. 
+	app.Shutdown(10 * time.Second) +} diff --git a/examples_test.go b/examples_test.go new file mode 100644 index 000000000..f17449bc4 --- /dev/null +++ b/examples_test.go @@ -0,0 +1,213 @@ +// +build go1.7 + +package newrelic + +import ( + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "time" +) + +func Example() { + // First create a Config. + cfg := NewConfig("Example Application", "__YOUR_NEW_RELIC_LICENSE_KEY__") + + // Modify Config fields to control agent behavior. + cfg.Logger = NewDebugLogger(os.Stdout) + + // Now use the Config to create an Application. + app, err := NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // Now you can use the Application to collect data! Create transactions + // to time inbound requests or background tasks. You can start and stop + // transactions directly using Application.StartTransaction and + // Transaction.End. + func() { + txn := app.StartTransaction("myTask", nil, nil) + defer txn.End() + + time.Sleep(time.Second) + }() + + // WrapHandler and WrapHandleFunc make it easy to instrument inbound web + // requests handled by the http standard library without calling + // StartTransaction. Popular framework instrumentation packages exist + // in the _integrations directory. + http.HandleFunc(WrapHandleFunc(app, "", func(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, "this is the index page") + })) + helloHandler := func(w http.ResponseWriter, req *http.Request) { + // WrapHandler and WrapHandleFunc add the transaction to the + // inbound request's context. Access the transaction using + // FromContext to add attributes, create segments, and notice + // errors. + txn := FromContext(req.Context())

 + func() { + // Segments help you understand where the time in your + // transaction is being spent. You can use them to time + // functions or arbitrary blocks of code. 
+ defer StartSegment(txn, "helperFunction").End() + }() + + io.WriteString(w, "hello world") + } + http.HandleFunc(WrapHandleFunc(app, "/hello", helloHandler)) + http.ListenAndServe(":8000", nil) +} + +func currentTransaction() Transaction { + return nil +} + +func ExampleNewRoundTripper() { + client := &http.Client{} + // The RoundTripper returned by NewRoundTripper instruments all requests + // done by this client with external segments. + client.Transport = NewRoundTripper(nil, client.Transport) + + request, _ := http.NewRequest("GET", "http://example.com", nil) + + // Be sure to add the current Transaction to each request's context so + // the Transport has access to it. + txn := currentTransaction() + request = RequestWithTransactionContext(request, txn) + + client.Do(request) +} + +func getApp() Application { + return nil +} + +func ExampleBrowserTimingHeader() { + handler := func(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, "
") + // The New Relic browser javascript should be placed as high in the + // HTML as possible. We suggest including it immediately after the + // opening tag and any tags. + if txn := FromContext(req.Context()); nil != txn { + hdr, err := txn.BrowserTimingHeader() + if nil != err { + log.Printf("unable to create browser timing header: %v", err) + } + // BrowserTimingHeader() will always return a header whose methods can + // be safely called. + if js := hdr.WithTags(); js != nil { + w.Write(js) + } + } + io.WriteString(w, "browser header page") + } + http.HandleFunc(WrapHandleFunc(getApp(), "/browser", handler)) + http.ListenAndServe(":8000", nil) +} + +func ExampleDatastoreSegment() { + txn := currentTransaction() + ds := &DatastoreSegment{ + StartTime: StartSegmentNow(txn), + // Product, Collection, and Operation are the primary metric + // aggregation fields which we encourage you to populate. + Product: DatastoreMySQL, + Collection: "users_table", + Operation: "SELECT", + } + // your database call here + ds.End() +} + +func ExampleMessageProducerSegment() { + txn := currentTransaction() + seg := &MessageProducerSegment{ + StartTime: StartSegmentNow(txn), + Library: "RabbitMQ", + DestinationType: MessageExchange, + DestinationName: "myExchange", + } + // add message to queue here + seg.End() +} + +func ExampleError() { + txn := currentTransaction() + username := "gopher" + e := fmt.Errorf("error unable to login user %s", username) + // txn.NoticeError(newrelic.Error{...}) instead of txn.NoticeError(e) + // allows more control over error fields. Class is how errors are + // aggregated and Attributes are added to the error event and error + // trace. 
+	txn.NoticeError(Error{ + Message: e.Error(), + Class: "LoginError", + Attributes: map[string]interface{}{ + "username": username, + }, + }) +} + +func ExampleExternalSegment() { + txn := currentTransaction() + client := &http.Client{} + request, _ := http.NewRequest("GET", "http://www.example.com", nil) + segment := StartExternalSegment(txn, request) + response, _ := client.Do(request) + segment.Response = response + segment.End() +} + +// StartExternalSegment is the recommended way of creating ExternalSegments. If +// you don't have access to an http.Request, however, you may create an +// ExternalSegment and control the URL manually. +func ExampleExternalSegment_url() { + txn := currentTransaction() + segment := ExternalSegment{ + StartTime: StartSegmentNow(txn), + // URL is parsed using url.Parse so it must include the protocol + // scheme (eg. "http://"). The host of the URL is used to + // create metrics. Change the host to alter aggregation. + URL: "http://www.example.com", + } + http.Get("http://www.example.com") + segment.End() +} + +func ExampleStartExternalSegment() { + txn := currentTransaction() + client := &http.Client{} + request, _ := http.NewRequest("GET", "http://www.example.com", nil) + segment := StartExternalSegment(txn, request) + response, _ := client.Do(request) + segment.Response = response + segment.End() +} + +func ExampleStartExternalSegment_context() { + txn := currentTransaction() + request, _ := http.NewRequest("GET", "http://www.example.com", nil) + + // If the transaction is added to the request's context then it does not + // need to be provided as a parameter to StartExternalSegment. 
+ request = RequestWithTransactionContext(request, txn) + segment := StartExternalSegment(nil, request) + + client := &http.Client{} + response, _ := client.Do(request) + segment.Response = response + segment.End() +} + +func ExampleNewStaticWebRequest() { + app := getApp() + webReq := NewStaticWebRequest(http.Header{}, &url.URL{Path: "path"}, "GET", TransportHTTP) + txn := app.StartTransaction("My-Transaction", nil, nil) + txn.SetWebRequest(webReq) +} diff --git a/instrumentation.go b/instrumentation.go new file mode 100644 index 000000000..a5c50c2d3 --- /dev/null +++ b/instrumentation.go @@ -0,0 +1,113 @@ +package newrelic + +import ( + "net/http" +) + +// instrumentation.go contains helpers built on the lower level api. + +// WrapHandle instruments http.Handler handlers with transactions. To +// instrument this code: +// +// http.Handle("/foo", myHandler) +// +// Perform this replacement: +// +// http.Handle(newrelic.WrapHandle(app, "/foo", myHandler)) +// +// WrapHandle adds the Transaction to the request's context. Access it using +// FromContext to add attributes, create segments, or notice errors: +// +// func myHandler(rw ResponseWriter, req *Request) { +// if txn := newrelic.FromContext(req.Context()); nil != txn { +// txn.AddAttribute("customerLevel", "gold") +// } +// } +// +// This function is safe to call if app is nil. +func WrapHandle(app Application, pattern string, handler http.Handler) (string, http.Handler) { + if app == nil { + return pattern, handler + } + return pattern, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + txn := app.StartTransaction(pattern, w, r) + defer txn.End() + + r = RequestWithTransactionContext(r, txn) + + handler.ServeHTTP(txn, r) + }) +} + +// WrapHandleFunc instruments handler functions using transactions. 
To +// instrument this code: +// +// http.HandleFunc("/users", func(w http.ResponseWriter, req *http.Request) { +// io.WriteString(w, "users page") +// }) +// +// Perform this replacement: +// +// http.HandleFunc(WrapHandleFunc(app, "/users", func(w http.ResponseWriter, req *http.Request) { +// io.WriteString(w, "users page") +// })) +// +// WrapHandleFunc adds the Transaction to the request's context. Access it using +// FromContext to add attributes, create segments, or notice errors: +// +// http.HandleFunc(WrapHandleFunc(app, "/users", func(w http.ResponseWriter, req *http.Request) { +// if txn := newrelic.FromContext(req.Context()); nil != txn { +// txn.AddAttribute("customerLevel", "gold") +// } +// io.WriteString(w, "users page") +// })) +// +// This function is safe to call if app is nil. +func WrapHandleFunc(app Application, pattern string, handler func(http.ResponseWriter, *http.Request)) (string, func(http.ResponseWriter, *http.Request)) { + p, h := WrapHandle(app, pattern, http.HandlerFunc(handler)) + return p, func(w http.ResponseWriter, r *http.Request) { h.ServeHTTP(w, r) } +} + +// NewRoundTripper creates an http.RoundTripper to instrument external requests +// and add distributed tracing headers. The RoundTripper returned creates an +// external segment before delegating to the original RoundTripper provided (or +// http.DefaultTransport if none is provided). If the Transaction parameter is +// nil then the RoundTripper will look for a Transaction in the request's +// context (using FromContext). Using a nil Transaction is STRONGLY recommended +// because it allows the same RoundTripper (and client) to be reused for +// multiple transactions. +func NewRoundTripper(txn Transaction, original http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(request *http.Request) (*http.Response, error) { + // The specification of http.RoundTripper requires that the request is never modified. 
+ request = cloneRequest(request) + segment := StartExternalSegment(txn, request) + + if nil == original { + original = http.DefaultTransport + } + response, err := original.RoundTrip(request) + + segment.Response = response + segment.End() + + return response, err + }) +} + +// cloneRequest mimics implementation of +// https://godoc.org/github.com/google/go-github/github#BasicAuthTransport.RoundTrip +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) } diff --git a/internal/adaptive_sampler.go b/internal/adaptive_sampler.go new file mode 100644 index 000000000..f965d8534 --- /dev/null +++ b/internal/adaptive_sampler.go @@ -0,0 +1,99 @@ +package internal + +import ( + "math" + "sync" + "time" +) + +// AdaptiveSampler calculates which transactions should be sampled. An interface +// is used in the connect reply to facilitate testing. +type AdaptiveSampler interface { + ComputeSampled(priority float32, now time.Time) bool +} + +// SampleEverything is used for testing. +type SampleEverything struct{} + +// SampleNothing is used when the application is not yet connected. +type SampleNothing struct{} + +// ComputeSampled implements AdaptiveSampler. +func (s SampleEverything) ComputeSampled(priority float32, now time.Time) bool { return true } + +// ComputeSampled implements AdaptiveSampler. +func (s SampleNothing) ComputeSampled(priority float32, now time.Time) bool { return false } + +type adaptiveSampler struct { + sync.Mutex + period time.Duration + target uint64 + + // Transactions with priority higher than this are sampled. + // This is 1 - sampleRatio. 
+ priorityMin float32 + + currentPeriod struct { + numSampled uint64 + numSeen uint64 + end time.Time + } +} + +// NewAdaptiveSampler creates an AdaptiveSampler. +func NewAdaptiveSampler(period time.Duration, target uint64, now time.Time) AdaptiveSampler { + as := &adaptiveSampler{} + as.period = period + as.target = target + as.currentPeriod.end = now.Add(period) + + // Sample the first transactions in the first period. + as.priorityMin = 0.0 + return as +} + +// ComputeSampled calculates if the transaction should be sampled. +func (as *adaptiveSampler) ComputeSampled(priority float32, now time.Time) bool { + as.Lock() + defer as.Unlock() + + // If the current time is after the end of the "currentPeriod". This is in + // a `for`/`while` loop in case there's a harvest where no sampling happened. + // i.e. for situations where a single call to + // as.currentPeriod.end = as.currentPeriod.end.Add(as.period) + // might not catch us up to the current period + for now.After(as.currentPeriod.end) { + as.priorityMin = 0.0 + if as.currentPeriod.numSeen > 0 { + sampledRatio := float32(as.target) / float32(as.currentPeriod.numSeen) + as.priorityMin = 1.0 - sampledRatio + } + as.currentPeriod.numSampled = 0 + as.currentPeriod.numSeen = 0 + as.currentPeriod.end = as.currentPeriod.end.Add(as.period) + } + + as.currentPeriod.numSeen++ + + // exponential backoff -- if the number of sampled items is greater than our + // target, we need to apply the exponential backoff + if as.currentPeriod.numSampled > as.target { + if as.computeSampledBackoff(as.target, as.currentPeriod.numSeen, as.currentPeriod.numSampled) { + as.currentPeriod.numSampled++ + return true + } + return false + } + + if priority >= as.priorityMin { + as.currentPeriod.numSampled++ + return true + } + + return false +} + +func (as *adaptiveSampler) computeSampledBackoff(target uint64, decidedCount uint64, sampledTrueCount uint64) bool { + return float64(RandUint64N(decidedCount)) < + math.Pow(float64(target), 
(float64(target)/float64(sampledTrueCount)))-math.Pow(float64(target), 0.5) +} diff --git a/internal/adaptive_sampler_test.go b/internal/adaptive_sampler_test.go new file mode 100644 index 000000000..74a9ff0d8 --- /dev/null +++ b/internal/adaptive_sampler_test.go @@ -0,0 +1,93 @@ +package internal + +import ( + "testing" + "time" +) + +func assert(t testing.TB, expectTrue bool) { + if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } + if !expectTrue { + t.Error(expectTrue) + } +} + +func TestDefaultReplyValidSampler(t *testing.T) { + reply := ConnectReplyDefaults() + assert(t, !reply.AdaptiveSampler.ComputeSampled(1.0, time.Now())) +} + +func TestAdaptiveSampler(t *testing.T) { + start := time.Now() + sampler := NewAdaptiveSampler(60*time.Second, 2, start) + + // first period -- we're guaranteed to get 2 sampled + // due to our target, and we'll send through a total of 4 + assert(t, sampler.ComputeSampled(0.0, start)) + assert(t, sampler.ComputeSampled(0.0, start)) + sampler.ComputeSampled(0.0, start) + sampler.ComputeSampled(0.0, start) + + // Next period! 4 calls in the last period means a new sample ratio + // of 1/2. Nothing with a priority less than the ratio will get through + now := start.Add(61 * time.Second) + assert(t, !sampler.ComputeSampled(0.0, now)) + assert(t, !sampler.ComputeSampled(0.0, now)) + assert(t, !sampler.ComputeSampled(0.0, now)) + assert(t, !sampler.ComputeSampled(0.0, now)) + assert(t, !sampler.ComputeSampled(0.49, now)) + assert(t, !sampler.ComputeSampled(0.49, now)) + + // but these two will get through, and we'll still be under + // our target rate so there's no random sampling to deal with + assert(t, sampler.ComputeSampled(0.55, now)) + assert(t, sampler.ComputeSampled(1.0, now)) + + // Next period! 8 calls in the last period means a new sample ratio + // of 1/4. 
+ now = start.Add(121 * time.Second) + assert(t, !sampler.ComputeSampled(0.0, now)) + assert(t, !sampler.ComputeSampled(0.5, now)) + assert(t, !sampler.ComputeSampled(0.7, now)) + assert(t, sampler.ComputeSampled(0.8, now)) +} + +func TestAdaptiveSamplerSkipPeriod(t *testing.T) { + start := time.Now() + sampler := NewAdaptiveSampler(60*time.Second, 2, start) + + // same as the previous test, we know we can get two through + // and we'll send a total of 4 through + assert(t, sampler.ComputeSampled(0.0, start)) + assert(t, sampler.ComputeSampled(0.0, start)) + sampler.ComputeSampled(0.0, start) + sampler.ComputeSampled(0.0, start) + + // Two periods later! Since there was a period with no samples, priorityMin + // should be zero + + now := start.Add(121 * time.Second) + assert(t, sampler.ComputeSampled(0.0, now)) + assert(t, sampler.ComputeSampled(0.0, now)) +} + +func TestAdaptiveSamplerTarget(t *testing.T) { + var target uint64 + target = 20 + start := time.Now() + sampler := NewAdaptiveSampler(60*time.Second, target, start) + + // we should always sample up to the number of target events + for i := 0; uint64(i) < target; i++ { + assert(t, sampler.ComputeSampled(0.0, start)) + } + + // but now further calls to ComputeSampled are subject to exponential backoff. 
+ // this means their sampling is subject to a bit of randomness and we have no + // guarantee of a true or false sample, just an increasing unlikeliness that + // things will be sampled +} diff --git a/internal/analytics_events.go b/internal/analytics_events.go new file mode 100644 index 000000000..cb1b94f72 --- /dev/null +++ b/internal/analytics_events.go @@ -0,0 +1,145 @@ +package internal + +import ( + "bytes" + "container/heap" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type analyticsEvent struct { + priority Priority + jsonWriter +} + +type analyticsEventHeap []analyticsEvent + +type analyticsEvents struct { + numSeen int + events analyticsEventHeap + failedHarvests int +} + +func (events *analyticsEvents) NumSeen() float64 { return float64(events.numSeen) } +func (events *analyticsEvents) NumSaved() float64 { return float64(len(events.events)) } + +func (h analyticsEventHeap) Len() int { return len(h) } +func (h analyticsEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) } +func (h analyticsEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (h analyticsEventHeap) Push(x interface{}) {} +func (h analyticsEventHeap) Pop() interface{} { return nil } + +func newAnalyticsEvents(max int) *analyticsEvents { + return &analyticsEvents{ + numSeen: 0, + events: make(analyticsEventHeap, 0, max), + failedHarvests: 0, + } +} + +func (events *analyticsEvents) capacity() int { + return cap(events.events) +} + +func (events *analyticsEvents) addEvent(e analyticsEvent) { + events.numSeen++ + + if events.capacity() == 0 { + // Configurable event harvest limits may be zero. 
+ return + } + + if len(events.events) < cap(events.events) { + events.events = append(events.events, e) + if len(events.events) == cap(events.events) { + // Delay heap initialization so that we can have + // deterministic ordering for integration tests (the max + // is not being reached). + heap.Init(events.events) + } + return + } + + if e.priority.isLowerPriority((events.events)[0].priority) { + return + } + + events.events[0] = e + heap.Fix(events.events, 0) +} + +func (events *analyticsEvents) mergeFailed(other *analyticsEvents) { + fails := other.failedHarvests + 1 + if fails >= failedEventsAttemptsLimit { + return + } + events.failedHarvests = fails + events.Merge(other) +} + +func (events *analyticsEvents) Merge(other *analyticsEvents) { + allSeen := events.numSeen + other.numSeen + + for _, e := range other.events { + events.addEvent(e) + } + events.numSeen = allSeen +} + +func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) { + if 0 == len(events.events) { + return nil, nil + } + + estimate := 256 * len(events.events) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteString(`"reservoir_size":`) + jsonx.AppendUint(buf, uint64(cap(events.events))) + buf.WriteByte(',') + buf.WriteString(`"events_seen":`) + jsonx.AppendUint(buf, uint64(events.numSeen)) + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteByte('[') + for i, e := range events.events { + if i > 0 { + buf.WriteByte(',') + } + e.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') + + return buf.Bytes(), nil + +} + +// split splits the events into two. NOTE! The two event pools are not valid +// priority queues, and should only be used to create JSON, not for adding any +// events. +func (events *analyticsEvents) split() (*analyticsEvents, *analyticsEvents) { + // numSeen is conserved: e1.numSeen + e2.numSeen == events.numSeen. 
+	// Note that copying (rather than subslicing events.events) is used so
+	// that length == capacity for e1.events and e2.events.
events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +func TestSampling(t *testing.T) { + events := newAnalyticsEvents(3) + events.addEvent(sampleAnalyticsEvent(0.999999)) + events.addEvent(sampleAnalyticsEvent(0.1)) + events.addEvent(sampleAnalyticsEvent(0.9)) + events.addEvent(sampleAnalyticsEvent(0.2)) + events.addEvent(sampleAnalyticsEvent(0.8)) + events.addEvent(sampleAnalyticsEvent(0.3)) + + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if string(json) != `["12345",{"reservoir_size":3,"events_seen":6},[0.8,0.999999,0.9]]` { + t.Error(string(json)) + } + if 6 != events.numSeen { + t.Error(events.numSeen) + } + if 3 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +func TestMergeEmpty(t *testing.T) { + e1 := newAnalyticsEvents(10) + e2 := newAnalyticsEvents(10) + e1.Merge(e2) + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if nil != json { + t.Error(string(json)) + } + if 0 != e1.numSeen { + t.Error(e1.numSeen) + } + if 0 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } +} + +func TestMergeFull(t *testing.T) { + e1 := newAnalyticsEvents(2) + e2 := newAnalyticsEvents(3) + + e1.addEvent(sampleAnalyticsEvent(0.1)) + e1.addEvent(sampleAnalyticsEvent(0.15)) + e1.addEvent(sampleAnalyticsEvent(0.25)) + + e2.addEvent(sampleAnalyticsEvent(0.06)) + e2.addEvent(sampleAnalyticsEvent(0.12)) + e2.addEvent(sampleAnalyticsEvent(0.18)) + e2.addEvent(sampleAnalyticsEvent(0.24)) + + e1.Merge(e2) + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if string(json) != `["12345",{"reservoir_size":2,"events_seen":7},[0.24,0.25]]` { + t.Error(string(json)) + } + if 7 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } +} + +func TestAnalyticsEventMergeFailedSuccess(t *testing.T) { + e1 := newAnalyticsEvents(2) + e2 := newAnalyticsEvents(3) + + e1.addEvent(sampleAnalyticsEvent(0.1)) + e1.addEvent(sampleAnalyticsEvent(0.15)) 
+ e1.addEvent(sampleAnalyticsEvent(0.25)) + + e2.addEvent(sampleAnalyticsEvent(0.06)) + e2.addEvent(sampleAnalyticsEvent(0.12)) + e2.addEvent(sampleAnalyticsEvent(0.18)) + e2.addEvent(sampleAnalyticsEvent(0.24)) + + e1.mergeFailed(e2) + + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if string(json) != `["12345",{"reservoir_size":2,"events_seen":7},[0.24,0.25]]` { + t.Error(string(json)) + } + if 7 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } + if 1 != e1.failedHarvests { + t.Error(e1.failedHarvests) + } +} + +func TestAnalyticsEventMergeFailedLimitReached(t *testing.T) { + e1 := newAnalyticsEvents(2) + e2 := newAnalyticsEvents(3) + + e1.addEvent(sampleAnalyticsEvent(0.1)) + e1.addEvent(sampleAnalyticsEvent(0.15)) + e1.addEvent(sampleAnalyticsEvent(0.25)) + + e2.addEvent(sampleAnalyticsEvent(0.06)) + e2.addEvent(sampleAnalyticsEvent(0.12)) + e2.addEvent(sampleAnalyticsEvent(0.18)) + e2.addEvent(sampleAnalyticsEvent(0.24)) + + e2.failedHarvests = failedEventsAttemptsLimit + + e1.mergeFailed(e2) + + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if string(json) != `["12345",{"reservoir_size":2,"events_seen":3},[0.15,0.25]]` { + t.Error(string(json)) + } + if 3 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } + if 0 != e1.failedHarvests { + t.Error(e1.failedHarvests) + } +} + +func analyticsEventBenchmarkHelper(b *testing.B, w jsonWriter) { + events := newAnalyticsEvents(MaxTxnEvents) + event := analyticsEvent{0, w} + for n := 0; n < MaxTxnEvents; n++ { + events.addEvent(event) + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } + } +} + +func BenchmarkTxnEventsCollectorJSON(b *testing.B) { + event := &TxnEvent{ + FinalName: "WebTransaction/Go/zip/zap", + Start: time.Now(), + Duration: 2 * 
time.Second, + Queuing: 1 * time.Second, + Zone: ApdexSatisfying, + Attrs: nil, + } + analyticsEventBenchmarkHelper(b, event) +} + +func BenchmarkCustomEventsCollectorJSON(b *testing.B) { + now := time.Now() + ce, err := CreateCustomEvent("myEventType", map[string]interface{}{ + "string": "myString", + "bool": true, + "int64": int64(123), + }, now) + if nil != err { + b.Fatal(err) + } + analyticsEventBenchmarkHelper(b, ce) +} + +func BenchmarkErrorEventsCollectorJSON(b *testing.B) { + e := TxnErrorFromResponseCode(time.Now(), 503) + e.Stack = GetStackTrace() + + txnName := "WebTransaction/Go/zip/zap" + event := &ErrorEvent{ + ErrorData: e, + TxnEvent: TxnEvent{ + FinalName: txnName, + Duration: 3 * time.Second, + Attrs: nil, + }, + } + analyticsEventBenchmarkHelper(b, event) +} + +func TestSplitFull(t *testing.T) { + events := newAnalyticsEvents(10) + for i := 0; i < 15; i++ { + events.addEvent(sampleAnalyticsEvent(Priority(float32(i) / 10.0))) + } + // Test that the capacity cannot exceed the max. 
+ if 10 != events.capacity() { + t.Error(events.capacity()) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + if string(j1) != `["12345",{"reservoir_size":5,"events_seen":5},[0.5,0.7,0.6,0.8,0.9]]` { + t.Error(string(j1)) + } + if string(j2) != `["12345",{"reservoir_size":5,"events_seen":10},[1.1,1.4,1,1.3,1.2]]` { + t.Error(string(j2)) + } +} + +func TestSplitNotFullOdd(t *testing.T) { + events := newAnalyticsEvents(10) + for i := 0; i < 7; i++ { + events.addEvent(sampleAnalyticsEvent(Priority(float32(i) / 10.0))) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + if string(j1) != `["12345",{"reservoir_size":3,"events_seen":3},[0,0.1,0.2]]` { + t.Error(string(j1)) + } + if string(j2) != `["12345",{"reservoir_size":4,"events_seen":4},[0.3,0.4,0.5,0.6]]` { + t.Error(string(j2)) + } +} + +func TestSplitNotFullEven(t *testing.T) { + events := newAnalyticsEvents(10) + for i := 0; i < 8; i++ { + events.addEvent(sampleAnalyticsEvent(Priority(float32(i) / 10.0))) + } + e1, e2 := events.split() + j1, err1 := e1.CollectorJSON(agentRunID) + j2, err2 := e2.CollectorJSON(agentRunID) + if err1 != nil || err2 != nil { + t.Fatal(err1, err2) + } + if string(j1) != `["12345",{"reservoir_size":4,"events_seen":4},[0,0.1,0.2,0.3]]` { + t.Error(string(j1)) + } + if string(j2) != `["12345",{"reservoir_size":4,"events_seen":4},[0.4,0.5,0.6,0.7]]` { + t.Error(string(j2)) + } +} + +func TestAnalyticsEventsZeroCapacity(t *testing.T) { + // Analytics events methods should be safe when configurable harvest + // settings have an event limit of zero. 
+ events := newAnalyticsEvents(0) + if 0 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { + t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) + } + events.addEvent(sampleAnalyticsEvent(0.5)) + if 1 != events.NumSeen() || 0 != events.NumSaved() || 0 != events.capacity() { + t.Error(events.NumSeen(), events.NumSaved(), events.capacity()) + } + js, err := events.CollectorJSON("agentRunID") + if err != nil || js != nil { + t.Error(err, string(js)) + } +} diff --git a/internal/apdex.go b/internal/apdex.go new file mode 100644 index 000000000..28225f7d0 --- /dev/null +++ b/internal/apdex.go @@ -0,0 +1,48 @@ +package internal + +import "time" + +// ApdexZone is a transaction classification. +type ApdexZone int + +// https://en.wikipedia.org/wiki/Apdex +const ( + ApdexNone ApdexZone = iota + ApdexSatisfying + ApdexTolerating + ApdexFailing +) + +// ApdexFailingThreshold calculates the threshold at which the transaction is +// considered a failure. +func ApdexFailingThreshold(threshold time.Duration) time.Duration { + return 4 * threshold +} + +// CalculateApdexZone calculates the apdex based on the transaction duration and +// threshold. +// +// Note that this does not take into account whether or not the transaction +// had an error. That is expected to be done by the caller. 
+func CalculateApdexZone(threshold, duration time.Duration) ApdexZone { + if duration <= threshold { + return ApdexSatisfying + } + if duration <= ApdexFailingThreshold(threshold) { + return ApdexTolerating + } + return ApdexFailing +} + +func (zone ApdexZone) label() string { + switch zone { + case ApdexSatisfying: + return "S" + case ApdexTolerating: + return "T" + case ApdexFailing: + return "F" + default: + return "" + } +} diff --git a/internal/apdex_test.go b/internal/apdex_test.go new file mode 100644 index 000000000..768b95373 --- /dev/null +++ b/internal/apdex_test.go @@ -0,0 +1,46 @@ +package internal + +import ( + "testing" + "time" +) + +func dur(d int) time.Duration { + return time.Duration(d) +} + +func TestCalculateApdexZone(t *testing.T) { + if z := CalculateApdexZone(dur(10), dur(1)); z != ApdexSatisfying { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(10)); z != ApdexSatisfying { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(11)); z != ApdexTolerating { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(40)); z != ApdexTolerating { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(41)); z != ApdexFailing { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(100)); z != ApdexFailing { + t.Fatal(z) + } +} + +func TestApdexLabel(t *testing.T) { + if out := ApdexSatisfying.label(); "S" != out { + t.Fatal(out) + } + if out := ApdexTolerating.label(); "T" != out { + t.Fatal(out) + } + if out := ApdexFailing.label(); "F" != out { + t.Fatal(out) + } + if out := ApdexNone.label(); "" != out { + t.Fatal(out) + } +} diff --git a/internal/attributes.go b/internal/attributes.go new file mode 100644 index 000000000..847474174 --- /dev/null +++ b/internal/attributes.go @@ -0,0 +1,611 @@ +package internal + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" +) + +// AgentAttributeID uniquely identifies each agent attribute. 
+	// query parameters only appear in segments, not span events, but are
+	// listed as span attributes to simplify code.
+ spanAttributeQueryParameters SpanAttribute = "query_parameters" + // These span attributes are added by aws sdk instrumentation. + // https://source.datanerd.us/agents/agent-specs/blob/master/implementation_guides/aws-sdk.md#span-and-segment-attributes + SpanAttributeAWSOperation SpanAttribute = "aws.operation" + SpanAttributeAWSRequestID SpanAttribute = "aws.requestId" + SpanAttributeAWSRegion SpanAttribute = "aws.region" +) + +func (sa SpanAttribute) String() string { return string(sa) } + +var ( + usualDests = DestAll &^ destBrowser + tracesDests = destTxnTrace | destError + agentAttributeInfo = map[AgentAttributeID]struct { + name string + defaultDests destinationSet + }{ + AttributeHostDisplayName: {name: "host.displayName", defaultDests: usualDests}, + attributeRequestMethod: {name: "request.method", defaultDests: usualDests}, + attributeRequestAcceptHeader: {name: "request.headers.accept", defaultDests: usualDests}, + attributeRequestContentType: {name: "request.headers.contentType", defaultDests: usualDests}, + attributeRequestContentLength: {name: "request.headers.contentLength", defaultDests: usualDests}, + attributeRequestHeadersHost: {name: "request.headers.host", defaultDests: usualDests}, + attributeRequestHeadersUserAgent: {name: "request.headers.User-Agent", defaultDests: tracesDests}, + attributeRequestHeadersReferer: {name: "request.headers.referer", defaultDests: tracesDests}, + attributeRequestURI: {name: "request.uri", defaultDests: usualDests}, + attributeResponseHeadersContentType: {name: "response.headers.contentType", defaultDests: usualDests}, + attributeResponseHeadersContentLength: {name: "response.headers.contentLength", defaultDests: usualDests}, + attributeResponseCode: {name: "httpResponseCode", defaultDests: usualDests}, + AttributeAWSRequestID: {name: "aws.requestId", defaultDests: usualDests}, + AttributeAWSLambdaARN: {name: "aws.lambda.arn", defaultDests: usualDests}, + AttributeAWSLambdaColdStart: {name: 
"aws.lambda.coldStart", defaultDests: usualDests}, + AttributeAWSLambdaEventSourceARN: {name: "aws.lambda.eventSource.arn", defaultDests: usualDests}, + AttributeMessageRoutingKey: {name: "message.routingKey", defaultDests: usualDests}, + AttributeMessageQueueName: {name: "message.queueName", defaultDests: usualDests}, + AttributeMessageExchangeType: {name: "message.exchangeType", defaultDests: destNone}, + AttributeMessageReplyTo: {name: "message.replyTo", defaultDests: destNone}, + AttributeMessageCorrelationID: {name: "message.correlationId", defaultDests: destNone}, + } + spanAttributes = []SpanAttribute{ + spanAttributeDBStatement, + spanAttributeDBInstance, + spanAttributeDBCollection, + spanAttributePeerAddress, + spanAttributePeerHostname, + spanAttributeHTTPURL, + spanAttributeHTTPMethod, + spanAttributeQueryParameters, + SpanAttributeAWSOperation, + SpanAttributeAWSRequestID, + SpanAttributeAWSRegion, + } +) + +func (id AgentAttributeID) name() string { return agentAttributeInfo[id].name } + +// https://source.datanerd.us/agents/agent-specs/blob/master/Agent-Attributes-PORTED.md + +// AttributeDestinationConfig matches newrelic.AttributeDestinationConfig to +// avoid circular dependency issues. +type AttributeDestinationConfig struct { + Enabled bool + Include []string + Exclude []string +} + +type destinationSet int + +const ( + destTxnEvent destinationSet = 1 << iota + destError + destTxnTrace + destBrowser + destSpan + destSegment +) + +const ( + destNone destinationSet = 0 + // DestAll contains all destinations. + DestAll destinationSet = destTxnEvent | destTxnTrace | destError | destBrowser | destSpan | destSegment +) + +const ( + attributeWildcardSuffix = '*' +) + +type attributeModifier struct { + match string // This will not contain a trailing '*'. 
+ includeExclude +} + +type byMatch []*attributeModifier + +func (m byMatch) Len() int { return len(m) } +func (m byMatch) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMatch) Less(i, j int) bool { return m[i].match < m[j].match } + +// AttributeConfig is created at connect and shared between all transactions. +type AttributeConfig struct { + disabledDestinations destinationSet + exactMatchModifiers map[string]*attributeModifier + // Once attributeConfig is constructed, wildcardModifiers is sorted in + // lexicographical order. Modifiers appearing later have precedence + // over modifiers appearing earlier. + wildcardModifiers []*attributeModifier + agentDests map[AgentAttributeID]destinationSet + spanDests map[SpanAttribute]destinationSet +} + +type includeExclude struct { + include destinationSet + exclude destinationSet +} + +func modifierApply(m *attributeModifier, d destinationSet) destinationSet { + // Include before exclude, since exclude has priority. + d |= m.include + d &^= m.exclude + return d +} + +func applyAttributeConfig(c *AttributeConfig, key string, d destinationSet) destinationSet { + // Important: The wildcard modifiers must be applied before the exact + // match modifiers, and the slice must be iterated in a forward + // direction. 
+ for _, m := range c.wildcardModifiers { + if strings.HasPrefix(key, m.match) { + d = modifierApply(m, d) + } + } + + if m, ok := c.exactMatchModifiers[key]; ok { + d = modifierApply(m, d) + } + + d &^= c.disabledDestinations + + return d +} + +func addModifier(c *AttributeConfig, match string, d includeExclude) { + if "" == match { + return + } + exactMatch := true + if attributeWildcardSuffix == match[len(match)-1] { + exactMatch = false + match = match[0 : len(match)-1] + } + mod := &attributeModifier{ + match: match, + includeExclude: d, + } + + if exactMatch { + if m, ok := c.exactMatchModifiers[mod.match]; ok { + m.include |= mod.include + m.exclude |= mod.exclude + } else { + c.exactMatchModifiers[mod.match] = mod + } + } else { + for _, m := range c.wildcardModifiers { + // Important: Duplicate entries for the same match + // string would not work because exclude needs + // precedence over include. + if m.match == mod.match { + m.include |= mod.include + m.exclude |= mod.exclude + return + } + } + c.wildcardModifiers = append(c.wildcardModifiers, mod) + } +} + +func processDest(c *AttributeConfig, includeEnabled bool, dc *AttributeDestinationConfig, d destinationSet) { + if !dc.Enabled { + c.disabledDestinations |= d + } + if includeEnabled { + for _, match := range dc.Include { + addModifier(c, match, includeExclude{include: d}) + } + } + for _, match := range dc.Exclude { + addModifier(c, match, includeExclude{exclude: d}) + } +} + +// AttributeConfigInput is used as the input to CreateAttributeConfig: it +// transforms newrelic.Config settings into an AttributeConfig. 
+type AttributeConfigInput struct { + Attributes AttributeDestinationConfig + ErrorCollector AttributeDestinationConfig + TransactionEvents AttributeDestinationConfig + BrowserMonitoring AttributeDestinationConfig + TransactionTracer AttributeDestinationConfig + SpanEvents AttributeDestinationConfig + TraceSegments AttributeDestinationConfig +} + +var ( + sampleAttributeConfigInput = AttributeConfigInput{ + Attributes: AttributeDestinationConfig{Enabled: true}, + ErrorCollector: AttributeDestinationConfig{Enabled: true}, + TransactionEvents: AttributeDestinationConfig{Enabled: true}, + TransactionTracer: AttributeDestinationConfig{Enabled: true}, + BrowserMonitoring: AttributeDestinationConfig{Enabled: true}, + SpanEvents: AttributeDestinationConfig{Enabled: true}, + TraceSegments: AttributeDestinationConfig{Enabled: true}, + } +) + +// CreateAttributeConfig creates a new AttributeConfig. +func CreateAttributeConfig(input AttributeConfigInput, includeEnabled bool) *AttributeConfig { + c := &AttributeConfig{ + exactMatchModifiers: make(map[string]*attributeModifier), + wildcardModifiers: make([]*attributeModifier, 0, 64), + } + + processDest(c, includeEnabled, &input.Attributes, DestAll) + processDest(c, includeEnabled, &input.ErrorCollector, destError) + processDest(c, includeEnabled, &input.TransactionEvents, destTxnEvent) + processDest(c, includeEnabled, &input.TransactionTracer, destTxnTrace) + processDest(c, includeEnabled, &input.BrowserMonitoring, destBrowser) + processDest(c, includeEnabled, &input.SpanEvents, destSpan) + processDest(c, includeEnabled, &input.TraceSegments, destSegment) + + sort.Sort(byMatch(c.wildcardModifiers)) + + c.agentDests = make(map[AgentAttributeID]destinationSet) + for id, info := range agentAttributeInfo { + c.agentDests[id] = applyAttributeConfig(c, info.name, info.defaultDests) + } + c.spanDests = make(map[SpanAttribute]destinationSet, len(spanAttributes)) + for _, id := range spanAttributes { + c.spanDests[id] = 
applyAttributeConfig(c, id.String(), destSpan|destSegment) + } + + return c +} + +type userAttribute struct { + value interface{} + dests destinationSet +} + +type agentAttributeValue struct { + stringVal string + otherVal interface{} +} + +type agentAttributes map[AgentAttributeID]agentAttributeValue + +func (a *Attributes) filterSpanAttributes(s map[SpanAttribute]jsonWriter, d destinationSet) map[SpanAttribute]jsonWriter { + if nil != a { + for key := range s { + if a.config.spanDests[key]&d == 0 { + delete(s, key) + } + } + } + return s +} + +// GetAgentValue is used to access agent attributes. This function returns ("", +// nil) if the attribute doesn't exist or it doesn't match the destinations +// provided. +func (a *Attributes) GetAgentValue(id AgentAttributeID, d destinationSet) (string, interface{}) { + if nil == a || 0 == a.config.agentDests[id]&d { + return "", nil + } + v, _ := a.Agent[id] + return v.stringVal, v.otherVal +} + +// AddAgentAttributer allows instrumentation to add agent attributes without +// exposing a Transaction method. +type AddAgentAttributer interface { + AddAgentAttribute(id AgentAttributeID, stringVal string, otherVal interface{}) +} + +// Add is used to add agent attributes. Only one of stringVal and +// otherVal should be populated. Since most agent attribute values are strings, +// stringVal exists to avoid allocations. +func (attr agentAttributes) Add(id AgentAttributeID, stringVal string, otherVal interface{}) { + if "" != stringVal || otherVal != nil { + attr[id] = agentAttributeValue{ + stringVal: truncateStringValueIfLong(stringVal), + otherVal: otherVal, + } + } +} + +// Attributes are key value pairs attached to the various collected data types. +type Attributes struct { + config *AttributeConfig + user map[string]userAttribute + Agent agentAttributes +} + +// NewAttributes creates a new Attributes. 
+func NewAttributes(config *AttributeConfig) *Attributes { + return &Attributes{ + config: config, + Agent: make(agentAttributes), + } +} + +// ErrInvalidAttributeType is returned when the value is not valid. +type ErrInvalidAttributeType struct { + key string + val interface{} +} + +func (e ErrInvalidAttributeType) Error() string { + return fmt.Sprintf("attribute '%s' value of type %T is invalid", e.key, e.val) +} + +type invalidAttributeKeyErr struct{ key string } + +func (e invalidAttributeKeyErr) Error() string { + return fmt.Sprintf("attribute key '%.32s...' exceeds length limit %d", + e.key, attributeKeyLengthLimit) +} + +type userAttributeLimitErr struct{ key string } + +func (e userAttributeLimitErr) Error() string { + return fmt.Sprintf("attribute '%s' discarded: limit of %d reached", e.key, + attributeUserLimit) +} + +func truncateStringValueIfLong(val string) string { + if len(val) > attributeValueLengthLimit { + return StringLengthByteLimit(val, attributeValueLengthLimit) + } + return val +} + +// ValidateUserAttribute validates a user attribute. +func ValidateUserAttribute(key string, val interface{}) (interface{}, error) { + if str, ok := val.(string); ok { + val = interface{}(truncateStringValueIfLong(str)) + } + + switch val.(type) { + case string, bool, + uint8, uint16, uint32, uint64, int8, int16, int32, int64, + float32, float64, uint, int, uintptr: + default: + return nil, ErrInvalidAttributeType{ + key: key, + val: val, + } + } + + // Attributes whose keys are excessively long are dropped rather than + // truncated to avoid worrying about the application of configuration to + // truncated values or performing the truncation after configuration. + if len(key) > attributeKeyLengthLimit { + return nil, invalidAttributeKeyErr{key: key} + } + return val, nil +} + +// AddUserAttribute adds a user attribute. 
+func AddUserAttribute(a *Attributes, key string, val interface{}, d destinationSet) error { + val, err := ValidateUserAttribute(key, val) + if nil != err { + return err + } + dests := applyAttributeConfig(a.config, key, d) + if destNone == dests { + return nil + } + if nil == a.user { + a.user = make(map[string]userAttribute) + } + + if _, exists := a.user[key]; !exists && len(a.user) >= attributeUserLimit { + return userAttributeLimitErr{key} + } + + // Note: Duplicates are overridden: last attribute in wins. + a.user[key] = userAttribute{ + value: val, + dests: dests, + } + return nil +} + +func writeAttributeValueJSON(w *jsonFieldsWriter, key string, val interface{}) { + switch v := val.(type) { + case string: + w.stringField(key, v) + case bool: + if v { + w.rawField(key, `true`) + } else { + w.rawField(key, `false`) + } + case uint8: + w.intField(key, int64(v)) + case uint16: + w.intField(key, int64(v)) + case uint32: + w.intField(key, int64(v)) + case uint64: + w.intField(key, int64(v)) + case uint: + w.intField(key, int64(v)) + case uintptr: + w.intField(key, int64(v)) + case int8: + w.intField(key, int64(v)) + case int16: + w.intField(key, int64(v)) + case int32: + w.intField(key, int64(v)) + case int64: + w.intField(key, v) + case int: + w.intField(key, int64(v)) + case float32: + w.floatField(key, float64(v)) + case float64: + w.floatField(key, v) + default: + w.stringField(key, fmt.Sprintf("%T", v)) + } +} + +func agentAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet) { + if nil == a { + buf.WriteString("{}") + return + } + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + for id, val := range a.Agent { + if 0 != a.config.agentDests[id]&d { + if val.stringVal != "" { + w.stringField(id.name(), val.stringVal) + } else { + writeAttributeValueJSON(&w, id.name(), val.otherVal) + } + } + } + buf.WriteByte('}') + +} + +func userAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet, extraAttributes map[string]interface{}) { + 
buf.WriteByte('{') + if nil != a { + w := jsonFieldsWriter{buf: buf} + for key, val := range extraAttributes { + outputDest := applyAttributeConfig(a.config, key, d) + if 0 != outputDest&d { + writeAttributeValueJSON(&w, key, val) + } + } + for name, atr := range a.user { + if 0 != atr.dests&d { + if _, found := extraAttributes[name]; found { + continue + } + writeAttributeValueJSON(&w, name, atr.value) + } + } + } + buf.WriteByte('}') +} + +// userAttributesStringJSON is only used for testing. +func userAttributesStringJSON(a *Attributes, d destinationSet, extraAttributes map[string]interface{}) string { + estimate := len(a.user) * 128 + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + userAttributesJSON(a, buf, d, extraAttributes) + return buf.String() +} + +// RequestAgentAttributes gathers agent attributes out of the request. +func RequestAgentAttributes(a *Attributes, method string, h http.Header, u *url.URL) { + a.Agent.Add(attributeRequestMethod, method, nil) + + if nil != u { + a.Agent.Add(attributeRequestURI, SafeURL(u), nil) + } + + if nil == h { + return + } + a.Agent.Add(attributeRequestAcceptHeader, h.Get("Accept"), nil) + a.Agent.Add(attributeRequestContentType, h.Get("Content-Type"), nil) + a.Agent.Add(attributeRequestHeadersHost, h.Get("Host"), nil) + a.Agent.Add(attributeRequestHeadersUserAgent, h.Get("User-Agent"), nil) + a.Agent.Add(attributeRequestHeadersReferer, SafeURLFromString(h.Get("Referer")), nil) + + if l := GetContentLengthFromHeader(h); l >= 0 { + a.Agent.Add(attributeRequestContentLength, "", l) + } +} + +// ResponseHeaderAttributes gather agent attributes from the response headers. +func ResponseHeaderAttributes(a *Attributes, h http.Header) { + if nil == h { + return + } + a.Agent.Add(attributeResponseHeadersContentType, h.Get("Content-Type"), nil) + + if l := GetContentLengthFromHeader(h); l >= 0 { + a.Agent.Add(attributeResponseHeadersContentLength, "", l) + } +} + +var ( + // statusCodeLookup avoids a strconv.Itoa call. 
+ statusCodeLookup = map[int]string{ + 100: "100", 101: "101", + 200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206", + 300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307", + 400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406", + 407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413", + 414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429", + 431: "431", 451: "451", + 500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511", + } +) + +// ResponseCodeAttribute sets the response code agent attribute. +func ResponseCodeAttribute(a *Attributes, code int) { + rc := statusCodeLookup[code] + if rc == "" { + rc = strconv.Itoa(code) + } + a.Agent.Add(attributeResponseCode, rc, nil) +} diff --git a/internal/attributes_test.go b/internal/attributes_test.go new file mode 100644 index 000000000..3ffa92401 --- /dev/null +++ b/internal/attributes_test.go @@ -0,0 +1,458 @@ +package internal + +import ( + "bytes" + "encoding/json" + "net/http" + "strconv" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +type AttributeTestcase struct { + Testname string `json:"testname"` + Config struct { + AttributesEnabled bool `json:"attributes.enabled"` + AttributesInclude []string `json:"attributes.include"` + AttributesExclude []string `json:"attributes.exclude"` + BrowserAttributesEnabled bool `json:"browser_monitoring.attributes.enabled"` + BrowserAttributesInclude []string `json:"browser_monitoring.attributes.include"` + BrowserAttributesExclude []string `json:"browser_monitoring.attributes.exclude"` + ErrorAttributesEnabled bool `json:"error_collector.attributes.enabled"` + ErrorAttributesInclude []string `json:"error_collector.attributes.include"` + ErrorAttributesExclude []string `json:"error_collector.attributes.exclude"` + EventsAttributesEnabled bool 
`json:"transaction_events.attributes.enabled"` + EventsAttributesInclude []string `json:"transaction_events.attributes.include"` + EventsAttributesExclude []string `json:"transaction_events.attributes.exclude"` + TracerAttributesEnabled bool `json:"transaction_tracer.attributes.enabled"` + TracerAttributesInclude []string `json:"transaction_tracer.attributes.include"` + TracerAttributesExclude []string `json:"transaction_tracer.attributes.exclude"` + } `json:"config"` + Key string `json:"input_key"` + InputDestinations []string `json:"input_default_destinations"` + ExpectedDestinations []string `json:"expected_destinations"` +} + +var ( + destTranslate = map[string]destinationSet{ + "attributes": DestAll, + "transaction_events": destTxnEvent, + "transaction_tracer": destTxnTrace, + "error_collector": destError, + "browser_monitoring": destBrowser, + } +) + +func destinationsFromArray(dests []string) destinationSet { + d := destNone + for _, s := range dests { + if x, ok := destTranslate[s]; ok { + d |= x + } + } + return d +} + +func destToString(d destinationSet) string { + if 0 == d { + return "none" + } + out := "" + for _, ds := range []struct { + Name string + Dest destinationSet + }{ + {Name: "event", Dest: destTxnEvent}, + {Name: "trace", Dest: destTxnTrace}, + {Name: "error", Dest: destError}, + {Name: "browser", Dest: destBrowser}, + {Name: "span", Dest: destSpan}, + {Name: "segment", Dest: destSegment}, + } { + if 0 != d&ds.Dest { + if "" == out { + out = ds.Name + } else { + out = out + "," + ds.Name + } + } + } + return out +} + +func runAttributeTestcase(t *testing.T, js json.RawMessage) { + var tc AttributeTestcase + + tc.Config.AttributesEnabled = true + tc.Config.BrowserAttributesEnabled = false + tc.Config.ErrorAttributesEnabled = true + tc.Config.EventsAttributesEnabled = true + tc.Config.TracerAttributesEnabled = true + + if err := json.Unmarshal(js, &tc); nil != err { + t.Error(err) + return + } + + input := AttributeConfigInput{ + Attributes: 
AttributeDestinationConfig{ + Enabled: tc.Config.AttributesEnabled, + Include: tc.Config.AttributesInclude, + Exclude: tc.Config.AttributesExclude, + }, + ErrorCollector: AttributeDestinationConfig{ + Enabled: tc.Config.ErrorAttributesEnabled, + Include: tc.Config.ErrorAttributesInclude, + Exclude: tc.Config.ErrorAttributesExclude, + }, + TransactionEvents: AttributeDestinationConfig{ + Enabled: tc.Config.EventsAttributesEnabled, + Include: tc.Config.EventsAttributesInclude, + Exclude: tc.Config.EventsAttributesExclude, + }, + BrowserMonitoring: AttributeDestinationConfig{ + Enabled: tc.Config.BrowserAttributesEnabled, + Include: tc.Config.BrowserAttributesInclude, + Exclude: tc.Config.BrowserAttributesExclude, + }, + TransactionTracer: AttributeDestinationConfig{ + Enabled: tc.Config.TracerAttributesEnabled, + Include: tc.Config.TracerAttributesInclude, + Exclude: tc.Config.TracerAttributesExclude, + }, + } + + cfg := CreateAttributeConfig(input, true) + + inputDests := destinationsFromArray(tc.InputDestinations) + expectedDests := destinationsFromArray(tc.ExpectedDestinations) + + out := applyAttributeConfig(cfg, tc.Key, inputDests) + + if out != expectedDests { + t.Errorf(`name="%s" input="%s" expected="%s" got="%s"`, + tc.Testname, + destToString(inputDests), + destToString(expectedDests), + destToString(out)) + } +} + +func TestCrossAgentAttributes(t *testing.T) { + var tcs []json.RawMessage + + err := crossagent.ReadJSON("attribute_configuration.json", &tcs) + if err != nil { + t.Fatal(err) + } + + for _, tc := range tcs { + runAttributeTestcase(t, tc) + } +} + +func TestWriteAttributeValueJSON(t *testing.T) { + buf := &bytes.Buffer{} + w := jsonFieldsWriter{buf: buf} + + buf.WriteByte('{') + writeAttributeValueJSON(&w, "a", `escape\me!`) + writeAttributeValueJSON(&w, "a", true) + writeAttributeValueJSON(&w, "a", false) + writeAttributeValueJSON(&w, "a", uint8(1)) + writeAttributeValueJSON(&w, "a", uint16(2)) + writeAttributeValueJSON(&w, "a", uint32(3)) + 
writeAttributeValueJSON(&w, "a", uint64(4)) + writeAttributeValueJSON(&w, "a", uint(5)) + writeAttributeValueJSON(&w, "a", uintptr(6)) + writeAttributeValueJSON(&w, "a", int8(-1)) + writeAttributeValueJSON(&w, "a", int16(-2)) + writeAttributeValueJSON(&w, "a", int32(-3)) + writeAttributeValueJSON(&w, "a", int64(-4)) + writeAttributeValueJSON(&w, "a", int(-5)) + writeAttributeValueJSON(&w, "a", float32(1.5)) + writeAttributeValueJSON(&w, "a", float64(4.56)) + buf.WriteByte('}') + + expect := CompactJSONString(`{ + "a":"escape\\me!", + "a":true, + "a":false, + "a":1, + "a":2, + "a":3, + "a":4, + "a":5, + "a":6, + "a":-1, + "a":-2, + "a":-3, + "a":-4, + "a":-5, + "a":1.5, + "a":4.56 + }`) + js := buf.String() + if js != expect { + t.Error(js, expect) + } +} + +func TestValidAttributeTypes(t *testing.T) { + testcases := []struct { + Input interface{} + Valid bool + }{ + // Valid attribute types. + {Input: "string value", Valid: true}, + {Input: true, Valid: true}, + {Input: uint8(0), Valid: true}, + {Input: uint16(0), Valid: true}, + {Input: uint32(0), Valid: true}, + {Input: uint64(0), Valid: true}, + {Input: int8(0), Valid: true}, + {Input: int16(0), Valid: true}, + {Input: int32(0), Valid: true}, + {Input: int64(0), Valid: true}, + {Input: float32(0), Valid: true}, + {Input: float64(0), Valid: true}, + {Input: uint(0), Valid: true}, + {Input: int(0), Valid: true}, + {Input: uintptr(0), Valid: true}, + // Invalid attribute types. 
+ {Input: nil, Valid: false}, + {Input: struct{}{}, Valid: false}, + {Input: &struct{}{}, Valid: false}, + } + + for _, tc := range testcases { + val, err := ValidateUserAttribute("key", tc.Input) + _, invalid := err.(ErrInvalidAttributeType) + if tc.Valid == invalid { + t.Error(tc.Input, tc.Valid, val, err) + } + } +} + +func TestUserAttributeValLength(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attrs := NewAttributes(cfg) + + atLimit := strings.Repeat("a", attributeValueLengthLimit) + tooLong := atLimit + "a" + + err := AddUserAttribute(attrs, `escape\me`, tooLong, DestAll) + if err != nil { + t.Error(err) + } + js := userAttributesStringJSON(attrs, DestAll, nil) + if `{"escape\\me":"`+atLimit+`"}` != js { + t.Error(js) + } +} + +func TestUserAttributeKeyLength(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attrs := NewAttributes(cfg) + + lengthyKey := strings.Repeat("a", attributeKeyLengthLimit+1) + err := AddUserAttribute(attrs, lengthyKey, 123, DestAll) + if _, ok := err.(invalidAttributeKeyErr); !ok { + t.Error(err) + } + js := userAttributesStringJSON(attrs, DestAll, nil) + if `{}` != js { + t.Error(js) + } +} + +func TestNumUserAttributesLimit(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attrs := NewAttributes(cfg) + + for i := 0; i < attributeUserLimit; i++ { + s := strconv.Itoa(i) + err := AddUserAttribute(attrs, s, s, DestAll) + if err != nil { + t.Fatal(err) + } + } + + err := AddUserAttribute(attrs, "cant_add_me", 123, DestAll) + if _, ok := err.(userAttributeLimitErr); !ok { + t.Fatal(err) + } + + js := userAttributesStringJSON(attrs, DestAll, nil) + var out map[string]string + err = json.Unmarshal([]byte(js), &out) + if nil != err { + t.Fatal(err) + } + if len(out) != attributeUserLimit { + t.Error(len(out)) + } + if strings.Contains(js, "cant_add_me") { + t.Fatal(js) + } + + // Now test that replacement works when the limit is reached. 
+ err = AddUserAttribute(attrs, "0", "BEEN_REPLACED", DestAll) + if nil != err { + t.Fatal(err) + } + js = userAttributesStringJSON(attrs, DestAll, nil) + if !strings.Contains(js, "BEEN_REPLACED") { + t.Fatal(js) + } +} + +func TestExtraAttributesIncluded(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attrs := NewAttributes(cfg) + + err := AddUserAttribute(attrs, "a", 1, DestAll) + if nil != err { + t.Error(err) + } + js := userAttributesStringJSON(attrs, DestAll, map[string]interface{}{"b": 2}) + if `{"b":2,"a":1}` != js { + t.Error(js) + } +} + +func TestExtraAttributesPrecedence(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attrs := NewAttributes(cfg) + + err := AddUserAttribute(attrs, "a", 1, DestAll) + if nil != err { + t.Error(err) + } + js := userAttributesStringJSON(attrs, DestAll, map[string]interface{}{"a": 2}) + if `{"a":2}` != js { + t.Error(js) + } +} + +func TestIncludeDisabled(t *testing.T) { + input := sampleAttributeConfigInput + input.Attributes.Include = append(input.Attributes.Include, "include_me") + cfg := CreateAttributeConfig(input, false) + attrs := NewAttributes(cfg) + + err := AddUserAttribute(attrs, "include_me", 1, destNone) + if nil != err { + t.Error(err) + } + js := userAttributesStringJSON(attrs, DestAll, nil) + if `{}` != js { + t.Error(js) + } +} + +func agentAttributesMap(attrs *Attributes, d destinationSet) map[string]interface{} { + buf := &bytes.Buffer{} + agentAttributesJSON(attrs, buf, d) + var m map[string]interface{} + err := json.Unmarshal(buf.Bytes(), &m) + if err != nil { + panic(err) + } + return m +} + +func TestRequestAgentAttributesEmptyInput(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attrs := NewAttributes(cfg) + RequestAgentAttributes(attrs, "", nil, nil) + got := agentAttributesMap(attrs, DestAll) + expectAttributes(t, got, map[string]interface{}{}) +} + +func TestRequestAgentAttributesPresent(t 
*testing.T) { + req, err := http.NewRequest("GET", "http://www.newrelic.com?remove=me", nil) + if nil != err { + t.Fatal(err) + } + req.Header.Set("Accept", "the-accept") + req.Header.Set("Content-Type", "the-content-type") + req.Header.Set("Host", "the-host") + req.Header.Set("User-Agent", "the-agent") + req.Header.Set("Referer", "http://www.example.com") + req.Header.Set("Content-Length", "123") + + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + + attrs := NewAttributes(cfg) + RequestAgentAttributes(attrs, req.Method, req.Header, req.URL) + got := agentAttributesMap(attrs, DestAll) + expectAttributes(t, got, map[string]interface{}{ + "request.headers.contentType": "the-content-type", + "request.headers.host": "the-host", + "request.headers.User-Agent": "the-agent", + "request.headers.referer": "http://www.example.com", + "request.headers.contentLength": 123, + "request.method": "GET", + "request.uri": "http://www.newrelic.com", + "request.headers.accept": "the-accept", + }) +} + +func BenchmarkAgentAttributes(b *testing.B) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + + req, err := http.NewRequest("GET", "http://www.newrelic.com", nil) + if nil != err { + b.Fatal(err) + } + + req.Header.Set("Accept", "zap") + req.Header.Set("Content-Type", "zap") + req.Header.Set("Host", "zap") + req.Header.Set("User-Agent", "zap") + req.Header.Set("Referer", "http://www.newrelic.com") + req.Header.Set("Content-Length", "123") + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + attrs := NewAttributes(cfg) + RequestAgentAttributes(attrs, req.Method, req.Header, req.URL) + buf := bytes.Buffer{} + agentAttributesJSON(attrs, &buf, destTxnTrace) + } +} + +func TestGetAgentValue(t *testing.T) { + // Test nil safe + var attrs *Attributes + outstr, outother := attrs.GetAgentValue(attributeRequestURI, destTxnTrace) + if outstr != "" || outother != nil { + t.Error(outstr, outother) + } + + c := sampleAttributeConfigInput + 
c.TransactionTracer.Exclude = []string{"request.uri"} + cfg := CreateAttributeConfig(c, true) + attrs = NewAttributes(cfg) + attrs.Agent.Add(attributeResponseHeadersContentLength, "", 123) + attrs.Agent.Add(attributeRequestMethod, "GET", nil) + attrs.Agent.Add(attributeRequestURI, "/url", nil) // disabled by configuration + + outstr, outother = attrs.GetAgentValue(attributeResponseHeadersContentLength, destTxnTrace) + if outstr != "" || outother != 123 { + t.Error(outstr, outother) + } + outstr, outother = attrs.GetAgentValue(attributeRequestMethod, destTxnTrace) + if outstr != "GET" || outother != nil { + t.Error(outstr, outother) + } + outstr, outother = attrs.GetAgentValue(attributeRequestURI, destTxnTrace) + if outstr != "" || outother != nil { + t.Error(outstr, outother) + } +} diff --git a/internal/browser.go b/internal/browser.go new file mode 100644 index 000000000..a55456ad3 --- /dev/null +++ b/internal/browser.go @@ -0,0 +1,18 @@ +package internal + +import "bytes" + +// BrowserAttributes returns a string with the attributes that are attached to +// the browser destination encoded in the JSON format expected by the Browser +// agent. 
+func BrowserAttributes(a *Attributes) []byte { + buf := &bytes.Buffer{} + + buf.WriteString(`{"u":`) + userAttributesJSON(a, buf, destBrowser, nil) + buf.WriteString(`,"a":`) + agentAttributesJSON(a, buf, destBrowser) + buf.WriteByte('}') + + return buf.Bytes() +} diff --git a/internal/browser_test.go b/internal/browser_test.go new file mode 100644 index 000000000..953ae0951 --- /dev/null +++ b/internal/browser_test.go @@ -0,0 +1,26 @@ +package internal + +import ( + "testing" +) + +func TestBrowserAttributesNil(t *testing.T) { + expected := `{"u":{},"a":{}}` + actual := string(BrowserAttributes(nil)) + if expected != actual { + t.Errorf("unexpected browser attributes: expected %s; got %s", expected, actual) + } +} + +func TestBrowserAttributes(t *testing.T) { + a := NewAttributes(CreateAttributeConfig(sampleAttributeConfigInput, true)) + AddUserAttribute(a, "user", "thing", destBrowser) + AddUserAttribute(a, "not", "shown", destError) + a.Agent.Add(AttributeHostDisplayName, "host", nil) + + expected := `{"u":{"user":"thing"},"a":{}}` + actual := string(BrowserAttributes(a)) + if expected != actual { + t.Errorf("unexpected browser attributes: expected %s; got %s", expected, actual) + } +} diff --git a/internal/cat/appdata.go b/internal/cat/appdata.go new file mode 100644 index 000000000..d62b71f2b --- /dev/null +++ b/internal/cat/appdata.go @@ -0,0 +1,111 @@ +package cat + +import ( + "bytes" + "encoding/json" + "errors" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +// AppDataHeader represents a decoded AppData header. 
+type AppDataHeader struct { + CrossProcessID string + TransactionName string + QueueTimeInSeconds float64 + ResponseTimeInSeconds float64 + ContentLength int64 + TransactionGUID string +} + +var ( + errInvalidAppDataJSON = errors.New("invalid transaction data JSON") + errInvalidAppDataCrossProcessID = errors.New("cross process ID is not a string") + errInvalidAppDataTransactionName = errors.New("transaction name is not a string") + errInvalidAppDataQueueTimeInSeconds = errors.New("queue time is not a float64") + errInvalidAppDataResponseTimeInSeconds = errors.New("response time is not a float64") + errInvalidAppDataContentLength = errors.New("content length is not a float64") + errInvalidAppDataTransactionGUID = errors.New("transaction GUID is not a string") +) + +// MarshalJSON marshalls an AppDataHeader as raw JSON. +func (appData *AppDataHeader) MarshalJSON() ([]byte, error) { + buf := bytes.NewBufferString("[") + + jsonx.AppendString(buf, appData.CrossProcessID) + + buf.WriteString(",") + jsonx.AppendString(buf, appData.TransactionName) + + buf.WriteString(",") + jsonx.AppendFloat(buf, appData.QueueTimeInSeconds) + + buf.WriteString(",") + jsonx.AppendFloat(buf, appData.ResponseTimeInSeconds) + + buf.WriteString(",") + jsonx.AppendInt(buf, appData.ContentLength) + + buf.WriteString(",") + jsonx.AppendString(buf, appData.TransactionGUID) + + // The mysterious unused field. We don't need to round trip this, so we'll + // just hardcode it to false. + buf.WriteString(",false]") + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls an AppDataHeader from raw JSON. 
+func (appData *AppDataHeader) UnmarshalJSON(data []byte) error { + var ok bool + var v interface{} + + if err := json.Unmarshal(data, &v); err != nil { + return err + } + + arr, ok := v.([]interface{}) + if !ok { + return errInvalidAppDataJSON + } + if len(arr) < 7 { + return errUnexpectedArraySize{ + label: "unexpected number of application data elements", + expected: 7, + actual: len(arr), + } + } + + if appData.CrossProcessID, ok = arr[0].(string); !ok { + return errInvalidAppDataCrossProcessID + } + + if appData.TransactionName, ok = arr[1].(string); !ok { + return errInvalidAppDataTransactionName + } + + if appData.QueueTimeInSeconds, ok = arr[2].(float64); !ok { + return errInvalidAppDataQueueTimeInSeconds + } + + if appData.ResponseTimeInSeconds, ok = arr[3].(float64); !ok { + return errInvalidAppDataResponseTimeInSeconds + } + + cl, ok := arr[4].(float64) + if !ok { + return errInvalidAppDataContentLength + } + // Content length is specced as int32, but not all agents are consistent on + // this in practice. Let's handle it as int64 to maximise compatibility. + appData.ContentLength = int64(cl) + + if appData.TransactionGUID, ok = arr[5].(string); !ok { + return errInvalidAppDataTransactionGUID + } + + // As above, we don't bother decoding the unused field here. It just has to + // be present (which was checked earlier with the length check). 
+ + return nil +} diff --git a/internal/cat/appdata_test.go b/internal/cat/appdata_test.go new file mode 100644 index 000000000..592f1d8a3 --- /dev/null +++ b/internal/cat/appdata_test.go @@ -0,0 +1,120 @@ +package cat + +import ( + "encoding/json" + "testing" +) + +func TestAppDataRoundTrip(t *testing.T) { + for _, test := range []struct { + json string + appData AppDataHeader + }{ + { + json: `["xpid","txn",1,2,4096,"guid",false]`, + appData: AppDataHeader{ + CrossProcessID: "xpid", + TransactionName: "txn", + QueueTimeInSeconds: 1.0, + ResponseTimeInSeconds: 2.0, + ContentLength: 4096, + TransactionGUID: "guid", + }, + }, + } { + // Test unmarshalling. + appData := &AppDataHeader{} + if err := json.Unmarshal([]byte(test.json), appData); err != nil { + t.Errorf("given %s: error expected to be nil; got %v", test.json, err) + } + + if test.appData.CrossProcessID != appData.CrossProcessID { + t.Errorf("given %s: CrossProcessID expected to be %s; got %s", test.json, test.appData.CrossProcessID, appData.CrossProcessID) + } + + if test.appData.TransactionName != appData.TransactionName { + t.Errorf("given %s: TransactionName expected to be %s; got %s", test.json, test.appData.TransactionName, appData.TransactionName) + } + + if test.appData.QueueTimeInSeconds != appData.QueueTimeInSeconds { + t.Errorf("given %s: QueueTimeInSeconds expected to be %f; got %f", test.json, test.appData.QueueTimeInSeconds, appData.QueueTimeInSeconds) + } + + if test.appData.ResponseTimeInSeconds != appData.ResponseTimeInSeconds { + t.Errorf("given %s: ResponseTimeInSeconds expected to be %f; got %f", test.json, test.appData.ResponseTimeInSeconds, appData.ResponseTimeInSeconds) + } + + if test.appData.ContentLength != appData.ContentLength { + t.Errorf("given %s: ContentLength expected to be %d; got %d", test.json, test.appData.ContentLength, appData.ContentLength) + } + + if test.appData.TransactionGUID != appData.TransactionGUID { + t.Errorf("given %s: TransactionGUID expected to be %s; 
got %s", test.json, test.appData.TransactionGUID, appData.TransactionGUID) + } + + // Test marshalling. + data, err := json.Marshal(&test.appData) + if err != nil { + t.Errorf("given %s: error expected to be nil; got %v", test.json, err) + } + + if string(data) != test.json { + t.Errorf("given %s: unexpected JSON %s", test.json, string(data)) + } + } +} + +func TestAppDataUnmarshal(t *testing.T) { + // Test error cases where we get a generic error from the JSON package. + for _, input := range []string{ + // Basic malformed JSON test: beyond this, we're not going to unit test the + // Go standard library's JSON package. + ``, + } { + appData := &AppDataHeader{} + + if err := json.Unmarshal([]byte(input), appData); err == nil { + t.Errorf("given %s: error expected to be non-nil; got nil", input) + } + } + + // Test error cases where a specific variable is returned. + for _, tc := range []struct { + input string + err error + }{ + // Unexpected JSON types. + {`false`, errInvalidAppDataJSON}, + {`true`, errInvalidAppDataJSON}, + {`1234`, errInvalidAppDataJSON}, + {`{}`, errInvalidAppDataJSON}, + {`""`, errInvalidAppDataJSON}, + + // Invalid data types for each field in turn. + {`[0,"txn",1.0,2.0,4096,"guid",false]`, errInvalidAppDataCrossProcessID}, + {`["xpid",0,1.0,2.0,4096,"guid",false]`, errInvalidAppDataTransactionName}, + {`["xpid","txn","queue",2.0,4096,"guid",false]`, errInvalidAppDataQueueTimeInSeconds}, + {`["xpid","txn",1.0,"response",4096,"guid",false]`, errInvalidAppDataResponseTimeInSeconds}, + {`["xpid","txn",1.0,2.0,"content length","guid",false]`, errInvalidAppDataContentLength}, + {`["xpid","txn",1.0,2.0,4096,0,false]`, errInvalidAppDataTransactionGUID}, + } { + appData := &AppDataHeader{} + + if err := json.Unmarshal([]byte(tc.input), appData); err != tc.err { + t.Errorf("given %s: error expected to be %v; got %v", tc.input, tc.err, err) + } + } + + // Test error cases where the incorrect number of elements was provided. 
+ for _, input := range []string{ + `[]`, + `[1,2,3,4,5,6]`, + } { + appData := &AppDataHeader{} + + err := json.Unmarshal([]byte(input), appData) + if _, ok := err.(errUnexpectedArraySize); !ok { + t.Errorf("given %s: error expected to be errUnexpectedArraySize; got %v", input, err) + } + } +} diff --git a/internal/cat/errors.go b/internal/cat/errors.go new file mode 100644 index 000000000..d19ce5183 --- /dev/null +++ b/internal/cat/errors.go @@ -0,0 +1,15 @@ +package cat + +import ( + "fmt" +) + +type errUnexpectedArraySize struct { + label string + expected int + actual int +} + +func (e errUnexpectedArraySize) Error() string { + return fmt.Sprintf("%s: expected %d; got %d", e.label, e.expected, e.actual) +} diff --git a/internal/cat/headers.go b/internal/cat/headers.go new file mode 100644 index 000000000..52586ed93 --- /dev/null +++ b/internal/cat/headers.go @@ -0,0 +1,13 @@ +// Package cat provides functionality related to the wire format of CAT +// headers. +package cat + +// These header names don't match the spec in terms of their casing, but does +// match what Go will give us from http.CanonicalHeaderKey(). Besides, HTTP +// headers are case insensitive anyway. Rejoice! +const ( + NewRelicIDName = "X-Newrelic-Id" + NewRelicTxnName = "X-Newrelic-Transaction" + NewRelicAppDataName = "X-Newrelic-App-Data" + NewRelicSyntheticsName = "X-Newrelic-Synthetics" +) diff --git a/internal/cat/id.go b/internal/cat/id.go new file mode 100644 index 000000000..f8d3928ac --- /dev/null +++ b/internal/cat/id.go @@ -0,0 +1,41 @@ +package cat + +import ( + "errors" + "strconv" + "strings" +) + +// IDHeader represents a decoded cross process ID header (generally encoded as +// a string in the form ACCOUNT#BLOB). +type IDHeader struct { + AccountID int + Blob string +} + +var ( + errInvalidAccountID = errors.New("invalid account ID") +) + +// NewIDHeader parses the given decoded ID header and creates an IDHeader +// representing it. 
+func NewIDHeader(in []byte) (*IDHeader, error) { + parts := strings.Split(string(in), "#") + if len(parts) != 2 { + return nil, errUnexpectedArraySize{ + label: "unexpected number of ID elements", + expected: 2, + actual: len(parts), + } + } + + account, err := strconv.Atoi(parts[0]) + if err != nil { + return nil, errInvalidAccountID + } + + return &IDHeader{ + AccountID: account, + Blob: parts[1], + }, nil +} diff --git a/internal/cat/id_test.go b/internal/cat/id_test.go new file mode 100644 index 000000000..5e458127f --- /dev/null +++ b/internal/cat/id_test.go @@ -0,0 +1,53 @@ +package cat + +import ( + "testing" +) + +func TestIDHeaderUnmarshal(t *testing.T) { + // Test error cases where the output is errUnexpectedArraySize. + for _, input := range []string{ + ``, + `1234`, + `1234#5678#90`, + `foo`, + } { + _, err := NewIDHeader([]byte(input)) + if _, ok := err.(errUnexpectedArraySize); !ok { + t.Errorf("given %s: error expected to be errUnexpectedArraySize; got %v", input, err) + } + } + + // Test error cases where the output is errInvalidAccountID. + for _, input := range []string{ + `#1234`, + `foo#bar`, + } { + if _, err := NewIDHeader([]byte(input)); err != errInvalidAccountID { + t.Errorf("given %s: error expected to be %v; got %v", input, errInvalidAccountID, err) + } + } + + // Test success cases. 
+	for _, test := range []struct { +		input    string +		expected IDHeader +	}{ +		{`1234#`, IDHeader{1234, ""}}, +		{`1234#5678`, IDHeader{1234, "5678"}}, +		{`1234#blob`, IDHeader{1234, "blob"}}, +		{`0#5678`, IDHeader{0, "5678"}}, +	} { +		id, err := NewIDHeader([]byte(test.input)) + +		if err != nil { +			t.Errorf("given %s: error expected to be nil; got %v", test.input, err) +		} +		if test.expected.AccountID != id.AccountID { +			t.Errorf("given %s: account ID expected to be %d; got %d", test.input, test.expected.AccountID, id.AccountID) +		} +		if test.expected.Blob != id.Blob { +			t.Errorf("given %s: blob expected to be %s; got %s", test.input, test.expected.Blob, id.Blob) +		} +	} +} diff --git a/internal/cat/path_hash.go b/internal/cat/path_hash.go new file mode 100644 index 000000000..34014464f --- /dev/null +++ b/internal/cat/path_hash.go @@ -0,0 +1,34 @@ +package cat + +import ( +	"crypto/md5" +	"encoding/binary" +	"fmt" +	"regexp" +) + +var pathHashValidator = regexp.MustCompile("^[0-9a-f]{8}$") + +// GeneratePathHash generates a path hash given a referring path hash, +// transaction name, and application name. referringPathHash can be an empty +// string if there was no referring path hash. +func GeneratePathHash(referringPathHash, txnName, appName string) (string, error) { +	var rph uint32 +	if referringPathHash != "" { +		if !pathHashValidator.MatchString(referringPathHash) { +			// Per the spec, invalid referring path hashes should be treated as "0". 
+			referringPathHash = "0" +		} + +		if _, err := fmt.Sscanf(referringPathHash, "%x", &rph); err != nil { +			return "", err +		} +		rph = (rph << 1) | (rph >> 31) +	} + +	hashInput := fmt.Sprintf("%s;%s", appName, txnName) +	hash := md5.Sum([]byte(hashInput)) +	low32 := binary.BigEndian.Uint32(hash[12:]) + +	return fmt.Sprintf("%08x", rph^low32), nil +} diff --git a/internal/cat/path_hash_test.go b/internal/cat/path_hash_test.go new file mode 100644 index 000000000..a32d2a663 --- /dev/null +++ b/internal/cat/path_hash_test.go @@ -0,0 +1,32 @@ +package cat + +import ( +	"testing" + +	"github.com/newrelic/go-agent/internal/crossagent" +) + +func TestGeneratePathHash(t *testing.T) { +	var tcs []struct { +		Name              string +		ReferringPathHash string +		ApplicationName   string +		TransactionName   string +		ExpectedPathHash  string +	} + +	err := crossagent.ReadJSON("cat/path_hashing.json", &tcs) +	if err != nil { +		t.Fatal(err) +	} + +	for _, tc := range tcs { +		hash, err := GeneratePathHash(tc.ReferringPathHash, tc.TransactionName, tc.ApplicationName) +		if err != nil { +			t.Errorf("%s: error expected to be nil; got %v", tc.Name, err) +		} +		if hash != tc.ExpectedPathHash { +			t.Errorf("%s: expected %s; got %s", tc.Name, tc.ExpectedPathHash, hash) +		} +	} +} diff --git a/internal/cat/synthetics.go b/internal/cat/synthetics.go new file mode 100644 index 000000000..3836f625b --- /dev/null +++ b/internal/cat/synthetics.go @@ -0,0 +1,82 @@ +package cat + +import ( +	"encoding/json" +	"errors" +	"fmt" +) + +// SyntheticsHeader represents a decoded Synthetics header. 
+type SyntheticsHeader struct { +	Version    int +	AccountID  int +	ResourceID string +	JobID      string +	MonitorID  string +} + +var ( +	errInvalidSyntheticsJSON       = errors.New("invalid synthetics JSON") +	errInvalidSyntheticsVersion    = errors.New("version is not a float64") +	errInvalidSyntheticsAccountID  = errors.New("account ID is not a float64") +	errInvalidSyntheticsResourceID = errors.New("synthetics resource ID is not a string") +	errInvalidSyntheticsJobID      = errors.New("synthetics job ID is not a string") +	errInvalidSyntheticsMonitorID  = errors.New("synthetics monitor ID is not a string") +) + +type errUnexpectedSyntheticsVersion int + +func (e errUnexpectedSyntheticsVersion) Error() string { +	return fmt.Sprintf("unexpected synthetics header version: %d", e) +} + +// UnmarshalJSON unmarshalls a SyntheticsHeader from raw JSON. +func (s *SyntheticsHeader) UnmarshalJSON(data []byte) error { +	var ok bool +	var v interface{} + +	if err := json.Unmarshal(data, &v); err != nil { +		return err +	} + +	arr, ok := v.([]interface{}) +	if !ok { +		return errInvalidSyntheticsJSON +	} +	if len(arr) != 5 { +		return errUnexpectedArraySize{ +			label:    "unexpected number of synthetics elements", +			expected: 5, +			actual:   len(arr), +		} +	} + +	version, ok := arr[0].(float64) +	if !ok { +		return errInvalidSyntheticsVersion +	} +	s.Version = int(version) +	if s.Version != 1 { +		return errUnexpectedSyntheticsVersion(s.Version) +	} + +	accountID, ok := arr[1].(float64) +	if !ok { +		return errInvalidSyntheticsAccountID +	} +	s.AccountID = int(accountID) + +	if s.ResourceID, ok = arr[2].(string); !ok { +		return errInvalidSyntheticsResourceID +	} + +	if s.JobID, ok = arr[3].(string); !ok { +		return errInvalidSyntheticsJobID +	} + +	if s.MonitorID, ok = arr[4].(string); !ok { +		return errInvalidSyntheticsMonitorID +	} + +	return nil +} diff --git a/internal/cat/synthetics_test.go b/internal/cat/synthetics_test.go new file mode 100644 index 000000000..fa518d178 --- /dev/null +++ 
b/internal/cat/synthetics_test.go @@ -0,0 +1,117 @@ +package cat + +import ( + "encoding/json" + "testing" +) + +func TestSyntheticsUnmarshalInvalid(t *testing.T) { + // Test error cases where we get a generic error from the JSON package. + for _, input := range []string{ + // Basic malformed JSON test: beyond this, we're not going to unit test the + // Go standard library's JSON package. + ``, + } { + synthetics := &SyntheticsHeader{} + + if err := json.Unmarshal([]byte(input), synthetics); err == nil { + t.Errorf("given %s: error expected to be non-nil; got nil", input) + } + } + + // Test error cases where the incorrect number of elements was provided. + for _, input := range []string{ + `[]`, + `[1,2,3,4]`, + } { + synthetics := &SyntheticsHeader{} + + err := json.Unmarshal([]byte(input), synthetics) + if _, ok := err.(errUnexpectedArraySize); !ok { + t.Errorf("given %s: error expected to be errUnexpectedArraySize; got %v", input, err) + } + } + + // Test error cases with invalid version numbers. + for _, input := range []string{ + `[0,1234,"resource","job","monitor"]`, + `[2,1234,"resource","job","monitor"]`, + } { + synthetics := &SyntheticsHeader{} + + err := json.Unmarshal([]byte(input), synthetics) + if _, ok := err.(errUnexpectedSyntheticsVersion); !ok { + t.Errorf("given %s: error expected to be errUnexpectedSyntheticsVersion; got %v", input, err) + } + } + + // Test error cases where a specific variable is returned. + for _, tc := range []struct { + input string + err error + }{ + // Unexpected JSON types. + {`false`, errInvalidSyntheticsJSON}, + {`true`, errInvalidSyntheticsJSON}, + {`1234`, errInvalidSyntheticsJSON}, + {`{}`, errInvalidSyntheticsJSON}, + {`""`, errInvalidSyntheticsJSON}, + + // Invalid data types for each field in turn. 
+ {`["version",1234,"resource","job","monitor"]`, errInvalidSyntheticsVersion}, + {`[1,"account","resource","job","monitor"]`, errInvalidSyntheticsAccountID}, + {`[1,1234,0,"job","monitor"]`, errInvalidSyntheticsResourceID}, + {`[1,1234,"resource",-1,"monitor"]`, errInvalidSyntheticsJobID}, + {`[1,1234,"resource","job",false]`, errInvalidSyntheticsMonitorID}, + } { + synthetics := &SyntheticsHeader{} + + if err := json.Unmarshal([]byte(tc.input), synthetics); err != tc.err { + t.Errorf("given %s: error expected to be %v; got %v", tc.input, tc.err, err) + } + } +} + +func TestSyntheticsUnmarshalValid(t *testing.T) { + for _, test := range []struct { + json string + synthetics SyntheticsHeader + }{ + { + json: `[1,1234,"resource","job","monitor"]`, + synthetics: SyntheticsHeader{ + Version: 1, + AccountID: 1234, + ResourceID: "resource", + JobID: "job", + MonitorID: "monitor", + }, + }, + } { + // Test unmarshalling. + synthetics := &SyntheticsHeader{} + if err := json.Unmarshal([]byte(test.json), synthetics); err != nil { + t.Errorf("given %s: error expected to be nil; got %v", test.json, err) + } + + if test.synthetics.Version != synthetics.Version { + t.Errorf("given %s: Version expected to be %d; got %d", test.json, test.synthetics.Version, synthetics.Version) + } + + if test.synthetics.AccountID != synthetics.AccountID { + t.Errorf("given %s: AccountID expected to be %d; got %d", test.json, test.synthetics.AccountID, synthetics.AccountID) + } + + if test.synthetics.ResourceID != synthetics.ResourceID { + t.Errorf("given %s: ResourceID expected to be %s; got %s", test.json, test.synthetics.ResourceID, synthetics.ResourceID) + } + + if test.synthetics.JobID != synthetics.JobID { + t.Errorf("given %s: JobID expected to be %s; got %s", test.json, test.synthetics.JobID, synthetics.JobID) + } + + if test.synthetics.MonitorID != synthetics.MonitorID { + t.Errorf("given %s: MonitorID expected to be %s; got %s", test.json, test.synthetics.MonitorID, synthetics.MonitorID) 
+ } + } +} diff --git a/internal/cat/txndata.go b/internal/cat/txndata.go new file mode 100644 index 000000000..a766926a8 --- /dev/null +++ b/internal/cat/txndata.go @@ -0,0 +1,96 @@ +package cat + +import ( + "bytes" + "encoding/json" + "errors" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +// TxnDataHeader represents a decoded TxnData header. +type TxnDataHeader struct { + GUID string + TripID string + PathHash string +} + +var ( + errInvalidTxnDataJSON = errors.New("invalid transaction data JSON") + errInvalidTxnDataGUID = errors.New("GUID is not a string") + errInvalidTxnDataTripID = errors.New("trip ID is not a string or null") + errInvalidTxnDataPathHash = errors.New("path hash is not a string or null") +) + +// MarshalJSON marshals a TxnDataHeader as raw JSON. +func (txnData *TxnDataHeader) MarshalJSON() ([]byte, error) { + // Note that, although there are two and four element versions of this header + // in the wild, we will only ever generate the four element version. + + buf := bytes.NewBufferString("[") + + jsonx.AppendString(buf, txnData.GUID) + + // Write the unused second field. + buf.WriteString(",false,") + jsonx.AppendString(buf, txnData.TripID) + + buf.WriteString(",") + jsonx.AppendString(buf, txnData.PathHash) + + buf.WriteString("]") + + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshals a TxnDataHeader from raw JSON. +func (txnData *TxnDataHeader) UnmarshalJSON(data []byte) error { + var ok bool + var v interface{} + + if err := json.Unmarshal(data, &v); err != nil { + return err + } + + arr, ok := v.([]interface{}) + if !ok { + return errInvalidTxnDataJSON + } + if len(arr) < 2 { + return errUnexpectedArraySize{ + label: "unexpected number of transaction data elements", + expected: 2, + actual: len(arr), + } + } + + if txnData.GUID, ok = arr[0].(string); !ok { + return errInvalidTxnDataGUID + } + + // Ignore the unused second field. + + // Set up defaults for the optional values. 
+ txnData.TripID = "" + txnData.PathHash = "" + + if len(arr) >= 3 { + // Per the cross agent tests, an explicit null is valid here. + if nil != arr[2] { + if txnData.TripID, ok = arr[2].(string); !ok { + return errInvalidTxnDataTripID + } + } + + if len(arr) >= 4 { + // Per the cross agent tests, an explicit null is also valid here. + if nil != arr[3] { + if txnData.PathHash, ok = arr[3].(string); !ok { + return errInvalidTxnDataPathHash + } + } + } + } + + return nil +} diff --git a/internal/cat/txndata_test.go b/internal/cat/txndata_test.go new file mode 100644 index 000000000..ca73f22ce --- /dev/null +++ b/internal/cat/txndata_test.go @@ -0,0 +1,140 @@ +package cat + +import ( + "encoding/json" + "testing" +) + +func TestTxnDataRoundTrip(t *testing.T) { + for _, test := range []struct { + input string + output string + txnData TxnDataHeader + }{ + { + input: `["guid",false]`, + output: `["guid",false,"",""]`, + txnData: TxnDataHeader{ + GUID: "guid", + TripID: "", + PathHash: "", + }, + }, + { + input: `["guid",false,"trip"]`, + output: `["guid",false,"trip",""]`, + txnData: TxnDataHeader{ + GUID: "guid", + TripID: "trip", + PathHash: "", + }, + }, + { + input: `["guid",false,null]`, + output: `["guid",false,"",""]`, + txnData: TxnDataHeader{ + GUID: "guid", + TripID: "", + PathHash: "", + }, + }, + { + input: `["guid",false,"trip",null]`, + output: `["guid",false,"trip",""]`, + txnData: TxnDataHeader{ + GUID: "guid", + TripID: "trip", + PathHash: "", + }, + }, + { + input: `["guid",false,"trip","hash"]`, + output: `["guid",false,"trip","hash"]`, + txnData: TxnDataHeader{ + GUID: "guid", + TripID: "trip", + PathHash: "hash", + }, + }, + } { + // Test unmarshalling. 
+ txnData := &TxnDataHeader{} + if err := json.Unmarshal([]byte(test.input), txnData); err != nil { + t.Errorf("given %s: error expected to be nil; got %v", test.input, err) + } + + if test.txnData.GUID != txnData.GUID { + t.Errorf("given %s: GUID expected to be %s; got %s", test.input, test.txnData.GUID, txnData.GUID) + } + + if test.txnData.TripID != txnData.TripID { + t.Errorf("given %s: TripID expected to be %s; got %s", test.input, test.txnData.TripID, txnData.TripID) + } + + if test.txnData.PathHash != txnData.PathHash { + t.Errorf("given %s: PathHash expected to be %s; got %s", test.input, test.txnData.PathHash, txnData.PathHash) + } + + // Test marshalling. + data, err := json.Marshal(&test.txnData) + if err != nil { + t.Errorf("given %s: error expected to be nil; got %v", test.output, err) + } + + if string(data) != test.output { + t.Errorf("given %s: unexpected JSON %s", test.output, string(data)) + } + } +} + +func TestTxnDataUnmarshal(t *testing.T) { + // Test error cases where we get a generic error from the JSON package. + for _, input := range []string{ + // Basic malformed JSON test: beyond this, we're not going to unit test the + // Go standard library's JSON package. + ``, + } { + txnData := &TxnDataHeader{} + + if err := json.Unmarshal([]byte(input), txnData); err == nil { + t.Errorf("given %s: error expected to be non-nil; got nil", input) + } + } + + // Test error cases where the incorrect number of elements was provided. + for _, input := range []string{ + `[]`, + `[1]`, + } { + txnData := &TxnDataHeader{} + + err := json.Unmarshal([]byte(input), txnData) + if _, ok := err.(errUnexpectedArraySize); !ok { + t.Errorf("given %s: error expected to be errUnexpectedArraySize; got %v", input, err) + } + } + + // Test error cases where a specific variable is returned. + for _, tc := range []struct { + input string + err error + }{ + // Unexpected JSON types. 
+ {`false`, errInvalidTxnDataJSON}, + {`true`, errInvalidTxnDataJSON}, + {`1234`, errInvalidTxnDataJSON}, + {`{}`, errInvalidTxnDataJSON}, + {`""`, errInvalidTxnDataJSON}, + + // Invalid data types for each field in turn. + {`[false,false,"trip","hash"]`, errInvalidTxnDataGUID}, + {`["guid",false,0,"hash"]`, errInvalidTxnDataTripID}, + {`["guid",false,"trip",[]]`, errInvalidTxnDataPathHash}, + } { + txnData := &TxnDataHeader{} + + if err := json.Unmarshal([]byte(tc.input), txnData); err != tc.err { + t.Errorf("given %s: error expected to be %v; got %v", tc.input, tc.err, err) + } + } +} diff --git a/internal/cat_test.go b/internal/cat_test.go new file mode 100644 index 000000000..5588f4fa7 --- /dev/null +++ b/internal/cat_test.go @@ -0,0 +1,174 @@ +package internal + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +type eventAttributes map[string]interface{} + +func (e eventAttributes) has(key string) bool { + _, ok := e[key] + return ok +} + +func (e eventAttributes) isString(key string, expected string) error { + actual, ok := e[key].(string) + if !ok { + return fmt.Errorf("key %s is not a string; got type %t with value %v", key, e[key], e[key]) + } + + if actual != expected { + return fmt.Errorf("key %s has unexpected value: expected=%s; got=%s", key, expected, actual) + } + + return nil +} + +type harvestedTxnEvent struct { + intrinsics eventAttributes + userAttributes eventAttributes + agentAttributes eventAttributes +} + +func (h *harvestedTxnEvent) UnmarshalJSON(data []byte) error { + var arr []eventAttributes + + if err := json.Unmarshal(data, &arr); err != nil { + return err + } + + if len(arr) != 3 { + return fmt.Errorf("unexpected number of transaction event items: %d", len(arr)) + } + + h.intrinsics = arr[0] + h.userAttributes = arr[1] + h.agentAttributes = arr[2] + + return nil +} + +func harvestTxnDataEvent(t *TxnData) (*harvestedTxnEvent, error) { + // Since transaction event JSON is built 
using string manipulation, we have + // to do an awkward marshal/unmarshal shuffle to be able to verify the + // intrinsics. + js, err := json.Marshal(&t.TxnEvent) + if err != nil { + return nil, err + } + + event := &harvestedTxnEvent{} + if err := json.Unmarshal(js, event); err != nil { + return nil, err + } + + return event, nil +} + +// This function implements as close as we can get to the round trip tests in +// the cross agent tests. +func TestCatMap(t *testing.T) { + var testcases []struct { + Name string `json:"name"` + AppName string `json:"appName"` + TransactionName string `json:"transactionName"` + TransactionGUID string `json:"transactionGuid"` + InboundPayload []interface{} `json:"inboundPayload"` + ExpectedIntrinsicFields map[string]string `json:"expectedIntrinsicFields"` + NonExpectedIntrinsicFields []string `json:"nonExpectedIntrinsicFields"` + OutboundRequests []struct { + OutboundTxnName string `json:"outboundTxnName"` + ExpectedOutboundPayload json.RawMessage `json:"expectedOutboundPayload"` + } `json:"outboundRequests"` + } + + err := crossagent.ReadJSON("cat/cat_map.json", &testcases) + if err != nil { + t.Fatal(err) + } + + for _, tc := range testcases { + // Fake enough transaction data to run the test. + tr := &TxnData{ + Name: tc.TransactionName, + } + + tr.CrossProcess.Init(true, false, &ConnectReply{ + CrossProcessID: "1#1", + EncodingKey: "foo", + TrustedAccounts: map[int]struct{}{1: {}}, + }) + + // Marshal the inbound payload into JSON for easier testing. + txnData, err := json.Marshal(tc.InboundPayload) + if err != nil { + t.Errorf("%s: error marshalling inbound payload: %v", tc.Name, err) + } + + // Set up the GUID. + if tc.TransactionGUID != "" { + tr.CrossProcess.GUID = tc.TransactionGUID + } + + // Swallow errors, since some of these tests are testing the behaviour when + // erroneous headers are provided. + tr.CrossProcess.handleInboundRequestTxnData(txnData) + + // Simulate outbound requests. 
+ for _, req := range tc.OutboundRequests { + metadata, err := tr.CrossProcess.CreateCrossProcessMetadata(req.OutboundTxnName, tc.AppName) + if err != nil { + t.Errorf("%s: error creating outbound request headers: %v", tc.Name, err) + } + + // Grab and deobfuscate the txndata that would have been sent to the + // external service. + txnData, err := Deobfuscate(metadata.TxnData, tr.CrossProcess.EncodingKey) + if err != nil { + t.Errorf("%s: error deobfuscating outbound request header: %v", tc.Name, err) + } + + // Check the JSON against the expected value. + compacted := CompactJSONString(string(txnData)) + expected := CompactJSONString(string(req.ExpectedOutboundPayload)) + if compacted != expected { + t.Errorf("%s: outbound metadata does not match expected value: expected=%s; got=%s", tc.Name, expected, compacted) + } + } + + // Finalise the transaction, ignoring errors. + tr.CrossProcess.Finalise(tc.TransactionName, tc.AppName) + + // Harvest the event. + event, err := harvestTxnDataEvent(tr) + if err != nil { + t.Errorf("%s: error harvesting event data: %v", tc.Name, err) + } + + // Now we have the event, let's look for the expected intrinsics. + for key, value := range tc.ExpectedIntrinsicFields { + // First, check if the key exists at all. + if !event.intrinsics.has(key) { + t.Fatalf("%s: missing intrinsic %s", tc.Name, key) + } + + // Everything we're looking for is a string, so we can be a little lazy + // here. + if err := event.intrinsics.isString(key, value); err != nil { + t.Errorf("%s: %v", tc.Name, err) + } + } + + // Finally, we verify that the unexpected intrinsics didn't miraculously + // appear. 
+ for _, key := range tc.NonExpectedIntrinsicFields { + if event.intrinsics.has(key) { + t.Errorf("%s: expected intrinsic %s to be missing; instead, got value %v", tc.Name, key, event.intrinsics[key]) + } + } + } +} diff --git a/internal/collector.go b/internal/collector.go new file mode 100644 index 000000000..680c2f119 --- /dev/null +++ b/internal/collector.go @@ -0,0 +1,331 @@ +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "time" + + "github.com/newrelic/go-agent/internal/logger" +) + +const ( + // ProcotolVersion is the protocol version used to communicate with NR + // backend. + ProcotolVersion = 17 + userAgentPrefix = "NewRelic-Go-Agent/" + + // Methods used in collector communication. + cmdPreconnect = "preconnect" + cmdConnect = "connect" + cmdMetrics = "metric_data" + cmdCustomEvents = "custom_event_data" + cmdTxnEvents = "analytic_event_data" + cmdErrorEvents = "error_event_data" + cmdErrorData = "error_data" + cmdTxnTraces = "transaction_sample_data" + cmdSlowSQLs = "sql_trace_data" + cmdSpanEvents = "span_event_data" +) + +// RpmCmd contains fields specific to an individual call made to RPM. +type RpmCmd struct { + Name string + Collector string + RunID string + Data []byte + RequestHeadersMap map[string]string + MaxPayloadSize int +} + +// RpmControls contains fields which will be the same for all calls made +// by the same application. +type RpmControls struct { + License string + Client *http.Client + Logger logger.Logger + AgentVersion string +} + +// RPMResponse contains a NR endpoint response. 
+// +// Agent Behavior Summary: +// +// on connect/preconnect: +// 410 means shutdown +// 200, 202 mean success (start run) +// all other response codes and errors mean try after backoff +// +// on harvest: +// 410 means shutdown +// 401, 409 mean restart run +// 408, 429, 500, 503 mean save data for next harvest +// all other response codes and errors discard the data and continue the current harvest +type RPMResponse struct { + statusCode int + body []byte + // Err indicates whether or not the call was successful: newRPMResponse + // should be used to avoid mismatch between statusCode and Err. + Err error + disconnectSecurityPolicy bool +} + +func newRPMResponse(statusCode int) RPMResponse { + var err error + if statusCode != 200 && statusCode != 202 { + err = fmt.Errorf("response code: %d", statusCode) + } + return RPMResponse{statusCode: statusCode, Err: err} +} + +// IsDisconnect indicates that the agent should disconnect. +func (resp RPMResponse) IsDisconnect() bool { + return resp.statusCode == 410 || resp.disconnectSecurityPolicy +} + +// IsRestartException indicates that the agent should restart. +func (resp RPMResponse) IsRestartException() bool { + return resp.statusCode == 401 || + resp.statusCode == 409 +} + +// ShouldSaveHarvestData indicates that the agent should save the data and try +// to send it in the next harvest. 
+func (resp RPMResponse) ShouldSaveHarvestData() bool { + switch resp.statusCode { + case 408, 429, 500, 503: + return true + default: + return false + } +} + +func rpmURL(cmd RpmCmd, cs RpmControls) string { + var u url.URL + + u.Host = cmd.Collector + u.Path = "agent_listener/invoke_raw_method" + u.Scheme = "https" + + query := url.Values{} + query.Set("marshal_format", "json") + query.Set("protocol_version", strconv.Itoa(ProcotolVersion)) + query.Set("method", cmd.Name) + query.Set("license_key", cs.License) + + if len(cmd.RunID) > 0 { + query.Set("run_id", cmd.RunID) + } + + u.RawQuery = query.Encode() + return u.String() +} + +func collectorRequestInternal(url string, cmd RpmCmd, cs RpmControls) RPMResponse { + compressed, err := compress(cmd.Data) + if nil != err { + return RPMResponse{Err: err} + } + + if l := compressed.Len(); l > cmd.MaxPayloadSize { + return RPMResponse{Err: fmt.Errorf("Payload size for %s too large: %d greater than %d", cmd.Name, l, cmd.MaxPayloadSize)} + } + + req, err := http.NewRequest("POST", url, compressed) + if nil != err { + return RPMResponse{Err: err} + } + + req.Header.Add("Accept-Encoding", "identity, deflate") + req.Header.Add("Content-Type", "application/octet-stream") + req.Header.Add("User-Agent", userAgentPrefix+cs.AgentVersion) + req.Header.Add("Content-Encoding", "gzip") + for k, v := range cmd.RequestHeadersMap { + req.Header.Add(k, v) + } + + resp, err := cs.Client.Do(req) + if err != nil { + return RPMResponse{Err: err} + } + + defer resp.Body.Close() + + r := newRPMResponse(resp.StatusCode) + + // Read the entire response, rather than using resp.Body as input to json.NewDecoder to + // avoid the issue described here: + // https://github.com/google/go-github/pull/317 + // https://ahmetalpbalkan.com/blog/golang-json-decoder-pitfalls/ + // Also, collector JSON responses are expected to be quite small. 
+ body, err := ioutil.ReadAll(resp.Body) + if nil == r.Err { + r.Err = err + } + r.body = body + + return r +} + +// CollectorRequest makes a request to New Relic. +func CollectorRequest(cmd RpmCmd, cs RpmControls) RPMResponse { + url := rpmURL(cmd, cs) + + if cs.Logger.DebugEnabled() { + cs.Logger.Debug("rpm request", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "payload": JSONString(cmd.Data), + }) + } + + resp := collectorRequestInternal(url, cmd, cs) + + if cs.Logger.DebugEnabled() { + if err := resp.Err; err != nil { + cs.Logger.Debug("rpm failure", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "response": string(resp.body), // Body might not be JSON on failure. + "error": err.Error(), + }) + } else { + cs.Logger.Debug("rpm response", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "response": JSONString(resp.body), + }) + } + } + + return resp +} + +const ( + // NEW_RELIC_HOST can be used to override the New Relic endpoint. This + // is useful for testing. + envHost = "NEW_RELIC_HOST" +) + +var ( + preconnectHostOverride = os.Getenv(envHost) + preconnectHostDefault = "collector.newrelic.com" + preconnectRegionLicenseRegex = regexp.MustCompile(`(^.+?)x`) +) + +func calculatePreconnectHost(license, overrideHost string) string { + if "" != overrideHost { + return overrideHost + } + m := preconnectRegionLicenseRegex.FindStringSubmatch(license) + if len(m) > 1 { + return "collector." + m[1] + ".nr-data.net" + } + return preconnectHostDefault +} + +// ConnectJSONCreator allows the creation of the connect payload JSON to be +// deferred until the SecurityPolicies are acquired and vetted. 
+type ConnectJSONCreator interface { + CreateConnectJSON(*SecurityPolicies) ([]byte, error) +} + +type preconnectRequest struct { + SecurityPoliciesToken string `json:"security_policies_token,omitempty"` + HighSecurity bool `json:"high_security"` +} + +var ( + errMissingAgentRunID = errors.New("connect reply missing agent run id") +) + +// ConnectAttempt tries to connect an application. +func ConnectAttempt(config ConnectJSONCreator, securityPoliciesToken string, highSecurity bool, cs RpmControls) (*ConnectReply, RPMResponse) { + preconnectData, err := json.Marshal([]preconnectRequest{{ + SecurityPoliciesToken: securityPoliciesToken, + HighSecurity: highSecurity, + }}) + if nil != err { + return nil, RPMResponse{Err: fmt.Errorf("unable to marshal preconnect data: %v", err)} + } + + call := RpmCmd{ + Name: cmdPreconnect, + Collector: calculatePreconnectHost(cs.License, preconnectHostOverride), + Data: preconnectData, + MaxPayloadSize: maxPayloadSizeInBytes, + } + + resp := CollectorRequest(call, cs) + if nil != resp.Err { + return nil, resp + } + + var preconnect struct { + Preconnect PreconnectReply `json:"return_value"` + } + err = json.Unmarshal(resp.body, &preconnect) + if nil != err { + // Certain security policy errors must be treated as a disconnect. 
+ return nil, RPMResponse{ + Err: fmt.Errorf("unable to process preconnect reply: %v", err), + disconnectSecurityPolicy: isDisconnectSecurityPolicyError(err), + } + } + + js, err := config.CreateConnectJSON(preconnect.Preconnect.SecurityPolicies.PointerIfPopulated()) + if nil != err { + return nil, RPMResponse{Err: fmt.Errorf("unable to create connect data: %v", err)} + } + + call.Collector = preconnect.Preconnect.Collector + call.Data = js + call.Name = cmdConnect + + resp = CollectorRequest(call, cs) + if nil != resp.Err { + return nil, resp + } + + reply, err := ConstructConnectReply(resp.body, preconnect.Preconnect) + if nil != err { + return nil, RPMResponse{Err: err} + } + + // Note: This should never happen. It would mean the collector + // response is malformed. This exists merely as extra defensiveness. + if "" == reply.RunID { + return nil, RPMResponse{Err: errMissingAgentRunID} + } + + return reply, resp +} + +// ConstructConnectReply takes the body of a Connect reply, in the form of bytes, and a +// PreconnectReply, and converts it into a *ConnectReply +func ConstructConnectReply(body []byte, preconnect PreconnectReply) (*ConnectReply, error) { + var reply struct { + Reply *ConnectReply `json:"return_value"` + } + reply.Reply = ConnectReplyDefaults() + err := json.Unmarshal(body, &reply) + if nil != err { + return nil, fmt.Errorf("unable to parse connect reply: %v", err) + } + + reply.Reply.PreconnectReply = preconnect + + reply.Reply.AdaptiveSampler = NewAdaptiveSampler( + time.Duration(reply.Reply.SamplingTargetPeriodInSeconds)*time.Second, + reply.Reply.SamplingTarget, + time.Now()) + reply.Reply.rulesCache = newRulesCache(txnNameCacheLimit) + + return reply.Reply, nil +} diff --git a/internal/collector_test.go b/internal/collector_test.go new file mode 100644 index 000000000..5d41d7196 --- /dev/null +++ b/internal/collector_test.go @@ -0,0 +1,559 @@ +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + 
"net/url" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" + "github.com/newrelic/go-agent/internal/logger" +) + +func TestResponseCodeError(t *testing.T) { + testcases := []struct { + code int + success bool + disconnect bool + restart bool + saveHarvestData bool + }{ + // success + {code: 200, success: true, disconnect: false, restart: false, saveHarvestData: false}, + {code: 202, success: true, disconnect: false, restart: false, saveHarvestData: false}, + // disconnect + {code: 410, success: false, disconnect: true, restart: false, saveHarvestData: false}, + // restart + {code: 401, success: false, disconnect: false, restart: true, saveHarvestData: false}, + {code: 409, success: false, disconnect: false, restart: true, saveHarvestData: false}, + // save data + {code: 408, success: false, disconnect: false, restart: false, saveHarvestData: true}, + {code: 429, success: false, disconnect: false, restart: false, saveHarvestData: true}, + {code: 500, success: false, disconnect: false, restart: false, saveHarvestData: true}, + {code: 503, success: false, disconnect: false, restart: false, saveHarvestData: true}, + // other errors + {code: 400, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 403, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 404, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 405, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 407, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 411, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 413, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 414, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 415, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 417, 
success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 431, success: false, disconnect: false, restart: false, saveHarvestData: false}, + // unexpected weird codes + {code: -1, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 1, success: false, disconnect: false, restart: false, saveHarvestData: false}, + {code: 999999, success: false, disconnect: false, restart: false, saveHarvestData: false}, + } + for _, tc := range testcases { + resp := newRPMResponse(tc.code) + if tc.success != (nil == resp.Err) { + t.Error("error", tc.code, tc.success, resp.Err) + } + if tc.disconnect != resp.IsDisconnect() { + t.Error("disconnect", tc.code, tc.disconnect, resp.Err) + } + if tc.restart != resp.IsRestartException() { + t.Error("restart", tc.code, tc.restart, resp.Err) + } + if tc.saveHarvestData != resp.ShouldSaveHarvestData() { + t.Error("save harvest data", tc.code, tc.saveHarvestData, resp.Err) + } + } +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (fn roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return fn(r) +} + +func TestCollectorRequest(t *testing.T) { + cmd := RpmCmd{ + Name: "cmd_name", + Collector: "collector.com", + RunID: "run_id", + Data: nil, + RequestHeadersMap: map[string]string{"zip": "zap"}, + MaxPayloadSize: maxPayloadSizeInBytes, + } + testField := func(name, v1, v2 string) { + if v1 != v2 { + t.Error(name, v1, v2) + } + } + cs := RpmControls{ + License: "the_license", + Client: &http.Client{ + Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) { + testField("method", r.Method, "POST") + testField("url", r.URL.String(), "https://collector.com/agent_listener/invoke_raw_method?license_key=the_license&marshal_format=json&method=cmd_name&protocol_version=17&run_id=run_id") + testField("Accept-Encoding", r.Header.Get("Accept-Encoding"), "identity, deflate") + testField("Content-Type", r.Header.Get("Content-Type"), 
"application/octet-stream") + testField("User-Agent", r.Header.Get("User-Agent"), "NewRelic-Go-Agent/agent_version") + testField("Content-Encoding", r.Header.Get("Content-Encoding"), "gzip") + testField("zip", r.Header.Get("zip"), "zap") + return &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("body")), + }, nil + }), + }, + Logger: logger.ShimLogger{IsDebugEnabled: true}, + AgentVersion: "agent_version", + } + resp := CollectorRequest(cmd, cs) + if nil != resp.Err { + t.Error(resp.Err) + } +} + +func TestCollectorBadRequest(t *testing.T) { + cmd := RpmCmd{ + Name: "cmd_name", + Collector: "collector.com", + RunID: "run_id", + Data: nil, + RequestHeadersMap: map[string]string{"zip": "zap"}, + } + cs := RpmControls{ + License: "the_license", + Client: &http.Client{ + Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("body")), + }, nil + }), + }, + Logger: logger.ShimLogger{IsDebugEnabled: true}, + AgentVersion: "agent_version", + } + u := ":" // bad url + resp := collectorRequestInternal(u, cmd, cs) + if nil == resp.Err { + t.Error("missing expected error") + } + +} + +func TestUrl(t *testing.T) { + cmd := RpmCmd{ + Name: "foo_method", + Collector: "example.com", + } + cs := RpmControls{ + License: "123abc", + Client: nil, + Logger: nil, + AgentVersion: "1", + } + + out := rpmURL(cmd, cs) + u, err := url.Parse(out) + if err != nil { + t.Fatalf("url.Parse(%q) = %q", out, err) + } + + got := u.Query().Get("license_key") + if got != cs.License { + t.Errorf("got=%q cmd.License=%q", got, cs.License) + } + if u.Scheme != "https" { + t.Error(u.Scheme) + } +} + +const ( + unknownRequiredPolicyBody = `{"return_value":{"redirect_host":"special_collector","security_policies":{"unknown_policy":{"enabled":true,"required":true}}}}` + redirectBody = `{"return_value":{"redirect_host":"special_collector"}}` + connectBody = 
`{"return_value":{"agent_run_id":"my_agent_run_id"}}` + malformedBody = `{"return_value":}}` +) + +func makeResponse(code int, body string) *http.Response { + return &http.Response{ + StatusCode: code, + Body: ioutil.NopCloser(strings.NewReader(body)), + } +} + +type endpointResult struct { + response *http.Response + err error +} + +type connectMock struct { + redirect endpointResult + connect endpointResult + // testConfig will be used if this is nil + config ConnectJSONCreator +} + +func (m connectMock) RoundTrip(r *http.Request) (*http.Response, error) { + cmd := r.URL.Query().Get("method") + switch cmd { + case cmdPreconnect: + return m.redirect.response, m.redirect.err + case cmdConnect: + return m.connect.response, m.connect.err + default: + return nil, fmt.Errorf("unknown cmd: %s", cmd) + } +} + +func (m connectMock) CancelRequest(req *http.Request) {} + +type testConfig struct{} + +func (tc testConfig) CreateConnectJSON(*SecurityPolicies) ([]byte, error) { + return []byte(`"connect-json"`), nil +} + +type errorConfig struct{} + +func (c errorConfig) CreateConnectJSON(*SecurityPolicies) ([]byte, error) { + return nil, errors.New("error creating config JSON") +} + +func testConnectHelper(cm connectMock) (*ConnectReply, RPMResponse) { + config := cm.config + if nil == config { + config = testConfig{} + } + cs := RpmControls{ + License: "12345", + Client: &http.Client{Transport: cm}, + Logger: logger.ShimLogger{IsDebugEnabled: true}, + AgentVersion: "1", + } + + return ConnectAttempt(config, "", false, cs) +} + +func TestConnectAttemptSuccess(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + }) + if nil == run || nil != resp.Err { + t.Fatal(run, resp.Err) + } + if run.Collector != "special_collector" { + t.Error(run.Collector) + } + if run.RunID != "my_agent_run_id" { + t.Error(run) + } +} + +func 
TestConnectClientError(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{err: errors.New("client error")}, + }) + if nil != run { + t.Fatal(run) + } + if resp.Err == nil { + t.Fatal("missing expected error") + } +} + +func TestConnectConfigJSONError(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + config: errorConfig{}, + }) + if nil != run { + t.Fatal(run) + } + if resp.Err == nil { + t.Fatal("missing expected error") + } +} + +func TestConnectAttemptDisconnectOnRedirect(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(410, "")}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if nil == resp.Err { + t.Fatal("missing error") + } + if !resp.IsDisconnect() { + t.Fatal("should be disconnect") + } +} + +func TestConnectAttemptDisconnectOnConnect(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(410, "")}, + }) + if nil != run { + t.Error(run) + } + if nil == resp.Err { + t.Fatal("missing error") + } + if !resp.IsDisconnect() { + t.Fatal("should be disconnect") + } +} + +func TestConnectAttemptBadSecurityPolicies(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, unknownRequiredPolicyBody)}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if nil == resp.Err { + t.Fatal("missing error") + } + if !resp.IsDisconnect() { + t.Fatal("should be disconnect") + } +} + +func TestConnectAttemptInvalidJSON(t *testing.T) { + run, resp := 
testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, malformedBody)}, + }) + if nil != run { + t.Error(run) + } + if nil == resp.Err { + t.Fatal("missing error") + } +} + +func TestConnectAttemptCollectorNotString(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, `{"return_value":123}`)}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if nil == resp.Err { + t.Fatal("missing error") + } +} + +func TestConnectAttempt401(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(401, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if nil == resp.Err { + t.Fatal("missing error") + } + if !resp.IsRestartException() { + t.Fatal("should be restart") + } +} + +func TestConnectAttemptOtherReturnCode(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(413, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if nil == resp.Err { + t.Fatal("missing error") + } +} + +func TestConnectAttemptMissingRunID(t *testing.T) { + run, resp := testConnectHelper(connectMock{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, `{"return_value":{}}`)}, + }) + if nil != run { + t.Error(run) + } + if errMissingAgentRunID != resp.Err { + t.Fatal("wrong error", resp.Err) + } +} + +func TestCalculatePreconnectHost(t *testing.T) { + // non-region license + host := calculatePreconnectHost("0123456789012345678901234567890123456789", "") + if host != preconnectHostDefault { + t.Error(host) + } + // override present + override := 
"other-collector.newrelic.com" + host = calculatePreconnectHost("0123456789012345678901234567890123456789", override) + if host != override { + t.Error(host) + } + // four letter region + host = calculatePreconnectHost("eu01xx6789012345678901234567890123456789", "") + if host != "collector.eu01.nr-data.net" { + t.Error(host) + } + // five letter region + host = calculatePreconnectHost("gov01x6789012345678901234567890123456789", "") + if host != "collector.gov01.nr-data.net" { + t.Error(host) + } + // six letter region + host = calculatePreconnectHost("foo001x6789012345678901234567890123456789", "") + if host != "collector.foo001.nr-data.net" { + t.Error(host) + } +} + +func TestPreconnectHostCrossAgent(t *testing.T) { + var testcases []struct { + Name string `json:"name"` + ConfigFileKey string `json:"config_file_key"` + EnvKey string `json:"env_key"` + ConfigOverrideHost string `json:"config_override_host"` + EnvOverrideHost string `json:"env_override_host"` + ExpectHostname string `json:"hostname"` + } + err := crossagent.ReadJSON("collector_hostname.json", &testcases) + if err != nil { + t.Fatal(err) + } + + for _, tc := range testcases { + // mimic file/environment precedence of other agents + configKey := tc.ConfigFileKey + if "" != tc.EnvKey { + configKey = tc.EnvKey + } + overrideHost := tc.ConfigOverrideHost + if "" != tc.EnvOverrideHost { + overrideHost = tc.EnvOverrideHost + } + + host := calculatePreconnectHost(configKey, overrideHost) + if host != tc.ExpectHostname { + t.Errorf(`test="%s" got="%s" expected="%s"`, tc.Name, host, tc.ExpectHostname) + } + } +} + +func TestCollectorRequestRespectsMaxPayloadSize(t *testing.T) { + // Test that CollectorRequest returns an error when MaxPayloadSize is + // exceeded + cmd := RpmCmd{ + Name: "cmd_name", + Collector: "collector.com", + RunID: "run_id", + Data: []byte("abcdefghijklmnopqrstuvwxyz"), + MaxPayloadSize: 3, + } + cs := RpmControls{ + Client: &http.Client{ + Transport: roundTripperFunc(func(r 
*http.Request) (*http.Response, error) { + t.Error("no response should have gone out!") + return nil, nil + }), + }, + Logger: logger.ShimLogger{IsDebugEnabled: true}, + } + resp := CollectorRequest(cmd, cs) + if nil == resp.Err { + t.Error("response should have contained error") + } + if resp.ShouldSaveHarvestData() { + t.Error("harvest data should be discarded when max_payload_size_in_bytes is exceeded") + } +} + +func TestConnectReplyMaxPayloadSize(t *testing.T) { + testcases := []struct { + replyBody string + expectedMaxPayloadSize int + }{ + { + replyBody: `{"return_value":{"agent_run_id":"my_agent_run_id"}}`, + expectedMaxPayloadSize: 1000 * 1000, + }, + { + replyBody: `{"return_value":{"agent_run_id":"my_agent_run_id","max_payload_size_in_bytes":123}}`, + expectedMaxPayloadSize: 123, + }, + } + + controls := func(replyBody string) RpmControls { + return RpmControls{ + Client: &http.Client{ + Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader(replyBody)), + }, nil + }), + }, + Logger: logger.ShimLogger{IsDebugEnabled: true}, + } + } + + for _, test := range testcases { + reply, resp := ConnectAttempt(testConfig{}, "", false, controls(test.replyBody)) + if nil != resp.Err { + t.Error("resp returned unexpected error:", resp.Err) + } + if test.expectedMaxPayloadSize != reply.MaxPayloadSizeInBytes { + t.Errorf("incorrect MaxPayloadSizeInBytes: expected=%d actual=%d", + test.expectedMaxPayloadSize, reply.MaxPayloadSizeInBytes) + } + } +} + +func TestPreconnectRequestMarshall(t *testing.T) { + tests := map[string]preconnectRequest{ + `[{"security_policies_token":"securityPoliciesToken","high_security":false}]`: { + SecurityPoliciesToken: "securityPoliciesToken", + HighSecurity: false, + }, + `[{"security_policies_token":"securityPoliciesToken","high_security":true}]`: { + SecurityPoliciesToken: "securityPoliciesToken", + HighSecurity: true, + }, + 
`[{"high_security":true}]`: { + SecurityPoliciesToken: "", + HighSecurity: true, + }, + `[{"high_security":false}]`: { + SecurityPoliciesToken: "", + HighSecurity: false, + }, + } + for expected, request := range tests { + b, e := json.Marshal([]preconnectRequest{request}) + if e != nil { + t.Fatal("Unable to marshall preconnect request", e) + } + result := string(b) + if result != expected { + t.Errorf("Invalid preconnect request marshall: expected %s, got %s", expected, result) + } + } +} diff --git a/internal/compress.go b/internal/compress.go new file mode 100644 index 000000000..b20b9600f --- /dev/null +++ b/internal/compress.go @@ -0,0 +1,19 @@ +package internal + +import ( + "bytes" + "compress/gzip" +) + +func compress(b []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + w := gzip.NewWriter(&buf) + _, err := w.Write(b) + w.Close() + + if nil != err { + return nil, err + } + + return &buf, nil +} diff --git a/internal/connect_reply.go b/internal/connect_reply.go new file mode 100644 index 000000000..254844a7c --- /dev/null +++ b/internal/connect_reply.go @@ -0,0 +1,237 @@ +package internal + +import ( + "encoding/json" + "strings" + "time" +) + +// AgentRunID identifies the current connection with the collector. +type AgentRunID string + +func (id AgentRunID) String() string { + return string(id) +} + +// PreconnectReply contains settings from the preconnect endpoint. +type PreconnectReply struct { + Collector string `json:"redirect_host"` + SecurityPolicies SecurityPolicies `json:"security_policies"` +} + +// ConnectReply contains all of the settings and state send down from the +// collector. It should not be modified after creation. 
+type ConnectReply struct { + RunID AgentRunID `json:"agent_run_id"` + RequestHeadersMap map[string]string `json:"request_headers_map"` + MaxPayloadSizeInBytes int `json:"max_payload_size_in_bytes"` + EntityGUID string `json:"entity_guid"` + + // Transaction Name Modifiers + SegmentTerms segmentRules `json:"transaction_segment_terms"` + TxnNameRules metricRules `json:"transaction_name_rules"` + URLRules metricRules `json:"url_rules"` + MetricRules metricRules `json:"metric_name_rules"` + + // Cross Process + EncodingKey string `json:"encoding_key"` + CrossProcessID string `json:"cross_process_id"` + TrustedAccounts trustedAccountSet `json:"trusted_account_ids"` + + // Settings + KeyTxnApdex map[string]float64 `json:"web_transactions_apdex"` + ApdexThresholdSeconds float64 `json:"apdex_t"` + CollectAnalyticsEvents bool `json:"collect_analytics_events"` + CollectCustomEvents bool `json:"collect_custom_events"` + CollectTraces bool `json:"collect_traces"` + CollectErrors bool `json:"collect_errors"` + CollectErrorEvents bool `json:"collect_error_events"` + CollectSpanEvents bool `json:"collect_span_events"` + + // RUM + AgentLoader string `json:"js_agent_loader"` + Beacon string `json:"beacon"` + BrowserKey string `json:"browser_key"` + AppID string `json:"application_id"` + ErrorBeacon string `json:"error_beacon"` + JSAgentFile string `json:"js_agent_file"` + + // PreconnectReply fields are not in the connect reply, this embedding + // is done to simplify code. + PreconnectReply `json:"-"` + + Messages []struct { + Message string `json:"message"` + Level string `json:"level"` + } `json:"messages"` + + AdaptiveSampler AdaptiveSampler + // TraceIDGenerator creates random IDs for distributed tracing. It + // exists here in the connect reply so it can be modified to create + // deterministic identifiers in tests. 
+ TraceIDGenerator *TraceIDGenerator `json:"-"` + + // BetterCAT/Distributed Tracing + AccountID string `json:"account_id"` + TrustedAccountKey string `json:"trusted_account_key"` + PrimaryAppID string `json:"primary_application_id"` + SamplingTarget uint64 `json:"sampling_target"` + SamplingTargetPeriodInSeconds int `json:"sampling_target_period_in_seconds"` + + // rulesCache caches the results of calling CreateFullTxnName. It + // exists here in ConnectReply since it is specific to a set of rules + // and is shared between transactions. + rulesCache *rulesCache + + ServerSideConfig struct { + TransactionTracerEnabled *bool `json:"transaction_tracer.enabled"` + // TransactionTracerThreshold should contain either a number or + // "apdex_f" if it is non-nil. + TransactionTracerThreshold interface{} `json:"transaction_tracer.transaction_threshold"` + TransactionTracerStackTraceThreshold *float64 `json:"transaction_tracer.stack_trace_threshold"` + ErrorCollectorEnabled *bool `json:"error_collector.enabled"` + ErrorCollectorIgnoreStatusCodes []int `json:"error_collector.ignore_status_codes"` + CrossApplicationTracerEnabled *bool `json:"cross_application_tracer.enabled"` + } `json:"agent_config"` + + // Faster Event Harvest + EventData EventHarvestConfig `json:"event_harvest_config"` +} + +// EventHarvestConfig contains fields relating to faster event harvest. +// This structure is used in the connect request (to send up defaults) +// and in the connect response (to get the server values). 
+// +// https://source.datanerd.us/agents/agent-specs/blob/master/Connect-LEGACY.md#event_harvest_config-hash +// https://source.datanerd.us/agents/agent-specs/blob/master/Connect-LEGACY.md#event-harvest-config +type EventHarvestConfig struct { + ReportPeriodMs int `json:"report_period_ms,omitempty"` + Limits struct { + TxnEvents *uint `json:"analytic_event_data,omitempty"` + CustomEvents *uint `json:"custom_event_data,omitempty"` + ErrorEvents *uint `json:"error_event_data,omitempty"` + SpanEvents *uint `json:"span_event_data,omitempty"` + } `json:"harvest_limits"` +} + +// ConfigurablePeriod returns the Faster Event Harvest configurable reporting period if it is set, or the default +// report period otherwise. +func (r *ConnectReply) ConfigurablePeriod() time.Duration { + ms := DefaultConfigurableEventHarvestMs + if nil != r && r.EventData.ReportPeriodMs > 0 { + ms = r.EventData.ReportPeriodMs + } + return time.Duration(ms) * time.Millisecond +} + +func uintPtr(x uint) *uint { return &x } + +// DefaultEventHarvestConfig provides faster event harvest defaults. +func DefaultEventHarvestConfig(eventer MaxTxnEventer) EventHarvestConfig { + cfg := EventHarvestConfig{} + cfg.ReportPeriodMs = DefaultConfigurableEventHarvestMs + cfg.Limits.TxnEvents = uintPtr(uint(eventer.MaxTxnEvents())) + cfg.Limits.CustomEvents = uintPtr(uint(MaxCustomEvents)) + cfg.Limits.ErrorEvents = uintPtr(uint(MaxErrorEvents)) + return cfg +} + +type trustedAccountSet map[int]struct{} + +func (t *trustedAccountSet) IsTrusted(account int) bool { + _, exists := (*t)[account] + return exists +} + +func (t *trustedAccountSet) UnmarshalJSON(data []byte) error { + accounts := make([]int, 0) + if err := json.Unmarshal(data, &accounts); err != nil { + return err + } + + *t = make(trustedAccountSet) + for _, account := range accounts { + (*t)[account] = struct{}{} + } + + return nil +} + +// ConnectReplyDefaults returns a newly allocated ConnectReply with the proper +// default settings. 
A pointer to a global is not used to prevent consumers +// from changing the default settings. +func ConnectReplyDefaults() *ConnectReply { + return &ConnectReply{ + ApdexThresholdSeconds: 0.5, + CollectAnalyticsEvents: true, + CollectCustomEvents: true, + CollectTraces: true, + CollectErrors: true, + CollectErrorEvents: true, + CollectSpanEvents: true, + MaxPayloadSizeInBytes: maxPayloadSizeInBytes, + // No transactions should be sampled before the application is + // connected. + AdaptiveSampler: SampleNothing{}, + + SamplingTarget: 10, + SamplingTargetPeriodInSeconds: 60, + + TraceIDGenerator: NewTraceIDGenerator(int64(time.Now().UnixNano())), + } +} + +// CalculateApdexThreshold calculates the apdex threshold. +func CalculateApdexThreshold(c *ConnectReply, txnName string) time.Duration { + if t, ok := c.KeyTxnApdex[txnName]; ok { + return FloatSecondsToDuration(t) + } + return FloatSecondsToDuration(c.ApdexThresholdSeconds) +} + +// CreateFullTxnName uses collector rules and the appropriate metric prefix to +// construct the full transaction metric name from the name given by the +// consumer. +func CreateFullTxnName(input string, reply *ConnectReply, isWeb bool) string { + if name := reply.rulesCache.find(input, isWeb); "" != name { + return name + } + name := constructFullTxnName(input, reply, isWeb) + if "" != name { + // Note that we don't cache situations where the rules say + // ignore. It would increase complication (we would need to + // disambiguate not-found vs ignore). Also, the ignore code + // path is probably extremely uncommon. 
+ reply.rulesCache.set(input, isWeb, name) + } + return name +} + +func constructFullTxnName(input string, reply *ConnectReply, isWeb bool) string { + var afterURLRules string + if "" != input { + afterURLRules = reply.URLRules.Apply(input) + if "" == afterURLRules { + return "" + } + } + + prefix := backgroundMetricPrefix + if isWeb { + prefix = webMetricPrefix + } + + var beforeNameRules string + if strings.HasPrefix(afterURLRules, "/") { + beforeNameRules = prefix + afterURLRules + } else { + beforeNameRules = prefix + "/" + afterURLRules + } + + afterNameRules := reply.TxnNameRules.Apply(beforeNameRules) + if "" == afterNameRules { + return "" + } + + return reply.SegmentTerms.apply(afterNameRules) +} diff --git a/internal/connect_reply_test.go b/internal/connect_reply_test.go new file mode 100644 index 000000000..fe9ddd7be --- /dev/null +++ b/internal/connect_reply_test.go @@ -0,0 +1,221 @@ +package internal + +import ( + "encoding/json" + "testing" + "time" +) + +func TestCreateFullTxnNameBasic(t *testing.T) { + emptyReply := ConnectReplyDefaults() + + tcs := []struct { + input string + background bool + expect string + }{ + {"", true, "WebTransaction/Go/"}, + {"/", true, "WebTransaction/Go/"}, + {"hello", true, "WebTransaction/Go/hello"}, + {"/hello", true, "WebTransaction/Go/hello"}, + + {"", false, "OtherTransaction/Go/"}, + {"/", false, "OtherTransaction/Go/"}, + {"hello", false, "OtherTransaction/Go/hello"}, + {"/hello", false, "OtherTransaction/Go/hello"}, + } + + for _, tc := range tcs { + if out := CreateFullTxnName(tc.input, emptyReply, tc.background); out != tc.expect { + t.Error(tc.input, tc.background, out, tc.expect) + } + } +} + +func TestCreateFullTxnNameURLRulesIgnore(t *testing.T) { + js := `[{ + "match_expression":".*zip.*$", + "ignore":true + }]` + reply := ConnectReplyDefaults() + err := json.Unmarshal([]byte(js), &reply.URLRules) + if nil != err { + t.Fatal(err) + } + if out := CreateFullTxnName("/zap/zip/zep", reply, true); out != "" { + 
t.Error(out) + } +} + +func TestCreateFullTxnNameTxnRulesIgnore(t *testing.T) { + js := `[{ + "match_expression":"^WebTransaction/Go/zap/zip/zep$", + "ignore":true + }]` + reply := ConnectReplyDefaults() + err := json.Unmarshal([]byte(js), &reply.TxnNameRules) + if nil != err { + t.Fatal(err) + } + if out := CreateFullTxnName("/zap/zip/zep", reply, true); out != "" { + t.Error(out) + } +} + +func TestCreateFullTxnNameAllRulesWithCache(t *testing.T) { + js := `{ + "url_rules":[ + {"match_expression":"zip","each_segment":true,"replacement":"zoop"} + ], + "transaction_name_rules":[ + {"match_expression":"WebTransaction/Go/zap/zoop/zep", + "replacement":"WebTransaction/Go/zap/zoop/zep/zup/zyp"} + ], + "transaction_segment_terms":[ + {"prefix": "WebTransaction/Go/", + "terms": ["zyp", "zoop", "zap"]} + ] + }` + reply := ConnectReplyDefaults() + reply.rulesCache = newRulesCache(3) + err := json.Unmarshal([]byte(js), &reply) + if nil != err { + t.Fatal(err) + } + want := "WebTransaction/Go/zap/zoop/*/zyp" + if out := CreateFullTxnName("/zap/zip/zep", reply, true); out != want { + t.Error("wanted:", want, "got:", out) + } + // Check that the cache was populated as expected. + if out := reply.rulesCache.find("/zap/zip/zep", true); out != want { + t.Error("wanted:", want, "got:", out) + } + // Check that the next CreateFullTxnName returns the same output. 
+ if out := CreateFullTxnName("/zap/zip/zep", reply, true); out != want { + t.Error("wanted:", want, "got:", out) + } +} + +func TestCalculateApdexThreshold(t *testing.T) { + reply := ConnectReplyDefaults() + threshold := CalculateApdexThreshold(reply, "WebTransaction/Go/hello") + if threshold != 500*time.Millisecond { + t.Error("default apdex threshold", threshold) + } + + reply = ConnectReplyDefaults() + reply.ApdexThresholdSeconds = 1.3 + reply.KeyTxnApdex = map[string]float64{ + "WebTransaction/Go/zip": 2.2, + "WebTransaction/Go/zap": 2.3, + } + threshold = CalculateApdexThreshold(reply, "WebTransaction/Go/hello") + if threshold != 1300*time.Millisecond { + t.Error(threshold) + } + threshold = CalculateApdexThreshold(reply, "WebTransaction/Go/zip") + if threshold != 2200*time.Millisecond { + t.Error(threshold) + } +} + +func TestIsTrusted(t *testing.T) { + for _, test := range []struct { + id int + trusted string + expected bool + }{ + {1, `[]`, false}, + {1, `[2, 3]`, false}, + {1, `[1]`, true}, + {1, `[1, 2, 3]`, true}, + } { + trustedAccounts := make(trustedAccountSet) + if err := json.Unmarshal([]byte(test.trusted), &trustedAccounts); err != nil { + t.Fatal(err) + } + + if actual := trustedAccounts.IsTrusted(test.id); test.expected != actual { + t.Errorf("failed asserting whether %d is trusted by %v: expected %v; got %v", test.id, test.trusted, test.expected, actual) + } + } +} + +func BenchmarkDefaultRules(b *testing.B) { + js := `{"url_rules":[ + { + "match_expression":".*\\.(ace|arj|ini|txt|udl|plist|css|gif|ico|jpe?g|js|png|swf|woff|caf|aiff|m4v|mpe?g|mp3|mp4|mov)$", + "replacement":"/*.\\1", + "ignore":false, + "eval_order":1000, + "terminate_chain":true, + "replace_all":false, + "each_segment":false + }, + { + "match_expression":"^[0-9][0-9a-f_,.-]*$", + "replacement":"*", + "ignore":false, + "eval_order":1001, + "terminate_chain":false, + "replace_all":false, + "each_segment":true + }, + { + 
"match_expression":"^(.*)/[0-9][0-9a-f_,-]*\\.([0-9a-z][0-9a-z]*)$", + "replacement":"\\1/.*\\2", + "ignore":false, + "eval_order":1002, + "terminate_chain":false, + "replace_all":false, + "each_segment":false + } + ]}` + reply := ConnectReplyDefaults() + reply.rulesCache = newRulesCache(1) + err := json.Unmarshal([]byte(js), &reply) + if nil != err { + b.Fatal(err) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + if out := CreateFullTxnName("/myEndpoint", reply, true); out != "WebTransaction/Go/myEndpoint" { + b.Error(out) + } + } +} + +func TestNegativeHarvestLimits(t *testing.T) { + // Test that negative harvest event limits will cause a connect error. + // Harvest event limits are never expected to be negative: This is just + // extra defensiveness. + _, err := ConstructConnectReply([]byte(`{"return_value":{ + "event_harvest_config": { + "harvest_limits": { + "error_event_data": -1 + } + } + }}`), PreconnectReply{}) + if err == nil { + t.Fatal("expected error missing") + } +} + +type dfltMaxTxnEvents struct{} + +func (dfltMaxTxnEvents) MaxTxnEvents() int { + return MaxTxnEvents +} + +func TestDefaultEventHarvestConfigJSON(t *testing.T) { + js, err := json.Marshal(DefaultEventHarvestConfig(dfltMaxTxnEvents{})) + if err != nil { + t.Error(err) + } + if string(js) != `{"report_period_ms":60000,"harvest_limits":{"analytic_event_data":10000,"custom_event_data":10000,"error_event_data":100}}` { + t.Error(string(js)) + } +} diff --git a/internal/context.go b/internal/context.go new file mode 100644 index 000000000..2b3cab688 --- /dev/null +++ b/internal/context.go @@ -0,0 +1,17 @@ +package internal + +type contextKeyType struct{} + +var ( + // TransactionContextKey is the key used for newrelic.FromContext and + // newrelic.NewContext. + TransactionContextKey = contextKeyType(struct{}{}) + + // GinTransactionContextKey is used as the context key in + // nrgin.Middleware and nrgin.Transaction. 
Unfortunately, Gin requires + // a string context key. We use two different context keys (and check + // both in nrgin.Transaction and newrelic.FromContext) rather than use a + // single string key because context.WithValue will fail golint if used + // with a string key. + GinTransactionContextKey = "newRelicTransaction" +) diff --git a/internal/cross_process_http.go b/internal/cross_process_http.go new file mode 100644 index 000000000..73145c62c --- /dev/null +++ b/internal/cross_process_http.go @@ -0,0 +1,67 @@ +package internal + +import ( + "net/http" + + "github.com/newrelic/go-agent/internal/cat" +) + +// InboundHTTPRequest adds the inbound request metadata to the TxnCrossProcess. +func (txp *TxnCrossProcess) InboundHTTPRequest(hdr http.Header) error { + return txp.handleInboundRequestHeaders(HTTPHeaderToMetadata(hdr)) +} + +// AppDataToHTTPHeader encapsulates the given appData value in the correct HTTP +// header. +func AppDataToHTTPHeader(appData string) http.Header { + header := http.Header{} + + if appData != "" { + header.Add(cat.NewRelicAppDataName, appData) + } + + return header +} + +// HTTPHeaderToAppData gets the appData value from the correct HTTP header. +func HTTPHeaderToAppData(header http.Header) string { + if header == nil { + return "" + } + + return header.Get(cat.NewRelicAppDataName) +} + +// HTTPHeaderToMetadata gets the cross process metadata from the relevant HTTP +// headers. +func HTTPHeaderToMetadata(header http.Header) CrossProcessMetadata { + if header == nil { + return CrossProcessMetadata{} + } + + return CrossProcessMetadata{ + ID: header.Get(cat.NewRelicIDName), + TxnData: header.Get(cat.NewRelicTxnName), + Synthetics: header.Get(cat.NewRelicSyntheticsName), + } +} + +// MetadataToHTTPHeader creates a set of HTTP headers to represent the given +// cross process metadata. 
+func MetadataToHTTPHeader(metadata CrossProcessMetadata) http.Header { + header := http.Header{} + + if metadata.ID != "" { + header.Add(cat.NewRelicIDName, metadata.ID) + } + + if metadata.TxnData != "" { + header.Add(cat.NewRelicTxnName, metadata.TxnData) + } + + if metadata.Synthetics != "" { + header.Add(cat.NewRelicSyntheticsName, metadata.Synthetics) + } + + return header +} diff --git a/internal/cross_process_http_test.go b/internal/cross_process_http_test.go new file mode 100644 index 000000000..3b6d7b842 --- /dev/null +++ b/internal/cross_process_http_test.go @@ -0,0 +1,183 @@ +package internal + +import ( + "net/http" + "reflect" + "testing" + + "github.com/newrelic/go-agent/internal/cat" +) + +func TestTxnCrossProcessInitFromHTTPRequest(t *testing.T) { + txp := &TxnCrossProcess{} + txp.Init(true, false, replyAccountOne) + if txp.IsInbound() { + t.Error("inbound CAT enabled even though there was no request") + } + + txp = &TxnCrossProcess{} + req, err := http.NewRequest("GET", "http://foo.bar/", nil) + if err != nil { + t.Fatal(err) + } + txp.Init(true, false, replyAccountOne) + if err := txp.InboundHTTPRequest(req.Header); err != nil { + t.Errorf("got error while consuming an empty request: %v", err) + } + if txp.IsInbound() { + t.Error("inbound CAT enabled even though there was no metadata in the request") + } + + txp = &TxnCrossProcess{} + req, err = http.NewRequest("GET", "http://foo.bar/", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Add(cat.NewRelicIDName, mustObfuscate(`1#1`, "foo")) + req.Header.Add(cat.NewRelicTxnName, mustObfuscate(`["abcdefgh",false,"12345678","b95be233"]`, "foo")) + txp.Init(true, false, replyAccountOne) + if err := txp.InboundHTTPRequest(req.Header); err != nil { + t.Errorf("got error while consuming an inbound CAT request: %v", err) + } + // A second call to InboundHTTPRequest to ensure that it can safely + // be called multiple times: + if err := txp.InboundHTTPRequest(req.Header); err != nil { + t.Errorf("got 
error while consuming an inbound CAT request: %v", err) + } + if !txp.IsInbound() { + t.Error("inbound CAT disabled even though there was metadata in the request") + } + if txp.ClientID != "1#1" { + t.Errorf("incorrect ClientID: %s", txp.ClientID) + } + if txp.ReferringTxnGUID != "abcdefgh" { + t.Errorf("incorrect ReferringTxnGUID: %s", txp.ReferringTxnGUID) + } + if txp.TripID != "12345678" { + t.Errorf("incorrect TripID: %s", txp.TripID) + } + if txp.ReferringPathHash != "b95be233" { + t.Errorf("incorrect ReferringPathHash: %s", txp.ReferringPathHash) + } +} + +func TestAppDataToHTTPHeader(t *testing.T) { + header := AppDataToHTTPHeader("") + if len(header) != 0 { + t.Errorf("unexpected number of header elements: %d", len(header)) + } + + header = AppDataToHTTPHeader("foo") + if len(header) != 1 { + t.Errorf("unexpected number of header elements: %d", len(header)) + } + if actual := header.Get(cat.NewRelicAppDataName); actual != "foo" { + t.Errorf("unexpected header value: %s", actual) + } +} + +func TestHTTPHeaderToAppData(t *testing.T) { + if appData := HTTPHeaderToAppData(nil); appData != "" { + t.Errorf("unexpected app data: %s", appData) + } + + header := http.Header{} + if appData := HTTPHeaderToAppData(header); appData != "" { + t.Errorf("unexpected app data: %s", appData) + } + + header.Add("X-Foo", "bar") + if appData := HTTPHeaderToAppData(header); appData != "" { + t.Errorf("unexpected app data: %s", appData) + } + + header.Add(cat.NewRelicAppDataName, "foo") + if appData := HTTPHeaderToAppData(header); appData != "foo" { + t.Errorf("unexpected app data: %s", appData) + } +} + +func TestHTTPHeaderToMetadata(t *testing.T) { + if metadata := HTTPHeaderToMetadata(nil); !reflect.DeepEqual(metadata, CrossProcessMetadata{}) { + t.Errorf("unexpected metadata: %v", metadata) + } + + header := http.Header{} + if metadata := HTTPHeaderToMetadata(header); !reflect.DeepEqual(metadata, CrossProcessMetadata{}) { + t.Errorf("unexpected metadata: %v", metadata) + } + 
+ header.Add("X-Foo", "bar") + if metadata := HTTPHeaderToMetadata(header); !reflect.DeepEqual(metadata, CrossProcessMetadata{}) { + t.Errorf("unexpected metadata: %v", metadata) + } + + header.Add(cat.NewRelicIDName, "id") + if metadata := HTTPHeaderToMetadata(header); !reflect.DeepEqual(metadata, CrossProcessMetadata{ + ID: "id", + }) { + t.Errorf("unexpected metadata: %v", metadata) + } + + header.Add(cat.NewRelicTxnName, "txn") + if metadata := HTTPHeaderToMetadata(header); !reflect.DeepEqual(metadata, CrossProcessMetadata{ + ID: "id", + TxnData: "txn", + }) { + t.Errorf("unexpected metadata: %v", metadata) + } + + header.Add(cat.NewRelicSyntheticsName, "synth") + if metadata := HTTPHeaderToMetadata(header); !reflect.DeepEqual(metadata, CrossProcessMetadata{ + ID: "id", + TxnData: "txn", + Synthetics: "synth", + }) { + t.Errorf("unexpected metadata: %v", metadata) + } +} + +func TestMetadataToHTTPHeader(t *testing.T) { + metadata := CrossProcessMetadata{} + + header := MetadataToHTTPHeader(metadata) + if len(header) != 0 { + t.Errorf("unexpected number of header elements: %d", len(header)) + } + + metadata.ID = "id" + header = MetadataToHTTPHeader(metadata) + if len(header) != 1 { + t.Errorf("unexpected number of header elements: %d", len(header)) + } + if actual := header.Get(cat.NewRelicIDName); actual != "id" { + t.Errorf("unexpected header value: %s", actual) + } + + metadata.TxnData = "txn" + header = MetadataToHTTPHeader(metadata) + if len(header) != 2 { + t.Errorf("unexpected number of header elements: %d", len(header)) + } + if actual := header.Get(cat.NewRelicIDName); actual != "id" { + t.Errorf("unexpected header value: %s", actual) + } + if actual := header.Get(cat.NewRelicTxnName); actual != "txn" { + t.Errorf("unexpected header value: %s", actual) + } + + metadata.Synthetics = "synth" + header = MetadataToHTTPHeader(metadata) + if len(header) != 3 { + t.Errorf("unexpected number of header elements: %d", len(header)) + } + if actual := 
header.Get(cat.NewRelicIDName); actual != "id" { + t.Errorf("unexpected header value: %s", actual) + } + if actual := header.Get(cat.NewRelicTxnName); actual != "txn" { + t.Errorf("unexpected header value: %s", actual) + } + if actual := header.Get(cat.NewRelicSyntheticsName); actual != "synth" { + t.Errorf("unexpected header value: %s", actual) + } +} diff --git a/internal/crossagent/README.md b/internal/crossagent/README.md new file mode 100644 index 000000000..49b233213 --- /dev/null +++ b/internal/crossagent/README.md @@ -0,0 +1,3 @@ +# Cross Agent Tests + +At commit a4ec8e617340c8c7936d15ad18309ff5b9cfa93e. diff --git a/internal/crossagent/cross_agent_tests b/internal/crossagent/cross_agent_tests new file mode 160000 index 000000000..a4ec8e617 --- /dev/null +++ b/internal/crossagent/cross_agent_tests @@ -0,0 +1 @@ +Subproject commit a4ec8e617340c8c7936d15ad18309ff5b9cfa93e diff --git a/internal/crossagent/crossagent.go b/internal/crossagent/crossagent.go new file mode 100644 index 000000000..0fbc734ea --- /dev/null +++ b/internal/crossagent/crossagent.go @@ -0,0 +1,54 @@ +package crossagent + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "runtime" +) + +var ( + crossAgentDir = func() string { + if s := os.Getenv("NEW_RELIC_CROSS_AGENT_TESTS"); s != "" { + return s + } + _, here, _, _ := runtime.Caller(0) + return filepath.Join(filepath.Dir(here), "cross_agent_tests") + }() +) + +// ReadFile reads a file from the crossagent tests directory given as with +// ioutil.ReadFile. +func ReadFile(name string) ([]byte, error) { + return ioutil.ReadFile(filepath.Join(crossAgentDir, name)) +} + +// ReadJSON takes the name of a file and parses it using JSON.Unmarshal into +// the interface given. 
+func ReadJSON(name string, v interface{}) error { + data, err := ReadFile(name) + if err != nil { + return err + } + return json.Unmarshal(data, v) +} + +// ReadDir reads a directory relative to crossagent tests and returns an array +// of absolute filepaths of the files in that directory. +func ReadDir(name string) ([]string, error) { + dir := filepath.Join(crossAgentDir, name) + + entries, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + var files []string + for _, info := range entries { + if !info.IsDir() { + files = append(files, filepath.Join(dir, info.Name())) + } + } + return files, nil +} diff --git a/internal/custom_event.go b/internal/custom_event.go new file mode 100644 index 000000000..20cf5918a --- /dev/null +++ b/internal/custom_event.go @@ -0,0 +1,103 @@ +package internal + +import ( + "bytes" + "fmt" + "regexp" + "time" +) + +// https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents + +var ( + eventTypeRegexRaw = `^[a-zA-Z0-9:_ ]+$` + eventTypeRegex = regexp.MustCompile(eventTypeRegexRaw) + + errEventTypeLength = fmt.Errorf("event type exceeds length limit of %d", + attributeKeyLengthLimit) + // ErrEventTypeRegex will be returned to caller of app.RecordCustomEvent + // if the event type is not valid. + ErrEventTypeRegex = fmt.Errorf("event type must match %s", eventTypeRegexRaw) + errNumAttributes = fmt.Errorf("maximum of %d attributes exceeded", + customEventAttributeLimit) +) + +// CustomEvent is a custom event. +type CustomEvent struct { + eventType string + timestamp time.Time + truncatedParams map[string]interface{} +} + +// WriteJSON prepares JSON in the format expected by the collector. 
+func (e *CustomEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", e.eventType) + w.floatField("timestamp", timeToFloatSeconds(e.timestamp)) + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('{') + w = jsonFieldsWriter{buf: buf} + for key, val := range e.truncatedParams { + writeAttributeValueJSON(&w, key, val) + } + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteByte('}') + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. +func (e *CustomEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +func eventTypeValidate(eventType string) error { + if len(eventType) > attributeKeyLengthLimit { + return errEventTypeLength + } + if !eventTypeRegex.MatchString(eventType) { + return ErrEventTypeRegex + } + return nil +} + +// CreateCustomEvent creates a custom event. +func CreateCustomEvent(eventType string, params map[string]interface{}, now time.Time) (*CustomEvent, error) { + if err := eventTypeValidate(eventType); nil != err { + return nil, err + } + + if len(params) > customEventAttributeLimit { + return nil, errNumAttributes + } + + truncatedParams := make(map[string]interface{}) + for key, val := range params { + val, err := ValidateUserAttribute(key, val) + if nil != err { + return nil, err + } + truncatedParams[key] = val + } + + return &CustomEvent{ + eventType: eventType, + timestamp: now, + truncatedParams: truncatedParams, + }, nil +} + +// MergeIntoHarvest implements Harvestable. 
+func (e *CustomEvent) MergeIntoHarvest(h *Harvest) { + h.CustomEvents.Add(e) +} diff --git a/internal/custom_event_test.go b/internal/custom_event_test.go new file mode 100644 index 000000000..7534130cb --- /dev/null +++ b/internal/custom_event_test.go @@ -0,0 +1,223 @@ +package internal + +import ( + "encoding/json" + "strconv" + "testing" + "time" +) + +var ( + now = time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + strLen512 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + strLen255 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +) + +// Tests use a single key-value pair in params to ensure deterministic JSON +// ordering. 
+ +func TestCreateCustomEventSuccess(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{"alpha": 1}, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{"alpha":1},{}]` { + t.Fatal(string(js)) + } +} + +func TestInvalidEventTypeCharacter(t *testing.T) { + event, err := CreateCustomEvent("myEvent!", map[string]interface{}{"alpha": 1}, now) + if err != ErrEventTypeRegex { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestLongEventType(t *testing.T) { + event, err := CreateCustomEvent(strLen512, map[string]interface{}{"alpha": 1}, now) + if err != errEventTypeLength { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestNilParams(t *testing.T) { + event, err := CreateCustomEvent("myEvent", nil, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{},{}]` { + t.Fatal(string(js)) + } +} + +func TestMissingEventType(t *testing.T) { + event, err := CreateCustomEvent("", map[string]interface{}{"alpha": 1}, now) + if err != ErrEventTypeRegex { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestEmptyParams(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{}, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{},{}]` { + t.Fatal(string(js)) + } +} + +func TestTruncatedStringValue(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{"alpha": strLen512}, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != 
`[{"type":"myEvent","timestamp":1.41713646e+09},{"alpha":"`+strLen255+`"},{}]` { + t.Fatal(string(js)) + } +} + +func TestInvalidValueType(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{"alpha": []string{}}, now) + if _, ok := err.(ErrInvalidAttributeType); !ok { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestInvalidCustomAttributeKey(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{strLen512: 1}, now) + if nil == err { + t.Fatal(err) + } + if _, ok := err.(invalidAttributeKeyErr); !ok { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestTooManyAttributes(t *testing.T) { + params := make(map[string]interface{}) + for i := 0; i < customEventAttributeLimit+1; i++ { + params[strconv.Itoa(i)] = i + } + event, err := CreateCustomEvent("myEvent", params, now) + if errNumAttributes != err { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestCustomEventAttributeTypes(t *testing.T) { + testcases := []struct { + val interface{} + js string + }{ + {"string", `"string"`}, + {true, `true`}, + {false, `false`}, + {uint8(1), `1`}, + {uint16(1), `1`}, + {uint32(1), `1`}, + {uint64(1), `1`}, + {int8(1), `1`}, + {int16(1), `1`}, + {int32(1), `1`}, + {int64(1), `1`}, + {float32(1), `1`}, + {float64(1), `1`}, + {uint(1), `1`}, + {int(1), `1`}, + {uintptr(1), `1`}, + } + + for _, tc := range testcases { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{"key": tc.val}, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{"key":`+tc.js+`},{}]` { + t.Fatal(string(js)) + } + } +} + +func TestCustomParamsCopied(t *testing.T) { + params := map[string]interface{}{"alpha": 1} + event, err := CreateCustomEvent("myEvent", params, now) + if nil != err { + t.Fatal(err) + } + // Attempt to change the params 
after the event created: + params["zip"] = "zap" + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{"alpha":1},{}]` { + t.Fatal(string(js)) + } +} + +func TestMultipleAttributeJSON(t *testing.T) { + params := map[string]interface{}{"alpha": 1, "beta": 2} + event, err := CreateCustomEvent("myEvent", params, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + // Params order may not be deterministic, so we simply test that the + // JSON created is valid. + var valid interface{} + if err := json.Unmarshal(js, &valid); nil != err { + t.Error(string(js)) + } +} diff --git a/internal/custom_events.go b/internal/custom_events.go new file mode 100644 index 000000000..bdeabacd4 --- /dev/null +++ b/internal/custom_events.go @@ -0,0 +1,33 @@ +package internal + +import "time" + +type customEvents struct { + *analyticsEvents +} + +func newCustomEvents(max int) *customEvents { + return &customEvents{ + analyticsEvents: newAnalyticsEvents(max), + } +} + +func (cs *customEvents) Add(e *CustomEvent) { + // For the Go Agent, customEvents are added to the application, not the transaction. + // As a result, customEvents do not inherit their priority from the transaction, though + // they are still sampled according to priority sampling. 
+ priority := NewPriority() + cs.addEvent(analyticsEvent{priority, e}) +} + +func (cs *customEvents) MergeIntoHarvest(h *Harvest) { + h.CustomEvents.mergeFailed(cs.analyticsEvents) +} + +func (cs *customEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return cs.CollectorJSON(agentRunID) +} + +func (cs *customEvents) EndpointMethod() string { + return cmdCustomEvents +} diff --git a/internal/custom_metric.go b/internal/custom_metric.go new file mode 100644 index 000000000..61600aea2 --- /dev/null +++ b/internal/custom_metric.go @@ -0,0 +1,12 @@ +package internal + +// CustomMetric is a custom metric. +type CustomMetric struct { + RawInputName string + Value float64 +} + +// MergeIntoHarvest implements Harvestable. +func (m CustomMetric) MergeIntoHarvest(h *Harvest) { + h.Metrics.addValue(customMetric(m.RawInputName), "", m.Value, unforced) +} diff --git a/internal/distributed_tracing.go b/internal/distributed_tracing.go new file mode 100644 index 000000000..82af9b667 --- /dev/null +++ b/internal/distributed_tracing.go @@ -0,0 +1,203 @@ +package internal + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "time" +) + +type distTraceVersion [2]int + +func (v distTraceVersion) major() int { return v[0] } +func (v distTraceVersion) minor() int { return v[1] } + +const ( + // CallerType is the Type field's value for outbound payloads. + CallerType = "App" +) + +var ( + currentDistTraceVersion = distTraceVersion([2]int{0 /* Major */, 1 /* Minor */}) + callerUnknown = payloadCaller{Type: "Unknown", App: "Unknown", Account: "Unknown", TransportType: "Unknown"} +) + +// timestampMillis allows raw payloads to use exact times, and marshalled +// payloads to use times in millis. 
+type timestampMillis time.Time + +func (tm *timestampMillis) UnmarshalJSON(data []byte) error { + var millis uint64 + if err := json.Unmarshal(data, &millis); nil != err { + return err + } + *tm = timestampMillis(timeFromUnixMilliseconds(millis)) + return nil +} + +func (tm timestampMillis) MarshalJSON() ([]byte, error) { + return json.Marshal(TimeToUnixMilliseconds(tm.Time())) +} + +func (tm timestampMillis) Time() time.Time { return time.Time(tm) } +func (tm *timestampMillis) Set(t time.Time) { *tm = timestampMillis(t) } + +// Payload is the distributed tracing payload. +type Payload struct { + payloadCaller + TransactionID string `json:"tx,omitempty"` + ID string `json:"id,omitempty"` + TracedID string `json:"tr"` + Priority Priority `json:"pr"` + Sampled *bool `json:"sa"` + Timestamp timestampMillis `json:"ti"` + TransportDuration time.Duration `json:"-"` +} + +type payloadCaller struct { + TransportType string `json:"-"` + Type string `json:"ty"` + App string `json:"ap"` + Account string `json:"ac"` + TrustedAccountKey string `json:"tk,omitempty"` +} + +// IsValid validates the payload data by looking for missing fields. +// Returns an error if there's a problem, nil if everything's fine +func (p Payload) IsValid() error { + + // If a payload is missing both `guid` and `transactionId` is received, + // a ParseException supportability metric should be generated. 
+ if "" == p.TransactionID && "" == p.ID { + return ErrPayloadMissingField{message: "missing both guid/id and TransactionId/tx"} + } + + if "" == p.Type { + return ErrPayloadMissingField{message: "missing Type/ty"} + } + + if "" == p.Account { + return ErrPayloadMissingField{message: "missing Account/ac"} + } + + if "" == p.App { + return ErrPayloadMissingField{message: "missing App/ap"} + } + + if "" == p.TracedID { + return ErrPayloadMissingField{message: "missing TracedID/tr"} + } + + if p.Timestamp.Time().IsZero() || 0 == p.Timestamp.Time().Unix() { + return ErrPayloadMissingField{message: "missing Timestamp/ti"} + } + + return nil +} + +func (p Payload) text(v distTraceVersion) []byte { + js, _ := json.Marshal(struct { + Version distTraceVersion `json:"v"` + Data Payload `json:"d"` + }{ + Version: v, + Data: p, + }) + return js +} + +// Text implements newrelic.DistributedTracePayload. +func (p Payload) Text() string { + t := p.text(currentDistTraceVersion) + return string(t) +} + +// HTTPSafe implements newrelic.DistributedTracePayload. +func (p Payload) HTTPSafe() string { + t := p.text(currentDistTraceVersion) + return base64.StdEncoding.EncodeToString(t) +} + +// SetSampled lets us set a value for our *bool, +// which we can't do directly since a pointer +// needs something to point at. +func (p *Payload) SetSampled(sampled bool) { + p.Sampled = &sampled +} + +// ErrPayloadParse indicates that the payload was malformed. +type ErrPayloadParse struct{ err error } + +func (e ErrPayloadParse) Error() string { + return fmt.Sprintf("unable to parse inbound payload: %s", e.err.Error()) +} + +// ErrPayloadMissingField indicates there's a required field that's missing +type ErrPayloadMissingField struct{ message string } + +func (e ErrPayloadMissingField) Error() string { + return fmt.Sprintf("payload is missing required fields: %s", e.message) +} + +// ErrUnsupportedPayloadVersion indicates that the major version number is +// unknown. 
+type ErrUnsupportedPayloadVersion struct{ version int } + +func (e ErrUnsupportedPayloadVersion) Error() string { + return fmt.Sprintf("unsupported major version number %d", e.version) +} + +// AcceptPayload parses the inbound distributed tracing payload. +func AcceptPayload(p interface{}) (*Payload, error) { + var payload Payload + if byteSlice, ok := p.([]byte); ok { + p = string(byteSlice) + } + switch v := p.(type) { + case string: + if "" == v { + return nil, nil + } + var decoded []byte + if '{' == v[0] { + decoded = []byte(v) + } else { + var err error + decoded, err = base64.StdEncoding.DecodeString(v) + if nil != err { + return nil, ErrPayloadParse{err: err} + } + } + envelope := struct { + Version distTraceVersion `json:"v"` + Data json.RawMessage `json:"d"` + }{} + if err := json.Unmarshal(decoded, &envelope); nil != err { + return nil, ErrPayloadParse{err: err} + } + + if 0 == envelope.Version.major() && 0 == envelope.Version.minor() { + return nil, ErrPayloadMissingField{message: "missing v"} + } + + if envelope.Version.major() > currentDistTraceVersion.major() { + return nil, ErrUnsupportedPayloadVersion{ + version: envelope.Version.major(), + } + } + if err := json.Unmarshal(envelope.Data, &payload); nil != err { + return nil, ErrPayloadParse{err: err} + } + case Payload: + payload = v + default: + // Could be a shim payload (if the app is not yet connected). + return nil, nil + } + // Ensure that we don't have a reference to the input payload: we don't + // want to change it, it could be used multiple times. 
+ alloc := new(Payload) + *alloc = payload + + return alloc, nil +} diff --git a/internal/distributed_tracing_test.go b/internal/distributed_tracing_test.go new file mode 100644 index 000000000..650de3254 --- /dev/null +++ b/internal/distributed_tracing_test.go @@ -0,0 +1,332 @@ +package internal + +import ( + "encoding/json" + "testing" + "time" +) + +var ( + samplePayload = Payload{ + payloadCaller: payloadCaller{ + Type: CallerType, + Account: "123", + App: "456", + }, + ID: "myid", + TracedID: "mytrip", + Priority: 0.12345, + Timestamp: timestampMillis(time.Now()), + } +) + +func TestPayloadRaw(t *testing.T) { + out, err := AcceptPayload(samplePayload) + if err != nil || out == nil { + t.Fatal(err, out) + } + if samplePayload != *out { + t.Fatal(samplePayload, out) + } +} + +func TestPayloadNil(t *testing.T) { + out, err := AcceptPayload(nil) + if err != nil || out != nil { + t.Fatal(err, out) + } +} + +func TestPayloadText(t *testing.T) { + out, err := AcceptPayload(samplePayload.Text()) + if err != nil || out == nil { + t.Fatal(err, out) + } + out.Timestamp = samplePayload.Timestamp // account for timezone differences + if samplePayload != *out { + t.Fatal(samplePayload, out) + } +} + +func TestPayloadTextByteSlice(t *testing.T) { + out, err := AcceptPayload([]byte(samplePayload.Text())) + if err != nil || out == nil { + t.Fatal(err, out) + } + out.Timestamp = samplePayload.Timestamp // account for timezone differences + if samplePayload != *out { + t.Fatal(samplePayload, out) + } +} + +func TestPayloadHTTPSafe(t *testing.T) { + out, err := AcceptPayload(samplePayload.HTTPSafe()) + if err != nil || nil == out { + t.Fatal(err, out) + } + out.Timestamp = samplePayload.Timestamp // account for timezone differences + if samplePayload != *out { + t.Fatal(samplePayload, out) + } +} + +func TestPayloadHTTPSafeByteSlice(t *testing.T) { + out, err := AcceptPayload([]byte(samplePayload.HTTPSafe())) + if err != nil || nil == out { + t.Fatal(err, out) + } + out.Timestamp 
= samplePayload.Timestamp // account for timezone differences + if samplePayload != *out { + t.Fatal(samplePayload, out) + } +} + +func TestPayloadInvalidBase64(t *testing.T) { + out, err := AcceptPayload("======") + if _, ok := err.(ErrPayloadParse); !ok { + t.Fatal(err) + } + if nil != out { + t.Fatal(out) + } +} + +func TestPayloadEmptyString(t *testing.T) { + out, err := AcceptPayload("") + if err != nil { + t.Fatal(err) + } + if nil != out { + t.Fatal(out) + } +} + +func TestPayloadUnexpectedType(t *testing.T) { + out, err := AcceptPayload(1) + if err != nil { + t.Fatal(err) + } + if nil != out { + t.Fatal(out) + } +} + +func TestPayloadBadVersion(t *testing.T) { + futuristicVersion := distTraceVersion([2]int{ + currentDistTraceVersion[0] + 1, + currentDistTraceVersion[1] + 1, + }) + out, err := AcceptPayload(samplePayload.text(futuristicVersion)) + if _, ok := err.(ErrUnsupportedPayloadVersion); !ok { + t.Fatal(err) + } + if out != nil { + t.Fatal(out) + } +} + +func TestPayloadBadEnvelope(t *testing.T) { + out, err := AcceptPayload("{") + if _, ok := err.(ErrPayloadParse); !ok { + t.Fatal(err) + } + if out != nil { + t.Fatal(out) + } +} + +func TestPayloadBadPayload(t *testing.T) { + var envelope map[string]interface{} + if err := json.Unmarshal([]byte(samplePayload.Text()), &envelope); nil != err { + t.Fatal(err) + } + envelope["d"] = "123" + payload, err := json.Marshal(envelope) + if nil != err { + t.Fatal(err) + } + out, err := AcceptPayload(payload) + if _, ok := err.(ErrPayloadParse); !ok { + t.Fatal(err) + } + if out != nil { + t.Fatal(out) + } +} + +func TestTimestampMillisMarshalUnmarshal(t *testing.T) { + var sec int64 = 111 + var millis int64 = 222 + var micros int64 = 333 + var nsecWithMicros = 1000*1000*millis + 1000*micros + var nsecWithoutMicros = 1000 * 1000 * millis + + input := time.Unix(sec, nsecWithMicros) + expectOutput := time.Unix(sec, nsecWithoutMicros) + + var tm timestampMillis + tm.Set(input) + js, err := json.Marshal(tm) + if nil 
!= err { + t.Fatal(err) + } + var out timestampMillis + err = json.Unmarshal(js, &out) + if nil != err { + t.Fatal(err) + } + if out.Time() != expectOutput { + t.Fatal(out.Time(), expectOutput) + } +} + +func BenchmarkPayloadText(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + samplePayload.Text() + } +} + +func TestEmptyPayloadData(t *testing.T) { + // does an empty payload json blob result in an invalid payload + var payload Payload + fixture := []byte(`{}`) + + if err := json.Unmarshal(fixture, &payload); nil != err { + t.Log("Could not marshall fixture data into payload") + t.Error(err) + } + + if err := payload.IsValid(); err == nil { + t.Log("Expected error from empty payload data") + t.Fail() + } +} + +func TestRequiredFieldsPayloadData(t *testing.T) { + var payload Payload + fixture := []byte(`{ + "ty":"App", + "ac":"123", + "ap":"456", + "id":"id", + "tr":"traceID", + "ti":1488325987402 + }`) + + if err := json.Unmarshal(fixture, &payload); nil != err { + t.Log("Could not marshall fixture data into payload") + t.Error(err) + } + + if err := payload.IsValid(); err != nil { + t.Log("Expected valid payload if ty, ac, ap, id, tr, and ti are set") + t.Error(err) + } +} + +func TestRequiredFieldsMissingType(t *testing.T) { + var payload Payload + fixture := []byte(`{ + "ac":"123", + "ap":"456", + "id":"id", + "tr":"traceID", + "ti":1488325987402 + }`) + + if err := json.Unmarshal(fixture, &payload); nil != err { + t.Log("Could not marshall fixture data into payload") + t.Error(err) + } + + if err := payload.IsValid(); err == nil { + t.Log("Expected error from missing Type (ty)") + t.Fail() + } +} + +func TestRequiredFieldsMissingAccount(t *testing.T) { + var payload Payload + fixture := []byte(`{ + "ty":"App", + "ap":"456", + "id":"id", + "tr":"traceID", + "ti":1488325987402 + }`) + + if err := json.Unmarshal(fixture, &payload); nil != err { + t.Log("Could not marshall fixture data into payload") + t.Error(err) + } + + if err 
:= payload.IsValid(); err == nil { + t.Log("Expected error from missing Account (ac)") + t.Fail() + } +} + +func TestRequiredFieldsMissingApp(t *testing.T) { + var payload Payload + fixture := []byte(`{ + "ty":"App", + "ac":"123", + "id":"id", + "tr":"traceID", + "ti":1488325987402 + }`) + + if err := json.Unmarshal(fixture, &payload); nil != err { + t.Log("Could not marshall fixture data into payload") + t.Error(err) + } + + if err := payload.IsValid(); err == nil { + t.Log("Expected error from missing App (ap)") + t.Fail() + } +} + +func TestRequiredFieldsMissingTimestamp(t *testing.T) { + var payload Payload + fixture := []byte(`{ + "ty":"App", + "ac":"123", + "ap":"456", + "tr":"traceID" + }`) + + if err := json.Unmarshal(fixture, &payload); nil != err { + t.Log("Could not marshall fixture data into payload") + t.Error(err) + } + + if err := payload.IsValid(); err == nil { + t.Log("Expected error from missing Timestamp (ti)") + t.Fail() + } +} + +func TestRequiredFieldsZeroTimestamp(t *testing.T) { + var payload Payload + fixture := []byte(`{ + "ty":"App", + "ac":"123", + "ap":"456", + "tr":"traceID", + "ti":0 + }`) + + if err := json.Unmarshal(fixture, &payload); nil != err { + t.Log("Could not marshall fixture data into payload") + t.Error(err) + } + + if err := payload.IsValid(); err == nil { + t.Log("Expected error from missing Timestamp (ti)") + t.Fail() + } +} diff --git a/internal/environment.go b/internal/environment.go new file mode 100644 index 000000000..f7f278012 --- /dev/null +++ b/internal/environment.go @@ -0,0 +1,61 @@ +package internal + +import ( + "encoding/json" + "reflect" + "runtime" +) + +// Environment describes the application's environment. +type Environment struct { + Compiler string `env:"runtime.Compiler"` + GOARCH string `env:"runtime.GOARCH"` + GOOS string `env:"runtime.GOOS"` + Version string `env:"runtime.Version"` + NumCPU int `env:"runtime.NumCPU"` +} + +var ( + // SampleEnvironment is useful for testing. 
+ SampleEnvironment = Environment{ + Compiler: "comp", + GOARCH: "arch", + GOOS: "goos", + Version: "vers", + NumCPU: 8, + } +) + +// NewEnvironment returns a new Environment. +func NewEnvironment() Environment { + return Environment{ + Compiler: runtime.Compiler, + GOARCH: runtime.GOARCH, + GOOS: runtime.GOOS, + Version: runtime.Version(), + NumCPU: runtime.NumCPU(), + } +} + +// MarshalJSON prepares Environment JSON in the format expected by the collector +// during the connect command. +func (e Environment) MarshalJSON() ([]byte, error) { + var arr [][]interface{} + + val := reflect.ValueOf(e) + numFields := val.NumField() + + arr = make([][]interface{}, numFields) + + for i := 0; i < numFields; i++ { + v := val.Field(i) + t := val.Type().Field(i).Tag.Get("env") + + arr[i] = []interface{}{ + t, + v.Interface(), + } + } + + return json.Marshal(arr) +} diff --git a/internal/environment_test.go b/internal/environment_test.go new file mode 100644 index 000000000..e3f3e15d7 --- /dev/null +++ b/internal/environment_test.go @@ -0,0 +1,42 @@ +package internal + +import ( + "encoding/json" + "runtime" + "testing" +) + +func TestMarshalEnvironment(t *testing.T) { + js, err := json.Marshal(&SampleEnvironment) + if nil != err { + t.Fatal(err) + } + expect := CompactJSONString(`[ + ["runtime.Compiler","comp"], + ["runtime.GOARCH","arch"], + ["runtime.GOOS","goos"], + ["runtime.Version","vers"], + ["runtime.NumCPU",8]]`) + if string(js) != expect { + t.Fatal(string(js)) + } +} + +func TestEnvironmentFields(t *testing.T) { + env := NewEnvironment() + if env.Compiler != runtime.Compiler { + t.Error(env.Compiler, runtime.Compiler) + } + if env.GOARCH != runtime.GOARCH { + t.Error(env.GOARCH, runtime.GOARCH) + } + if env.GOOS != runtime.GOOS { + t.Error(env.GOOS, runtime.GOOS) + } + if env.Version != runtime.Version() { + t.Error(env.Version, runtime.Version()) + } + if env.NumCPU != runtime.NumCPU() { + t.Error(env.NumCPU, runtime.NumCPU()) + } +} diff --git 
a/internal/error_events.go b/internal/error_events.go new file mode 100644 index 000000000..08f607dbe --- /dev/null +++ b/internal/error_events.go @@ -0,0 +1,64 @@ +package internal + +import ( + "bytes" + "time" +) + +// MarshalJSON is used for testing. +func (e *ErrorEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +// WriteJSON prepares JSON in the format expected by the collector. +// https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md +func (e *ErrorEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", "TransactionError") + w.stringField("error.class", e.Klass) + w.stringField("error.message", e.Msg) + w.floatField("timestamp", timeToFloatSeconds(e.When)) + w.stringField("transactionName", e.FinalName) + + sharedTransactionIntrinsics(&e.TxnEvent, &w) + sharedBetterCATIntrinsics(&e.TxnEvent, &w) + + buf.WriteByte('}') + buf.WriteByte(',') + userAttributesJSON(e.Attrs, buf, destError, e.ErrorData.ExtraAttributes) + buf.WriteByte(',') + agentAttributesJSON(e.Attrs, buf, destError) + buf.WriteByte(']') +} + +type errorEvents struct { + *analyticsEvents +} + +func newErrorEvents(max int) *errorEvents { + return &errorEvents{ + analyticsEvents: newAnalyticsEvents(max), + } +} + +func (events *errorEvents) Add(e *ErrorEvent, priority Priority) { + events.addEvent(analyticsEvent{priority, e}) +} + +func (events *errorEvents) MergeIntoHarvest(h *Harvest) { + h.ErrorEvents.mergeFailed(events.analyticsEvents) +} + +func (events *errorEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) +} + +func (events *errorEvents) EndpointMethod() string { + return cmdErrorEvents +} diff --git a/internal/error_events_test.go b/internal/error_events_test.go new file mode 100644 index 000000000..25ad35bc1 --- /dev/null +++ 
b/internal/error_events_test.go @@ -0,0 +1,227 @@ +package internal + +import ( + "encoding/json" + "testing" + "time" +) + +func testErrorEventJSON(t testing.TB, e *ErrorEvent, expect string) { + js, err := json.Marshal(e) + if nil != err { + t.Error(err) + return + } + expect = CompactJSONString(expect) + // Type assertion to support early Go versions. + if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } + actual := string(js) + if expect != actual { + t.Errorf("\nexpect=%s\nactual=%s\n", expect, actual) + } +} + +var ( + sampleErrorData = ErrorData{ + Klass: "*errors.errorString", + Msg: "hello", + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + } +) + +func TestErrorEventMarshal(t *testing.T) { + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Attrs: nil, + BetterCAT: BetterCAT{ + Enabled: true, + Priority: 0.5, + ID: "txn-guid-id", + }, + }, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + "error.message":"hello", + "timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3, + "guid":"txn-guid-id", + "traceId":"txn-guid-id", + "priority":0.500000, + "sampled":false + }, + {}, + {} + ]`) + + // Many error event intrinsics are shared with txn events using sharedEventIntrinsics: See + // the txn event tests. +} + +func TestErrorEventMarshalOldCAT(t *testing.T) { + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Attrs: nil, + BetterCAT: BetterCAT{ + Enabled: false, + }, + }, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + "error.message":"hello", + "timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3 + }, + {}, + {} + ]`) + + // Many error event intrinsics are shared with txn events using sharedEventIntrinsics: See + // the txn event tests. 
+} + +func TestErrorEventAttributes(t *testing.T) { + aci := sampleAttributeConfigInput + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, "zap") + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, AttributeHostDisplayName.name()) + cfg := CreateAttributeConfig(aci, true) + attr := NewAttributes(cfg) + attr.Agent.Add(AttributeHostDisplayName, "exclude me", nil) + attr.Agent.Add(attributeRequestMethod, "GET", nil) + AddUserAttribute(attr, "zap", 123, DestAll) + AddUserAttribute(attr, "zip", 456, DestAll) + + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Attrs: attr, + BetterCAT: BetterCAT{ + Enabled: true, + Priority: 0.5, + ID: "txn-guid-id", + }, + }, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + "error.message":"hello", + "timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3, + "guid":"txn-guid-id", + "traceId":"txn-guid-id", + "priority":0.500000, + "sampled":false + }, + { + "zip":456 + }, + { + "request.method":"GET" + } + ]`) +} + +func TestErrorEventAttributesOldCAT(t *testing.T) { + aci := sampleAttributeConfigInput + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, "zap") + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, AttributeHostDisplayName.name()) + cfg := CreateAttributeConfig(aci, true) + attr := NewAttributes(cfg) + attr.Agent.Add(AttributeHostDisplayName, "exclude me", nil) + attr.Agent.Add(attributeRequestMethod, "GET", nil) + AddUserAttribute(attr, "zap", 123, DestAll) + AddUserAttribute(attr, "zip", 456, DestAll) + + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Attrs: attr, + BetterCAT: BetterCAT{ + Enabled: false, + }, + }, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + "error.message":"hello", + 
"timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3 + }, + { + "zip":456 + }, + { + "request.method":"GET" + } + ]`) +} + +func TestErrorEventMarshalWithInboundCaller(t *testing.T) { + e := TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Attrs: nil, + } + + e.BetterCAT.Enabled = true + e.BetterCAT.Inbound = &Payload{ + payloadCaller: payloadCaller{ + TransportType: "HTTP", + Type: "Browser", + App: "caller-app", + Account: "caller-account", + }, + ID: "caller-id", + TransactionID: "caller-parent-id", + TracedID: "trip-id", + TransportDuration: 2 * time.Second, + } + + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: e, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + "error.message":"hello", + "timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3, + "parent.type": "Browser", + "parent.app": "caller-app", + "parent.account": "caller-account", + "parent.transportType": "HTTP", + "parent.transportDuration": 2, + "guid":"", + "traceId":"trip-id", + "priority":0.000000, + "sampled":false + }, + {}, + {} + ]`) +} diff --git a/internal/errors.go b/internal/errors.go new file mode 100644 index 000000000..23dec7029 --- /dev/null +++ b/internal/errors.go @@ -0,0 +1,175 @@ +package internal + +import ( + "bytes" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +const ( + // PanicErrorKlass is the error klass used for errors generated by + // recovering panics in txn.End. + PanicErrorKlass = "panic" +) + +func panicValueMsg(v interface{}) string { + switch val := v.(type) { + case error: + return val.Error() + default: + return fmt.Sprintf("%v", v) + } +} + +// TxnErrorFromPanic creates a new TxnError from a panic. 
+func TxnErrorFromPanic(now time.Time, v interface{}) ErrorData { + return ErrorData{ + When: now, + Msg: panicValueMsg(v), + Klass: PanicErrorKlass, + } +} + +// TxnErrorFromResponseCode creates a new TxnError from an http response code. +func TxnErrorFromResponseCode(now time.Time, code int) ErrorData { + codeStr := strconv.Itoa(code) + msg := http.StatusText(code) + if msg == "" { + // Use a generic message if the code was not an http code + // to support gRPC. + msg = "response code " + codeStr + } + return ErrorData{ + When: now, + Msg: msg, + Klass: codeStr, + } +} + +// ErrorData contains the information about a recorded error. +type ErrorData struct { + When time.Time + Stack StackTrace + ExtraAttributes map[string]interface{} + Msg string + Klass string +} + +// TxnError combines error data with information about a transaction. TxnError is used for +// both error events and traced errors. +type TxnError struct { + ErrorData + TxnEvent +} + +// ErrorEvent and tracedError are separate types so that error events and traced errors can have +// different WriteJSON methods. +type ErrorEvent TxnError + +type tracedError TxnError + +// TxnErrors is a set of errors captured in a Transaction. +type TxnErrors []*ErrorData + +// NewTxnErrors returns a new empty TxnErrors. +func NewTxnErrors(max int) TxnErrors { + return make([]*ErrorData, 0, max) +} + +// Add adds a TxnError. 
+func (errors *TxnErrors) Add(e ErrorData) { + if len(*errors) < cap(*errors) { + *errors = append(*errors, &e) + } +} + +func (h *tracedError) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + jsonx.AppendFloat(buf, timeToFloatMilliseconds(h.When)) + buf.WriteByte(',') + jsonx.AppendString(buf, h.FinalName) + buf.WriteByte(',') + jsonx.AppendString(buf, h.Msg) + buf.WriteByte(',') + jsonx.AppendString(buf, h.Klass) + buf.WriteByte(',') + + buf.WriteByte('{') + buf.WriteString(`"agentAttributes"`) + buf.WriteByte(':') + agentAttributesJSON(h.Attrs, buf, destError) + buf.WriteByte(',') + buf.WriteString(`"userAttributes"`) + buf.WriteByte(':') + userAttributesJSON(h.Attrs, buf, destError, h.ErrorData.ExtraAttributes) + buf.WriteByte(',') + buf.WriteString(`"intrinsics"`) + buf.WriteByte(':') + intrinsicsJSON(&h.TxnEvent, buf) + if nil != h.Stack { + buf.WriteByte(',') + buf.WriteString(`"stack_trace"`) + buf.WriteByte(':') + h.Stack.WriteJSON(buf) + } + buf.WriteByte('}') + + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. +func (h *tracedError) MarshalJSON() ([]byte, error) { + buf := &bytes.Buffer{} + h.WriteJSON(buf) + return buf.Bytes(), nil +} + +type harvestErrors []*tracedError + +func newHarvestErrors(max int) harvestErrors { + return make([]*tracedError, 0, max) +} + +// MergeTxnErrors merges a transaction's errors into the harvest's errors. 
+func MergeTxnErrors(errors *harvestErrors, errs TxnErrors, txnEvent TxnEvent) { + for _, e := range errs { + if len(*errors) == cap(*errors) { + return + } + *errors = append(*errors, &tracedError{ + TxnEvent: txnEvent, + ErrorData: *e, + }) + } +} + +func (errors harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if 0 == len(errors) { + return nil, nil + } + estimate := 1024 * len(errors) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('[') + for i, e := range errors { + if i > 0 { + buf.WriteByte(',') + } + e.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (errors harvestErrors) MergeIntoHarvest(h *Harvest) {} + +func (errors harvestErrors) EndpointMethod() string { + return cmdErrorData +} diff --git a/internal/errors_test.go b/internal/errors_test.go new file mode 100644 index 000000000..37117f325 --- /dev/null +++ b/internal/errors_test.go @@ -0,0 +1,352 @@ +package internal + +import ( + "encoding/json" + "errors" + "testing" + "time" +) + +var ( + emptyStackTrace = make([]uintptr, 0) +) + +func testExpectedJSON(t testing.TB, expect string, actual string) { + // Type assertion to support early Go versions. 
+ if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } + compactExpect := CompactJSONString(expect) + if compactExpect != actual { + t.Errorf("\nexpect=%s\nactual=%s\n", compactExpect, actual) + } +} + +func TestErrorTraceMarshal(t *testing.T) { + he := &tracedError{ + ErrorData: ErrorData{ + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Stack: emptyStackTrace, + Msg: "my_msg", + Klass: "my_class", + }, + TxnEvent: TxnEvent{ + FinalName: "my_txn_name", + Attrs: nil, + BetterCAT: BetterCAT{ + Enabled: true, + ID: "txn-id", + Priority: 0.5, + }, + TotalTime: 2 * time.Second, + }, + } + js, err := json.Marshal(he) + if nil != err { + t.Error(err) + } + + expect := ` + [ + 1.41713646e+12, + "my_txn_name", + "my_msg", + "my_class", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{ + "totalTime":2, + "guid":"txn-id", + "traceId":"txn-id", + "priority":0.500000, + "sampled":false + }, + "stack_trace":[] + } + ]` + testExpectedJSON(t, expect, string(js)) +} + +func TestErrorTraceMarshalOldCAT(t *testing.T) { + he := &tracedError{ + ErrorData: ErrorData{ + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Stack: emptyStackTrace, + Msg: "my_msg", + Klass: "my_class", + }, + TxnEvent: TxnEvent{ + FinalName: "my_txn_name", + Attrs: nil, + BetterCAT: BetterCAT{ + Enabled: false, + }, + TotalTime: 2 * time.Second, + }, + } + js, err := json.Marshal(he) + if nil != err { + t.Error(err) + } + + expect := ` + [ + 1.41713646e+12, + "my_txn_name", + "my_msg", + "my_class", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{ + "totalTime":2 + }, + "stack_trace":[] + } + ]` + testExpectedJSON(t, expect, string(js)) +} + +func TestErrorTraceAttributes(t *testing.T) { + aci := sampleAttributeConfigInput + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, "zap") + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, AttributeHostDisplayName.name()) + cfg := 
CreateAttributeConfig(aci, true) + attr := NewAttributes(cfg) + attr.Agent.Add(AttributeHostDisplayName, "exclude me", nil) + attr.Agent.Add(attributeRequestURI, "my_request_uri", nil) + AddUserAttribute(attr, "zap", 123, DestAll) + AddUserAttribute(attr, "zip", 456, DestAll) + + he := &tracedError{ + ErrorData: ErrorData{ + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Stack: nil, + Msg: "my_msg", + Klass: "my_class", + }, + TxnEvent: TxnEvent{ + FinalName: "my_txn_name", + Attrs: attr, + BetterCAT: BetterCAT{ + Enabled: true, + ID: "txn-id", + Priority: 0.5, + }, + TotalTime: 2 * time.Second, + }, + } + js, err := json.Marshal(he) + if nil != err { + t.Error(err) + } + expect := ` + [ + 1.41713646e+12, + "my_txn_name", + "my_msg", + "my_class", + { + "agentAttributes":{"request.uri":"my_request_uri"}, + "userAttributes":{"zip":456}, + "intrinsics":{ + "totalTime":2, + "guid":"txn-id", + "traceId":"txn-id", + "priority":0.500000, + "sampled":false + } + } + ]` + testExpectedJSON(t, expect, string(js)) +} + +func TestErrorTraceAttributesOldCAT(t *testing.T) { + aci := sampleAttributeConfigInput + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, "zap") + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, AttributeHostDisplayName.name()) + cfg := CreateAttributeConfig(aci, true) + attr := NewAttributes(cfg) + attr.Agent.Add(AttributeHostDisplayName, "exclude me", nil) + attr.Agent.Add(attributeRequestURI, "my_request_uri", nil) + AddUserAttribute(attr, "zap", 123, DestAll) + AddUserAttribute(attr, "zip", 456, DestAll) + + he := &tracedError{ + ErrorData: ErrorData{ + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Stack: nil, + Msg: "my_msg", + Klass: "my_class", + }, + TxnEvent: TxnEvent{ + FinalName: "my_txn_name", + Attrs: attr, + BetterCAT: BetterCAT{ + Enabled: false, + }, + TotalTime: 2 * time.Second, + }, + } + js, err := json.Marshal(he) + if nil != err { + t.Error(err) + } + expect := ` + [ + 
1.41713646e+12, + "my_txn_name", + "my_msg", + "my_class", + { + "agentAttributes":{"request.uri":"my_request_uri"}, + "userAttributes":{"zip":456}, + "intrinsics":{ + "totalTime":2 + } + } + ]` + testExpectedJSON(t, expect, string(js)) +} + +func TestErrorsLifecycle(t *testing.T) { + ers := NewTxnErrors(5) + + when := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + ers.Add(TxnErrorFromResponseCode(when, 15)) + ers.Add(TxnErrorFromResponseCode(when, 400)) + ers.Add(TxnErrorFromPanic(when, errors.New("oh no panic"))) + ers.Add(TxnErrorFromPanic(when, 123)) + ers.Add(TxnErrorFromPanic(when, 123)) + + he := newHarvestErrors(4) + MergeTxnErrors(&he, ers, TxnEvent{ + FinalName: "txnName", + Attrs: nil, + BetterCAT: BetterCAT{ + Enabled: true, + ID: "txn-id", + Priority: 0.5, + }, + TotalTime: 2 * time.Second, + }) + js, err := he.Data("agentRunID", time.Now()) + if nil != err { + t.Error(err) + } + expect := CompactJSONString(` +[ + "agentRunID", + [ + [ + 1.41713646e+12, + "txnName", + "response code 15", + "15", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{ + "totalTime":2, + "guid":"txn-id", + "traceId":"txn-id", + "priority":0.500000, + "sampled":false + } + } + ], + [ + 1.41713646e+12, + "txnName", + "Bad Request", + "400", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{ + "totalTime":2, + "guid":"txn-id", + "traceId":"txn-id", + "priority":0.500000, + "sampled":false + } + } + ], + [ + 1.41713646e+12, + "txnName", + "oh no panic", + "panic", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{ + "totalTime":2, + "guid":"txn-id", + "traceId":"txn-id", + "priority":0.500000, + "sampled":false + } + } + ], + [ + 1.41713646e+12, + "txnName", + "123", + "panic", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{ + "totalTime":2, + "guid":"txn-id", + "traceId":"txn-id", + "priority":0.500000, + "sampled":false + } + } + ] + ] +]`) + if string(js) != expect { + t.Error(string(js), 
expect) + } +} + +func BenchmarkErrorsJSON(b *testing.B) { + when := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + max := 20 + ers := NewTxnErrors(max) + + for i := 0; i < max; i++ { + ers.Add(ErrorData{ + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Msg: "error message", + Klass: "error class", + }) + } + + cfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attr := NewAttributes(cfg) + attr.Agent.Add(attributeRequestMethod, "GET", nil) + AddUserAttribute(attr, "zip", 456, DestAll) + + he := newHarvestErrors(max) + MergeTxnErrors(&he, ers, TxnEvent{ + FinalName: "WebTransaction/Go/hello", + Attrs: attr, + }) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + js, err := he.Data("agentRundID", when) + if nil != err || nil == js { + b.Fatal(err, js) + } + } +} diff --git a/internal/expect.go b/internal/expect.go new file mode 100644 index 000000000..deff8f238 --- /dev/null +++ b/internal/expect.go @@ -0,0 +1,591 @@ +package internal + +import ( + "encoding/json" + "fmt" + "runtime" + + "time" +) + +var ( + // Unfortunately, the resolution of time.Now() on Windows is coarse: Two + // sequential calls to time.Now() may return the same value, and tests + // which expect non-zero durations may fail. To avoid adding sleep + // statements or mocking time.Now(), those tests are skipped on Windows. + doDurationTests = runtime.GOOS != `windows` +) + +// Validator is used for testing. +type Validator interface { + Error(...interface{}) +} + +func validateStringField(v Validator, fieldName, v1, v2 string) { + if v1 != v2 { + v.Error(fieldName, v1, v2) + } +} + +type addValidatorField struct { + field interface{} + original Validator +} + +func (a addValidatorField) Error(fields ...interface{}) { + fields = append([]interface{}{a.field}, fields...) + a.original.Error(fields...) +} + +// ExtendValidator is used to add more context to a validator. 
+func ExtendValidator(v Validator, field interface{}) Validator { + return addValidatorField{ + field: field, + original: v, + } +} + +// WantMetric is a metric expectation. If Data is nil, then any data values are +// acceptable. If Data has len 1, then only the metric count is validated. +type WantMetric struct { + Name string + Scope string + Forced interface{} // true, false, or nil + Data []float64 +} + +// WantError is a traced error expectation. +type WantError struct { + TxnName string + Msg string + Klass string + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +func uniquePointer() *struct{} { + s := struct{}{} + return &s +} + +var ( + // MatchAnything is for use when matching attributes. + MatchAnything = uniquePointer() +) + +// WantEvent is a transaction or error event expectation. +type WantEvent struct { + Intrinsics map[string]interface{} + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +// WantTxnTrace is a transaction trace expectation. +type WantTxnTrace struct { + MetricName string + NumSegments int + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} + Intrinsics map[string]interface{} + // If the Root's SegmentName is populated then the segments will be + // tested, otherwise NumSegments will be tested. + Root WantTraceSegment +} + +// WantTraceSegment is a transaction trace segment expectation. +type WantTraceSegment struct { + SegmentName string + // RelativeStartMillis and RelativeStopMillis will be tested if they are + // provided: This makes it easy for top level tests which cannot + // control duration. + RelativeStartMillis interface{} + RelativeStopMillis interface{} + Attributes map[string]interface{} + Children []WantTraceSegment +} + +// WantSlowQuery is a slowQuery expectation. 
+type WantSlowQuery struct { + Count int32 + MetricName string + Query string + TxnName string + TxnURL string + DatabaseName string + Host string + PortPathOrID string + Params map[string]interface{} +} + +// HarvestTestinger is implemented by the app. It sets an empty test harvest +// and modifies the connect reply if a callback is provided. +type HarvestTestinger interface { + HarvestTesting(replyfn func(*ConnectReply)) +} + +// HarvestTesting allows integration packages to test instrumentation. +func HarvestTesting(app interface{}, replyfn func(*ConnectReply)) { + ta, ok := app.(HarvestTestinger) + if !ok { + panic("HarvestTesting type assertion failure") + } + ta.HarvestTesting(replyfn) +} + +// WantTxn provides the expectation parameters to ExpectTxnMetrics. +type WantTxn struct { + Name string + IsWeb bool + NumErrors int +} + +// ExpectTxnMetrics tests that the app contains metrics for a transaction. +func ExpectTxnMetrics(t Validator, mt *metricTable, want WantTxn) { + var metrics []WantMetric + var scope string + var allWebOther string + if want.IsWeb { + scope = "WebTransaction/Go/" + want.Name + allWebOther = "allWeb" + metrics = []WantMetric{ + {Name: "WebTransaction/Go/" + want.Name, Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransactionTotalTime/Go/" + want.Name, Scope: "", Forced: false, Data: nil}, + {Name: "WebTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/" + want.Name, Scope: "", Forced: false, Data: nil}, + } + } else { + scope = "OtherTransaction/Go/" + want.Name + allWebOther = "allOther" + metrics = []WantMetric{ + {Name: "OtherTransaction/Go/" + want.Name, Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransactionTotalTime/Go/" + want.Name, Scope: 
"", Forced: false, Data: nil}, + {Name: "OtherTransactionTotalTime", Scope: "", Forced: true, Data: nil}, + } + } + if want.NumErrors > 0 { + data := []float64{float64(want.NumErrors), 0, 0, 0, 0, 0} + metrics = append(metrics, []WantMetric{ + {Name: "Errors/all", Scope: "", Forced: true, Data: data}, + {Name: "Errors/" + allWebOther, Scope: "", Forced: true, Data: data}, + {Name: "Errors/" + scope, Scope: "", Forced: true, Data: data}, + }...) + } + ExpectMetrics(t, mt, metrics) +} + +// Expect exposes methods that allow for testing whether the correct data was +// captured. +type Expect interface { + ExpectCustomEvents(t Validator, want []WantEvent) + ExpectErrors(t Validator, want []WantError) + ExpectErrorEvents(t Validator, want []WantEvent) + + ExpectTxnEvents(t Validator, want []WantEvent) + + ExpectMetrics(t Validator, want []WantMetric) + ExpectMetricsPresent(t Validator, want []WantMetric) + ExpectTxnMetrics(t Validator, want WantTxn) + + ExpectTxnTraces(t Validator, want []WantTxnTrace) + ExpectSlowQueries(t Validator, want []WantSlowQuery) + + ExpectSpanEvents(t Validator, want []WantEvent) +} + +func expectMetricField(t Validator, id metricID, v1, v2 float64, fieldName string) { + if v1 != v2 { + t.Error("metric fields do not match", id, v1, v2, fieldName) + } +} + +// ExpectMetricsPresent allows testing of metrics without requiring an exact match +func ExpectMetricsPresent(t Validator, mt *metricTable, expect []WantMetric) { + expectMetrics(t, mt, expect, false) +} + +// ExpectMetrics allows testing of metrics. It passes if mt exactly matches expect. 
+func ExpectMetrics(t Validator, mt *metricTable, expect []WantMetric) { + expectMetrics(t, mt, expect, true) +} + +func expectMetrics(t Validator, mt *metricTable, expect []WantMetric, exactMatch bool) { + if exactMatch { + if len(mt.metrics) != len(expect) { + t.Error("metric counts do not match expectations", len(mt.metrics), len(expect)) + } + } + expectedIds := make(map[metricID]struct{}) + for _, e := range expect { + id := metricID{Name: e.Name, Scope: e.Scope} + expectedIds[id] = struct{}{} + m := mt.metrics[id] + if nil == m { + t.Error("unable to find metric", id) + continue + } + + if b, ok := e.Forced.(bool); ok { + if b != (forced == m.forced) { + t.Error("metric forced incorrect", b, m.forced, id) + } + } + + if nil != e.Data { + expectMetricField(t, id, e.Data[0], m.data.countSatisfied, "countSatisfied") + + if len(e.Data) > 1 { + expectMetricField(t, id, e.Data[1], m.data.totalTolerated, "totalTolerated") + expectMetricField(t, id, e.Data[2], m.data.exclusiveFailed, "exclusiveFailed") + expectMetricField(t, id, e.Data[3], m.data.min, "min") + expectMetricField(t, id, e.Data[4], m.data.max, "max") + expectMetricField(t, id, e.Data[5], m.data.sumSquares, "sumSquares") + } + } + } + if exactMatch { + for id := range mt.metrics { + if _, ok := expectedIds[id]; !ok { + t.Error("expected metrics does not contain", id.Name, id.Scope) + } + } + } +} + +func expectAttributes(v Validator, exists map[string]interface{}, expect map[string]interface{}) { + // TODO: This params comparison can be made smarter: Alert differences + // based on sub/super set behavior. 
+ if len(exists) != len(expect) { + v.Error("attributes length difference", len(exists), len(expect)) + } + for key, val := range expect { + found, ok := exists[key] + if !ok { + v.Error("expected attribute not found: ", key) + continue + } + if val == MatchAnything { + continue + } + v1 := fmt.Sprint(found) + v2 := fmt.Sprint(val) + if v1 != v2 { + v.Error("value difference", fmt.Sprintf("key=%s", key), v1, v2) + } + } + for key, val := range exists { + _, ok := expect[key] + if !ok { + v.Error("unexpected attribute present: ", key, val) + continue + } + } +} + +// ExpectCustomEvents allows testing of custom events. It passes if cs exactly matches expect. +func ExpectCustomEvents(v Validator, cs *customEvents, expect []WantEvent) { + expectEvents(v, cs.analyticsEvents, expect, nil) +} + +func expectEvent(v Validator, e json.Marshaler, expect WantEvent) { + js, err := e.MarshalJSON() + if nil != err { + v.Error("unable to marshal event", err) + return + } + var event []map[string]interface{} + err = json.Unmarshal(js, &event) + if nil != err { + v.Error("unable to parse event json", err) + return + } + intrinsics := event[0] + userAttributes := event[1] + agentAttributes := event[2] + + if nil != expect.Intrinsics { + expectAttributes(v, intrinsics, expect.Intrinsics) + } + if nil != expect.UserAttributes { + expectAttributes(v, userAttributes, expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, agentAttributes, expect.AgentAttributes) + } +} + +func expectEvents(v Validator, events *analyticsEvents, expect []WantEvent, extraAttributes map[string]interface{}) { + if len(events.events) != len(expect) { + v.Error("number of events does not match", len(events.events), len(expect)) + return + } + for i, e := range expect { + event, ok := events.events[i].jsonWriter.(json.Marshaler) + if !ok { + v.Error("event does not implement json.Marshaler") + continue + } + if nil != e.Intrinsics { + e.Intrinsics = mergeAttributes(extraAttributes, 
e.Intrinsics) + } + expectEvent(v, event, e) + } +} + +// Second attributes have priority. +func mergeAttributes(a1, a2 map[string]interface{}) map[string]interface{} { + a := make(map[string]interface{}) + for k, v := range a1 { + a[k] = v + } + for k, v := range a2 { + a[k] = v + } + return a +} + +// ExpectErrorEvents allows testing of error events. It passes if events exactly matches expect. +func ExpectErrorEvents(v Validator, events *errorEvents, expect []WantEvent) { + expectEvents(v, events.analyticsEvents, expect, map[string]interface{}{ + // The following intrinsics should always be present in + // error events: + "type": "TransactionError", + "timestamp": MatchAnything, + "duration": MatchAnything, + }) +} + +// ExpectSpanEvents allows testing of span events. It passes if events exactly matches expect. +func ExpectSpanEvents(v Validator, events *spanEvents, expect []WantEvent) { + expectEvents(v, events.analyticsEvents, expect, map[string]interface{}{ + // The following intrinsics should always be present in + // span events: + "type": "Span", + "timestamp": MatchAnything, + "duration": MatchAnything, + "traceId": MatchAnything, + "guid": MatchAnything, + "transactionId": MatchAnything, + // All span events are currently sampled. + "sampled": true, + "priority": MatchAnything, + }) +} + +// ExpectTxnEvents allows testing of txn events. 
+func ExpectTxnEvents(v Validator, events *txnEvents, expect []WantEvent) { + expectEvents(v, events.analyticsEvents, expect, map[string]interface{}{ + // The following intrinsics should always be present in + // txn events: + "type": "Transaction", + "timestamp": MatchAnything, + "duration": MatchAnything, + "totalTime": MatchAnything, + "error": MatchAnything, + }) +} + +func expectError(v Validator, err *tracedError, expect WantError) { + validateStringField(v, "txnName", expect.TxnName, err.FinalName) + validateStringField(v, "klass", expect.Klass, err.Klass) + validateStringField(v, "msg", expect.Msg, err.Msg) + js, errr := err.MarshalJSON() + if nil != errr { + v.Error("unable to marshal error json", errr) + return + } + var unmarshalled []interface{} + errr = json.Unmarshal(js, &unmarshalled) + if nil != errr { + v.Error("unable to unmarshal error json", errr) + return + } + attributes := unmarshalled[4].(map[string]interface{}) + agentAttributes := attributes["agentAttributes"].(map[string]interface{}) + userAttributes := attributes["userAttributes"].(map[string]interface{}) + + if nil != expect.UserAttributes { + expectAttributes(v, userAttributes, expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, agentAttributes, expect.AgentAttributes) + } + if stack := attributes["stack_trace"]; nil == stack { + v.Error("missing error stack trace") + } +} + +// ExpectErrors allows testing of errors. 
+func ExpectErrors(v Validator, errors harvestErrors, expect []WantError) { + if len(errors) != len(expect) { + v.Error("number of errors mismatch", len(errors), len(expect)) + return + } + for i, e := range expect { + expectError(v, errors[i], e) + } +} + +func countSegments(node []interface{}) int { + count := 1 + children := node[4].([]interface{}) + for _, c := range children { + node := c.([]interface{}) + count += countSegments(node) + } + return count +} + +func expectTraceSegment(v Validator, nodeObj interface{}, expect WantTraceSegment) { + node := nodeObj.([]interface{}) + start := int(node[0].(float64)) + stop := int(node[1].(float64)) + name := node[2].(string) + attributes := node[3].(map[string]interface{}) + children := node[4].([]interface{}) + + validateStringField(v, "segmentName", expect.SegmentName, name) + if nil != expect.RelativeStartMillis { + expectStart, ok := expect.RelativeStartMillis.(int) + if !ok { + v.Error("invalid expect.RelativeStartMillis", expect.RelativeStartMillis) + } else if expectStart != start { + v.Error("segmentStartTime", expect.SegmentName, start, expectStart) + } + } + if nil != expect.RelativeStopMillis { + expectStop, ok := expect.RelativeStopMillis.(int) + if !ok { + v.Error("invalid expect.RelativeStopMillis", expect.RelativeStopMillis) + } else if expectStop != stop { + v.Error("segmentStopTime", expect.SegmentName, stop, expectStop) + } + } + if nil != expect.Attributes { + expectAttributes(v, attributes, expect.Attributes) + } + if len(children) != len(expect.Children) { + v.Error("segmentChildrenCount", expect.SegmentName, len(children), len(expect.Children)) + } else { + for idx, child := range children { + expectTraceSegment(v, child, expect.Children[idx]) + } + } +} + +func expectTxnTrace(v Validator, got interface{}, expect WantTxnTrace) { + unmarshalled := got.([]interface{}) + duration := unmarshalled[1].(float64) + name := unmarshalled[2].(string) + var arrayURL string + if nil != unmarshalled[3] { + 
arrayURL = unmarshalled[3].(string) + } + traceData := unmarshalled[4].([]interface{}) + + rootNode := traceData[3].([]interface{}) + attributes := traceData[4].(map[string]interface{}) + userAttributes := attributes["userAttributes"].(map[string]interface{}) + agentAttributes := attributes["agentAttributes"].(map[string]interface{}) + intrinsics := attributes["intrinsics"].(map[string]interface{}) + + validateStringField(v, "metric name", expect.MetricName, name) + + if doDurationTests && 0 == duration { + v.Error("zero trace duration") + } + + if nil != expect.UserAttributes { + expectAttributes(v, userAttributes, expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, agentAttributes, expect.AgentAttributes) + expectURL, _ := expect.AgentAttributes["request.uri"].(string) + if "" != expectURL { + validateStringField(v, "request url in array", expectURL, arrayURL) + } + } + if nil != expect.Intrinsics { + expectAttributes(v, intrinsics, expect.Intrinsics) + } + if expect.Root.SegmentName != "" { + expectTraceSegment(v, rootNode, expect.Root) + } else { + numSegments := countSegments(rootNode) + // The expectation segment count does not include the two root nodes. + numSegments -= 2 + if expect.NumSegments != numSegments { + v.Error("wrong number of segments", expect.NumSegments, numSegments) + } + } +} + +// ExpectTxnTraces allows testing of transaction traces. 
+func ExpectTxnTraces(v Validator, traces *harvestTraces, want []WantTxnTrace) { + if len(want) != traces.Len() { + v.Error("number of traces do not match", len(want), traces.Len()) + return + } + if len(want) == 0 { + return + } + js, err := traces.Data("agentRunID", time.Now()) + if nil != err { + v.Error("error creasing harvest traces data", err) + return + } + + var unmarshalled []interface{} + err = json.Unmarshal(js, &unmarshalled) + if nil != err { + v.Error("unable to unmarshal error json", err) + return + } + if "agentRunID" != unmarshalled[0].(string) { + v.Error("traces agent run id wrong", unmarshalled[0]) + return + } + gotTraces := unmarshalled[1].([]interface{}) + if len(gotTraces) != len(want) { + v.Error("number of traces in json does not match", len(gotTraces), len(want)) + return + } + for i, expected := range want { + expectTxnTrace(v, gotTraces[i], expected) + } +} + +func expectSlowQuery(t Validator, slowQuery *slowQuery, want WantSlowQuery) { + if slowQuery.Count != want.Count { + t.Error("wrong Count field", slowQuery.Count, want.Count) + } + uri, _ := slowQuery.TxnEvent.Attrs.GetAgentValue(attributeRequestURI, destTxnTrace) + validateStringField(t, "MetricName", slowQuery.DatastoreMetric, want.MetricName) + validateStringField(t, "Query", slowQuery.ParameterizedQuery, want.Query) + validateStringField(t, "TxnEvent.FinalName", slowQuery.TxnEvent.FinalName, want.TxnName) + validateStringField(t, "request.uri", uri, want.TxnURL) + validateStringField(t, "DatabaseName", slowQuery.DatabaseName, want.DatabaseName) + validateStringField(t, "Host", slowQuery.Host, want.Host) + validateStringField(t, "PortPathOrID", slowQuery.PortPathOrID, want.PortPathOrID) + expectAttributes(t, map[string]interface{}(slowQuery.QueryParameters), want.Params) +} + +// ExpectSlowQueries allows testing of slow queries. 
+func ExpectSlowQueries(t Validator, slowQueries *slowQueries, want []WantSlowQuery) { + if len(want) != len(slowQueries.priorityQueue) { + t.Error("wrong number of slow queries", + "expected", len(want), "got", len(slowQueries.priorityQueue)) + return + } + for _, s := range want { + idx, ok := slowQueries.lookup[s.Query] + if !ok { + t.Error("unable to find slow query", s.Query) + continue + } + expectSlowQuery(t, slowQueries.priorityQueue[idx], s) + } +} diff --git a/internal/harvest.go b/internal/harvest.go new file mode 100644 index 000000000..453f04d09 --- /dev/null +++ b/internal/harvest.go @@ -0,0 +1,400 @@ +package internal + +import ( + "strings" + "sync" + "time" +) + +// Harvestable is something that can be merged into a Harvest. +type Harvestable interface { + MergeIntoHarvest(h *Harvest) +} + +// HarvestTypes is a bit set used to indicate which data types are ready to be +// reported. +type HarvestTypes uint + +const ( + // HarvestMetricsTraces is the Metrics Traces type + HarvestMetricsTraces HarvestTypes = 1 << iota + // HarvestSpanEvents is the Span Event type + HarvestSpanEvents + // HarvestCustomEvents is the Custom Event type + HarvestCustomEvents + // HarvestTxnEvents is the Transaction Event type + HarvestTxnEvents + // HarvestErrorEvents is the Error Event type + HarvestErrorEvents +) + +const ( + // HarvestTypesEvents includes all Event types + HarvestTypesEvents = HarvestSpanEvents | HarvestCustomEvents | HarvestTxnEvents | HarvestErrorEvents + // HarvestTypesAll includes all harvest types + HarvestTypesAll = HarvestMetricsTraces | HarvestTypesEvents +) + +type harvestTimer struct { + periods map[HarvestTypes]time.Duration + lastHarvest map[HarvestTypes]time.Time +} + +func newHarvestTimer(now time.Time, periods map[HarvestTypes]time.Duration) *harvestTimer { + lastHarvest := make(map[HarvestTypes]time.Time, len(periods)) + for tp := range periods { + lastHarvest[tp] = now + } + return &harvestTimer{periods: periods, lastHarvest: 
lastHarvest} +} + +func (timer *harvestTimer) ready(now time.Time) (ready HarvestTypes) { + for tp, period := range timer.periods { + if deadline := timer.lastHarvest[tp].Add(period); now.After(deadline) { + timer.lastHarvest[tp] = deadline + ready |= tp + } + } + return +} + +// Harvest contains collected data. +type Harvest struct { + timer *harvestTimer + + Metrics *metricTable + ErrorTraces harvestErrors + TxnTraces *harvestTraces + SlowSQLs *slowQueries + SpanEvents *spanEvents + CustomEvents *customEvents + TxnEvents *txnEvents + ErrorEvents *errorEvents +} + +const ( + // txnEventPayloadlimit is the maximum number of events that should be + // sent up in one post. + txnEventPayloadlimit = 5000 +) + +// Ready returns a new Harvest which contains the data types ready for harvest, +// or nil if no data is ready for harvest. +func (h *Harvest) Ready(now time.Time) *Harvest { + ready := &Harvest{} + + types := h.timer.ready(now) + if 0 == types { + return nil + } + + if 0 != types&HarvestCustomEvents { + h.Metrics.addCount(customEventsSeen, h.CustomEvents.NumSeen(), forced) + h.Metrics.addCount(customEventsSent, h.CustomEvents.NumSaved(), forced) + ready.CustomEvents = h.CustomEvents + h.CustomEvents = newCustomEvents(h.CustomEvents.capacity()) + } + if 0 != types&HarvestTxnEvents { + h.Metrics.addCount(txnEventsSeen, h.TxnEvents.NumSeen(), forced) + h.Metrics.addCount(txnEventsSent, h.TxnEvents.NumSaved(), forced) + ready.TxnEvents = h.TxnEvents + h.TxnEvents = newTxnEvents(h.TxnEvents.capacity()) + } + if 0 != types&HarvestErrorEvents { + h.Metrics.addCount(errorEventsSeen, h.ErrorEvents.NumSeen(), forced) + h.Metrics.addCount(errorEventsSent, h.ErrorEvents.NumSaved(), forced) + ready.ErrorEvents = h.ErrorEvents + h.ErrorEvents = newErrorEvents(h.ErrorEvents.capacity()) + } + if 0 != types&HarvestSpanEvents { + h.Metrics.addCount(spanEventsSeen, h.SpanEvents.NumSeen(), forced) + h.Metrics.addCount(spanEventsSent, h.SpanEvents.NumSaved(), forced) + 
ready.SpanEvents = h.SpanEvents + h.SpanEvents = newSpanEvents(h.SpanEvents.capacity()) + } + // NOTE! Metrics must happen after the event harvest conditionals to + // ensure that the metrics contain the event supportability metrics. + if 0 != types&HarvestMetricsTraces { + ready.Metrics = h.Metrics + ready.ErrorTraces = h.ErrorTraces + ready.SlowSQLs = h.SlowSQLs + ready.TxnTraces = h.TxnTraces + h.Metrics = newMetricTable(maxMetrics, now) + h.ErrorTraces = newHarvestErrors(maxHarvestErrors) + h.SlowSQLs = newSlowQueries(maxHarvestSlowSQLs) + h.TxnTraces = newHarvestTraces() + } + return ready +} + +// Payloads returns a slice of payload creators. +func (h *Harvest) Payloads(splitLargeTxnEvents bool) (ps []PayloadCreator) { + if nil == h { + return + } + if nil != h.CustomEvents { + ps = append(ps, h.CustomEvents) + } + if nil != h.ErrorEvents { + ps = append(ps, h.ErrorEvents) + } + if nil != h.SpanEvents { + ps = append(ps, h.SpanEvents) + } + if nil != h.Metrics { + ps = append(ps, h.Metrics) + } + if nil != h.ErrorTraces { + ps = append(ps, h.ErrorTraces) + } + if nil != h.TxnTraces { + ps = append(ps, h.TxnTraces) + } + if nil != h.SlowSQLs { + ps = append(ps, h.SlowSQLs) + } + if nil != h.TxnEvents { + if splitLargeTxnEvents { + ps = append(ps, h.TxnEvents.payloads(txnEventPayloadlimit)...) + } else { + ps = append(ps, h.TxnEvents) + } + } + return +} + +// MaxTxnEventer returns the maximum number of Transaction Events that should be reported per period +type MaxTxnEventer interface { + MaxTxnEvents() int +} + +// HarvestConfigurer contains information about the configured number of various +// types of events as well as the Faster Event Harvest report period. +// It is implemented by AppRun and DfltHarvestCfgr. 
type HarvestConfigurer interface {
	// ReportPeriods returns a map from the bitset of harvest types to the period that those types should be reported
	ReportPeriods() map[HarvestTypes]time.Duration
	// MaxSpanEvents returns the maximum number of Span Events that should be reported per period
	MaxSpanEvents() int
	// MaxCustomEvents returns the maximum number of Custom Events that should be reported per period
	MaxCustomEvents() int
	// MaxErrorEvents returns the maximum number of Error Events that should be reported per period
	MaxErrorEvents() int
	MaxTxnEventer
}

// NewHarvest returns a new Harvest.  Event container capacities and report
// periods come from the configurer; metric/trace containers use the fixed
// package-level limits.
func NewHarvest(now time.Time, configurer HarvestConfigurer) *Harvest {
	return &Harvest{
		timer:        newHarvestTimer(now, configurer.ReportPeriods()),
		Metrics:      newMetricTable(maxMetrics, now),
		ErrorTraces:  newHarvestErrors(maxHarvestErrors),
		TxnTraces:    newHarvestTraces(),
		SlowSQLs:     newSlowQueries(maxHarvestSlowSQLs),
		SpanEvents:   newSpanEvents(configurer.MaxSpanEvents()),
		CustomEvents: newCustomEvents(configurer.MaxCustomEvents()),
		TxnEvents:    newTxnEvents(configurer.MaxTxnEvents()),
		ErrorEvents:  newErrorEvents(configurer.MaxErrorEvents()),
	}
}

// trackMutex guards trackMetrics, which accumulates supportability metric
// names registered by integration packages via TrackUsage.
var (
	trackMutex   sync.Mutex
	trackMetrics []string
)

// TrackUsage helps track which integration packages are used.
func TrackUsage(s ...string) {
	trackMutex.Lock()
	defer trackMutex.Unlock()

	m := "Supportability/" + strings.Join(s, "/")
	trackMetrics = append(trackMetrics, m)
}

// createTrackUsageMetrics copies the accumulated usage supportability metric
// names into the given metric table as forced single counts.
func createTrackUsageMetrics(metrics *metricTable) {
	trackMutex.Lock()
	defer trackMutex.Unlock()

	for _, m := range trackMetrics {
		metrics.addSingleCount(m, forced)
	}
}

// CreateFinalMetrics creates extra metrics at harvest time.  It is a no-op on
// a nil Harvest or when this Harvest does not include metrics, and as its
// last step applies the connect reply's metric rename rules.
func (h *Harvest) CreateFinalMetrics(reply *ConnectReply, hc HarvestConfigurer) {
	if nil == h {
		return
	}
	// Metrics will be non-nil when harvesting metrics (regardless of
	// whether or not there are any metrics to send).
	if nil == h.Metrics {
		return
	}

	h.Metrics.addSingleCount(instanceReporting, forced)

	// Configurable event harvest supportability metrics:
	// https://source.datanerd.us/agents/agent-specs/blob/master/Connect-LEGACY.md#event-harvest-config
	period := reply.ConfigurablePeriod()
	h.Metrics.addDuration(supportReportPeriod, "", period, period, forced)
	h.Metrics.addValue(supportTxnEventLimit, "", float64(hc.MaxTxnEvents()), forced)
	h.Metrics.addValue(supportCustomEventLimit, "", float64(hc.MaxCustomEvents()), forced)
	h.Metrics.addValue(supportErrorEventLimit, "", float64(hc.MaxErrorEvents()), forced)
	h.Metrics.addValue(supportSpanEventLimit, "", float64(hc.MaxSpanEvents()), forced)

	createTrackUsageMetrics(h.Metrics)

	h.Metrics = h.Metrics.ApplyRules(reply.MetricRules)
}

// PayloadCreator is a data type in the harvest.
type PayloadCreator interface {
	// In the event of a rpm request failure (hopefully simply an
	// intermittent collector issue) the payload may be merged into the next
	// time period's harvest.
	Harvestable
	// Data prepares JSON in the format expected by the collector endpoint.
	// This method should return (nil, nil) if the payload is empty and no
	// rpm request is necessary.
	Data(agentRunID string, harvestStart time.Time) ([]byte, error)
	// EndpointMethod is used for the "method" query parameter when posting
	// the data.
	EndpointMethod() string
}

// supportMetric adds a single forced count of metricName when b is true;
// used for conditional distributed-tracing supportability metrics.
func supportMetric(metrics *metricTable, b bool, metricName string) {
	if b {
		metrics.addSingleCount(metricName, forced)
	}
}

// CreateTxnMetrics creates metrics for a transaction.
func CreateTxnMetrics(args *TxnData, metrics *metricTable) {
	// Rollup names drop the first segment of the transaction name
	// (e.g. "WebTransaction") so they can be prefixed per metric family.
	withoutFirstSegment := removeFirstSegment(args.FinalName)

	// Duration Metrics
	var durationRollup string
	var totalTimeRollup string
	if args.IsWeb {
		durationRollup = webRollup
		totalTimeRollup = totalTimeWeb
		metrics.addDuration(dispatcherMetric, "", args.Duration, 0, forced)
	} else {
		durationRollup = backgroundRollup
		totalTimeRollup = totalTimeBackground
	}

	metrics.addDuration(args.FinalName, "", args.Duration, 0, forced)
	metrics.addDuration(durationRollup, "", args.Duration, 0, forced)

	metrics.addDuration(totalTimeRollup, "", args.TotalTime, args.TotalTime, forced)
	metrics.addDuration(totalTimeRollup+"/"+withoutFirstSegment, "", args.TotalTime, args.TotalTime, unforced)

	// Better CAT Metrics
	if cat := args.BetterCAT; cat.Enabled {
		caller := callerUnknown
		if nil != cat.Inbound {
			caller = cat.Inbound.payloadCaller
		}
		m := durationByCallerMetric(caller)
		metrics.addDuration(m.all, "", args.Duration, args.Duration, unforced)
		metrics.addDuration(m.webOrOther(args.IsWeb), "", args.Duration, args.Duration, unforced)

		// Transport Duration Metric
		if nil != cat.Inbound {
			d := cat.Inbound.TransportDuration
			m = transportDurationMetric(caller)
			metrics.addDuration(m.all, "", d, d, unforced)
			metrics.addDuration(m.webOrOther(args.IsWeb), "", d, d, unforced)
		}

		// CAT Error Metrics
		if args.HasErrors() {
			m = errorsByCallerMetric(caller)
			metrics.addSingleCount(m.all, unforced)
			metrics.addSingleCount(m.webOrOther(args.IsWeb), unforced)
		}

		// Distributed tracing payload accept/create supportability counts.
		supportMetric(metrics, args.AcceptPayloadSuccess, supportTracingAcceptSuccess)
		supportMetric(metrics, args.AcceptPayloadException, supportTracingAcceptException)
		supportMetric(metrics, args.AcceptPayloadParseException, supportTracingAcceptParseException)
		supportMetric(metrics, args.AcceptPayloadCreateBeforeAccept, supportTracingCreateBeforeAccept)
		supportMetric(metrics, args.AcceptPayloadIgnoredMultiple, supportTracingIgnoredMultiple)
		supportMetric(metrics, args.AcceptPayloadIgnoredVersion, supportTracingIgnoredVersion)
		supportMetric(metrics, args.AcceptPayloadUntrustedAccount, supportTracingAcceptUntrustedAccount)
		supportMetric(metrics, args.AcceptPayloadNullPayload, supportTracingAcceptNull)
		supportMetric(metrics, args.CreatePayloadSuccess, supportTracingCreatePayloadSuccess)
		supportMetric(metrics, args.CreatePayloadException, supportTracingCreatePayloadException)
	}

	// Apdex Metrics
	if args.Zone != ApdexNone {
		metrics.addApdex(apdexRollup, "", args.ApdexThreshold, args.Zone, forced)

		mname := apdexPrefix + withoutFirstSegment
		metrics.addApdex(mname, "", args.ApdexThreshold, args.Zone, unforced)
	}

	// Error Metrics
	if args.HasErrors() {
		metrics.addSingleCount(errorsRollupMetric.all, forced)
		metrics.addSingleCount(errorsRollupMetric.webOrOther(args.IsWeb), forced)
		metrics.addSingleCount(errorsPrefix+args.FinalName, forced)
	}

	// Queueing Metrics
	if args.Queuing > 0 {
		metrics.addDuration(queueMetric, "", args.Queuing, args.Queuing, forced)
	}
}

// DfltHarvestCfgr implements HarvestConfigurer for internal test cases, and for situations where we don't
// have a ConnectReply, such as for serverless harvests
type DfltHarvestCfgr struct {
	// nil fields mean "use the package default" in the getters below.
	reportPeriods   map[HarvestTypes]time.Duration
	maxTxnEvents    *uint
	maxSpanEvents   *uint
	maxCustomEvents *uint
	maxErrorEvents  *uint
}

// ReportPeriods returns a map from the bitset of harvest types to the period that those types should be reported
func (d *DfltHarvestCfgr) ReportPeriods() map[HarvestTypes]time.Duration {
	if d.reportPeriods != nil {
		return d.reportPeriods
	}
	return map[HarvestTypes]time.Duration{HarvestTypesAll: FixedHarvestPeriod}
}

// MaxTxnEvents returns the maximum number of Transaction Events that should be reported per period
func (d *DfltHarvestCfgr) MaxTxnEvents() int {
	if d.maxTxnEvents != nil {
		return int(*d.maxTxnEvents)
	}
	return MaxTxnEvents
}

// MaxSpanEvents returns the maximum number of Span Events that should be reported per period
func (d *DfltHarvestCfgr) MaxSpanEvents() int {
	if d.maxSpanEvents != nil {
		return int(*d.maxSpanEvents)
	}
	return MaxSpanEvents
}

// MaxCustomEvents returns the maximum number of Custom Events that should be reported per period
func (d *DfltHarvestCfgr) MaxCustomEvents() int {
	if d.maxCustomEvents != nil {
		return int(*d.maxCustomEvents)
	}
	return MaxCustomEvents
}

// MaxErrorEvents returns the maximum number of Error Events that should be reported per period
func (d *DfltHarvestCfgr) MaxErrorEvents() int {
	if d.maxErrorEvents != nil {
		return int(*d.maxErrorEvents)
	}
	return MaxErrorEvents
}
diff --git a/internal/harvest_test.go b/internal/harvest_test.go
new file mode 100644
index 000000000..17e5c9c9b
--- /dev/null
+++ b/internal/harvest_test.go
@@ -0,0 +1,875 @@
package internal

import (
	"testing"
	"time"
)

// TestHarvestTimerAllFixed verifies that with the default configuration every
// harvest type becomes ready exactly once per fixed 60-second period, and that
// the deadline does not drift between checks.
func TestHarvestTimerAllFixed(t *testing.T) {
	now := time.Now()
	harvest := NewHarvest(now, &DfltHarvestCfgr{})
	timer := harvest.timer
	for _, tc := range []struct {
		Elapsed time.Duration
		Expect  HarvestTypes
	}{
		{60 * time.Second, 0},
		{61 * time.Second, HarvestTypesAll},
		{62 * time.Second, 0},
		{120 * time.Second, 0},
		{121 * time.Second, HarvestTypesAll},
		{122 * time.Second, 0},
	} {
		if ready := timer.ready(now.Add(tc.Elapsed)); ready != tc.Expect {
			t.Error(tc.Elapsed, ready, tc.Expect)
		}
	}
}

// Shared pointers for populating DfltHarvestCfgr's optional limit fields.
var one uint = 1
var two uint = 2
var three uint = 3
var four uint = 4

// TestHarvestTimerAllConfigurable verifies that event types on a faster
// 30-second period become ready independently of the fixed 60-second types.
func TestHarvestTimerAllConfigurable(t *testing.T) {
	now := time.Now()
	harvest := NewHarvest(now, &DfltHarvestCfgr{
		reportPeriods: map[HarvestTypes]time.Duration{
			HarvestMetricsTraces: FixedHarvestPeriod,
			HarvestTypesEvents:   time.Second * 30,
		},
		maxTxnEvents:    &one,
		maxCustomEvents: &two,
		maxSpanEvents:   &three,
		maxErrorEvents:  &four,
	})
	timer := 
harvest.timer
	for _, tc := range []struct {
		Elapsed time.Duration
		Expect  HarvestTypes
	}{
		{30 * time.Second, 0},
		{31 * time.Second, HarvestTypesEvents},
		{32 * time.Second, 0},
		{61 * time.Second, HarvestTypesAll},
		{62 * time.Second, 0},
		{91 * time.Second, HarvestTypesEvents},
		{92 * time.Second, 0},
	} {
		if ready := timer.ready(now.Add(tc.Elapsed)); ready != tc.Expect {
			t.Error(tc.Elapsed, ready, tc.Expect)
		}
	}
}

// TestCreateFinalMetrics checks nil-safety, metric rename rules, and the
// event-harvest-config supportability metrics, both with and without an
// event_harvest_config in the connect reply.
func TestCreateFinalMetrics(t *testing.T) {
	now := time.Now()

	// If the harvest or metrics is nil then CreateFinalMetrics should
	// not panic.
	var nilHarvest *Harvest
	nilHarvest.CreateFinalMetrics(nil, &DfltHarvestCfgr{})
	emptyHarvest := &Harvest{}
	emptyHarvest.CreateFinalMetrics(nil, &DfltHarvestCfgr{})

	replyJSON := []byte(`{"return_value":{
		"metric_name_rules":[{
			"match_expression": "rename_me",
			"replacement": "been_renamed"
		}],
		"event_harvest_config":{
			"report_period_ms": 2000,
			"harvest_limits": {
				"analytic_event_data": 22,
				"custom_event_data": 33,
				"error_event_data": 44,
				"span_event_data": 55
			}
		}
	}}`)
	reply, err := ConstructConnectReply(replyJSON, PreconnectReply{})
	if err != nil {
		t.Fatal(err)
	}
	var txnEvents uint = 22
	var customEvents uint = 33
	var errorEvents uint = 44
	var spanEvents uint = 55
	cfgr := &DfltHarvestCfgr{
		reportPeriods: map[HarvestTypes]time.Duration{
			HarvestMetricsTraces: FixedHarvestPeriod,
			HarvestTypesEvents:   time.Second * 2,
		},
		maxTxnEvents:    &txnEvents,
		maxCustomEvents: &customEvents,
		maxErrorEvents:  &errorEvents,
		maxSpanEvents:   &spanEvents,
	}
	h := NewHarvest(now, cfgr)
	h.Metrics.addCount("rename_me", 1.0, unforced)
	h.CreateFinalMetrics(reply, cfgr)
	ExpectMetrics(t, h.Metrics, []WantMetric{
		{instanceReporting, "", true, []float64{1, 0, 0, 0, 0, 0}},
		{"been_renamed", "", false, []float64{1.0, 0, 0, 0, 0, 0}},
		{"Supportability/EventHarvest/ReportPeriod", "", true, []float64{1, 2, 2, 2, 2, 2 * 2}},
		{"Supportability/EventHarvest/AnalyticEventData/HarvestLimit", "", true, []float64{1, 22, 22, 22, 22, 22 * 22}},
		{"Supportability/EventHarvest/CustomEventData/HarvestLimit", "", true, []float64{1, 33, 33, 33, 33, 33 * 33}},
		{"Supportability/EventHarvest/ErrorEventData/HarvestLimit", "", true, []float64{1, 44, 44, 44, 44, 44 * 44}},
		{"Supportability/EventHarvest/SpanEventData/HarvestLimit", "", true, []float64{1, 55, 55, 55, 55, 55 * 55}},
	})

	// Test again without any metric rules or event_harvest_config.

	replyJSON = []byte(`{"return_value":{
	}}`)
	reply, err = ConstructConnectReply(replyJSON, PreconnectReply{})
	if err != nil {
		t.Fatal(err)
	}
	h = NewHarvest(now, &DfltHarvestCfgr{})
	h.Metrics.addCount("rename_me", 1.0, unforced)
	h.CreateFinalMetrics(reply, &DfltHarvestCfgr{})
	ExpectMetrics(t, h.Metrics, []WantMetric{
		{instanceReporting, "", true, []float64{1, 0, 0, 0, 0, 0}},
		{"rename_me", "", false, []float64{1.0, 0, 0, 0, 0, 0}},
		{"Supportability/EventHarvest/ReportPeriod", "", true, []float64{1, 60, 60, 60, 60, 60 * 60}},
		{"Supportability/EventHarvest/AnalyticEventData/HarvestLimit", "", true, []float64{1, 10 * 1000, 10 * 1000, 10 * 1000, 10 * 1000, 10 * 1000 * 10 * 1000}},
		{"Supportability/EventHarvest/CustomEventData/HarvestLimit", "", true, []float64{1, 10 * 1000, 10 * 1000, 10 * 1000, 10 * 1000, 10 * 1000 * 10 * 1000}},
		{"Supportability/EventHarvest/ErrorEventData/HarvestLimit", "", true, []float64{1, 100, 100, 100, 100, 100 * 100}},
		{"Supportability/EventHarvest/SpanEventData/HarvestLimit", "", true, []float64{1, 1000, 1000, 1000, 1000, 1000 * 1000}},
	})
}

// TestEmptyPayloads checks that a fresh harvest yields one payload creator per
// data type (8 total) and that each produces no data to post.
func TestEmptyPayloads(t *testing.T) {
	h := NewHarvest(time.Now(), &DfltHarvestCfgr{})
	payloads := h.Payloads(true)
	if len(payloads) != 8 {
		t.Error(len(payloads))
	}
	for _, p := range payloads {
		d, err := p.Data("agentRunID", time.Now())
		if d != nil || err != nil {
			t.Error(d, err)
		}
	}
}
func TestPayloadsNilHarvest(t *testing.T) {
var nilHarvest *Harvest + payloads := nilHarvest.Payloads(true) + if len(payloads) != 0 { + t.Error(len(payloads)) + } +} + +func TestPayloadsEmptyHarvest(t *testing.T) { + h := &Harvest{} + payloads := h.Payloads(true) + if len(payloads) != 0 { + t.Error(len(payloads)) + } +} + +func TestHarvestNothingReady(t *testing.T) { + now := time.Now() + h := NewHarvest(now, &DfltHarvestCfgr{}) + ready := h.Ready(now.Add(10 * time.Second)) + if ready != nil { + t.Error("harvest should be nil") + } + payloads := ready.Payloads(true) + if len(payloads) != 0 { + t.Error(payloads) + } + ExpectMetrics(t, h.Metrics, []WantMetric{}) +} + +func TestHarvestCustomEventsReady(t *testing.T) { + now := time.Now() + fixedHarvestTypes := HarvestMetricsTraces & HarvestTxnEvents & HarvestSpanEvents & HarvestErrorEvents + h := NewHarvest(now, &DfltHarvestCfgr{ + reportPeriods: map[HarvestTypes]time.Duration{ + fixedHarvestTypes: FixedHarvestPeriod, + HarvestCustomEvents: time.Second * 5, + }, + maxCustomEvents: &three, + }) + params := map[string]interface{}{"zip": 1} + ce, _ := CreateCustomEvent("myEvent", params, time.Now()) + h.CustomEvents.Add(ce) + ready := h.Ready(now.Add(10 * time.Second)) + payloads := ready.Payloads(true) + if len(payloads) != 1 { + t.Fatal(payloads) + } + p := payloads[0] + if m := p.EndpointMethod(); m != "custom_event_data" { + t.Error(m) + } + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) + } + if h.CustomEvents.capacity() != 3 || h.CustomEvents.NumSaved() != 0 { + t.Fatal("custom events not correctly reset") + } + ExpectCustomEvents(t, ready.CustomEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{"type": "myEvent", "timestamp": MatchAnything}, + UserAttributes: params, + }}) + ExpectMetrics(t, h.Metrics, []WantMetric{ + {customEventsSeen, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {customEventsSent, "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) +} + +func TestHarvestTxnEventsReady(t *testing.T) { + now := 
time.Now() + fixedHarvestTypes := HarvestMetricsTraces & HarvestCustomEvents & HarvestSpanEvents & HarvestErrorEvents + h := NewHarvest(now, &DfltHarvestCfgr{ + reportPeriods: map[HarvestTypes]time.Duration{ + fixedHarvestTypes: FixedHarvestPeriod, + HarvestTxnEvents: time.Second * 5, + }, + maxTxnEvents: &three, + }) + h.TxnEvents.AddTxnEvent(&TxnEvent{ + FinalName: "finalName", + Start: time.Now(), + Duration: 1 * time.Second, + TotalTime: 2 * time.Second, + }, 0) + ready := h.Ready(now.Add(10 * time.Second)) + payloads := ready.Payloads(true) + if len(payloads) != 1 { + t.Fatal(payloads) + } + p := payloads[0] + if m := p.EndpointMethod(); m != "analytic_event_data" { + t.Error(m) + } + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) + } + if h.TxnEvents.capacity() != 3 || h.TxnEvents.NumSaved() != 0 { + t.Fatal("txn events not correctly reset") + } + ExpectTxnEvents(t, ready.TxnEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "finalName", + "totalTime": 2.0, + }, + }}) + ExpectMetrics(t, h.Metrics, []WantMetric{ + {txnEventsSeen, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {txnEventsSent, "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) +} + +func TestHarvestErrorEventsReady(t *testing.T) { + now := time.Now() + fixedHarvestTypes := HarvestMetricsTraces & HarvestCustomEvents & HarvestSpanEvents & HarvestTxnEvents + h := NewHarvest(now, &DfltHarvestCfgr{ + reportPeriods: map[HarvestTypes]time.Duration{ + fixedHarvestTypes: FixedHarvestPeriod, + HarvestErrorEvents: time.Second * 5, + }, + maxErrorEvents: &three, + }) + h.ErrorEvents.Add(&ErrorEvent{ + ErrorData: ErrorData{Klass: "klass", Msg: "msg", When: time.Now()}, + TxnEvent: TxnEvent{FinalName: "finalName", Duration: 1 * time.Second}, + }, 0) + ready := h.Ready(now.Add(10 * time.Second)) + payloads := ready.Payloads(true) + if len(payloads) != 1 { + t.Fatal(payloads) + } + p := payloads[0] + if m := p.EndpointMethod(); m != "error_event_data" { + 
t.Error(m) + } + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) + } + if h.ErrorEvents.capacity() != 3 || h.ErrorEvents.NumSaved() != 0 { + t.Fatal("error events not correctly reset") + } + ExpectErrorEvents(t, ready.ErrorEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "klass", + "error.message": "msg", + "transactionName": "finalName", + }, + }}) + ExpectMetrics(t, h.Metrics, []WantMetric{ + {errorEventsSeen, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {errorEventsSent, "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) +} + +func TestHarvestSpanEventsReady(t *testing.T) { + now := time.Now() + fixedHarvestTypes := HarvestMetricsTraces & HarvestCustomEvents & HarvestTxnEvents & HarvestErrorEvents + h := NewHarvest(now, &DfltHarvestCfgr{ + reportPeriods: map[HarvestTypes]time.Duration{ + fixedHarvestTypes: FixedHarvestPeriod, + HarvestSpanEvents: time.Second * 5, + }, + maxSpanEvents: &three, + }) + h.SpanEvents.addEventPopulated(&sampleSpanEvent) + ready := h.Ready(now.Add(10 * time.Second)) + payloads := ready.Payloads(true) + if len(payloads) != 1 { + t.Fatal(payloads) + } + p := payloads[0] + if m := p.EndpointMethod(); m != "span_event_data" { + t.Error(m) + } + data, err := p.Data("agentRunID", now) + if nil != err || nil == data { + t.Error(err, data) + } + if h.SpanEvents.capacity() != 3 || h.SpanEvents.NumSaved() != 0 { + t.Fatal("span events not correctly reset") + } + ExpectSpanEvents(t, ready.SpanEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "type": "Span", + "name": "myName", + "sampled": true, + "priority": 0.5, + "category": spanCategoryGeneric, + "nr.entryPoint": true, + "guid": "guid", + "transactionId": "txn-id", + "traceId": "trace-id", + }, + }}) + ExpectMetrics(t, h.Metrics, []WantMetric{ + {spanEventsSeen, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {spanEventsSent, "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) +} + +func TestHarvestMetricsTracesReady(t *testing.T) 
{
	now := time.Now()
	h := NewHarvest(now, &DfltHarvestCfgr{
		reportPeriods: map[HarvestTypes]time.Duration{
			HarvestMetricsTraces: FixedHarvestPeriod,
			HarvestTypesEvents:   time.Second * 65,
		},
		maxTxnEvents:    &one,
		maxCustomEvents: &one,
		maxErrorEvents:  &one,
		maxSpanEvents:   &one,
	})
	h.Metrics.addCount("zip", 1, forced)

	ers := NewTxnErrors(10)
	ers.Add(ErrorData{When: time.Now(), Msg: "msg", Klass: "klass", Stack: GetStackTrace()})
	MergeTxnErrors(&h.ErrorTraces, ers, TxnEvent{FinalName: "finalName", Attrs: nil})

	h.TxnTraces.Witness(HarvestTrace{
		TxnEvent: TxnEvent{
			Start:     time.Now(),
			Duration:  20 * time.Second,
			TotalTime: 30 * time.Second,
			FinalName: "WebTransaction/Go/hello",
		},
		Trace: TxnTrace{},
	})

	slows := newSlowQueries(maxTxnSlowQueries)
	slows.observeInstance(slowQueryInstance{
		Duration:           2 * time.Second,
		DatastoreMetric:    "Datastore/statement/MySQL/users/INSERT",
		ParameterizedQuery: "INSERT users",
	})
	h.SlowSQLs.Merge(slows, TxnEvent{FinalName: "finalName", Attrs: nil})

	// At +61s only the metrics/traces group is ready (events are on a 65s
	// period), so exactly the four trace-family payloads are produced and
	// their containers in h are reset.
	ready := h.Ready(now.Add(61 * time.Second))
	payloads := ready.Payloads(true)
	if len(payloads) != 4 {
		t.Fatal(payloads)
	}

	ExpectMetrics(t, ready.Metrics, []WantMetric{
		{"zip", "", true, []float64{1, 0, 0, 0, 0, 0}},
	})
	ExpectMetrics(t, h.Metrics, []WantMetric{})

	ExpectErrors(t, ready.ErrorTraces, []WantError{{
		TxnName: "finalName",
		Msg:     "msg",
		Klass:   "klass",
	}})
	ExpectErrors(t, h.ErrorTraces, []WantError{})

	ExpectSlowQueries(t, ready.SlowSQLs, []WantSlowQuery{{
		Count:      1,
		MetricName: "Datastore/statement/MySQL/users/INSERT",
		Query:      "INSERT users",
		TxnName:    "finalName",
	}})
	ExpectSlowQueries(t, h.SlowSQLs, []WantSlowQuery{})

	ExpectTxnTraces(t, ready.TxnTraces, []WantTxnTrace{{
		MetricName: "WebTransaction/Go/hello",
	}})
	ExpectTxnTraces(t, h.TxnTraces, []WantTxnTrace{})
}

// TestMergeFailedHarvest populates every data type of a harvest, then merges
// its payloads into a later harvest (simulating a failed collector post) and
// verifies the data and failed-harvest counters carry over.
func TestMergeFailedHarvest(t *testing.T) {
	start1 := time.Now()
	start2 := start1.Add(1 * time.Minute)

	h := NewHarvest(start1, &DfltHarvestCfgr{})
	h.Metrics.addCount("zip", 1, forced)
	h.TxnEvents.AddTxnEvent(&TxnEvent{
		FinalName: "finalName",
		Start:     time.Now(),
		Duration:  1 * time.Second,
		TotalTime: 2 * time.Second,
	}, 0)
	customEventParams := map[string]interface{}{"zip": 1}
	ce, err := CreateCustomEvent("myEvent", customEventParams, time.Now())
	if nil != err {
		t.Fatal(err)
	}
	h.CustomEvents.Add(ce)
	h.ErrorEvents.Add(&ErrorEvent{
		ErrorData: ErrorData{
			Klass: "klass",
			Msg:   "msg",
			When:  time.Now(),
		},
		TxnEvent: TxnEvent{
			FinalName: "finalName",
			Duration:  1 * time.Second,
		},
	}, 0)

	ers := NewTxnErrors(10)
	ers.Add(ErrorData{
		When:  time.Now(),
		Msg:   "msg",
		Klass: "klass",
		Stack: GetStackTrace(),
	})
	MergeTxnErrors(&h.ErrorTraces, ers, TxnEvent{
		FinalName: "finalName",
		Attrs:     nil,
	})
	h.SpanEvents.addEventPopulated(&sampleSpanEvent)

	if start1 != h.Metrics.metricPeriodStart {
		t.Error(h.Metrics.metricPeriodStart)
	}
	if 0 != h.Metrics.failedHarvests {
		t.Error(h.Metrics.failedHarvests)
	}
	if 0 != h.CustomEvents.analyticsEvents.failedHarvests {
		t.Error(h.CustomEvents.analyticsEvents.failedHarvests)
	}
	if 0 != h.TxnEvents.analyticsEvents.failedHarvests {
		t.Error(h.TxnEvents.analyticsEvents.failedHarvests)
	}
	if 0 != h.ErrorEvents.analyticsEvents.failedHarvests {
		t.Error(h.ErrorEvents.analyticsEvents.failedHarvests)
	}
	if 0 != h.SpanEvents.analyticsEvents.failedHarvests {
		t.Error(h.SpanEvents.analyticsEvents.failedHarvests)
	}
	ExpectMetrics(t, h.Metrics, []WantMetric{
		{"zip", "", true, []float64{1, 0, 0, 0, 0, 0}},
	})
	ExpectCustomEvents(t, h.CustomEvents, []WantEvent{{
		Intrinsics: map[string]interface{}{
			"type":      "myEvent",
			"timestamp": MatchAnything,
		},
		UserAttributes: customEventParams,
	}})
	ExpectErrorEvents(t, h.ErrorEvents, []WantEvent{{
		Intrinsics: map[string]interface{}{
			"error.class":     "klass",
			"error.message":   "msg",
			"transactionName": "finalName",
		},
	}})
	ExpectTxnEvents(t, h.TxnEvents, []WantEvent{{
		Intrinsics: map[string]interface{}{
			"name":      "finalName",
			"totalTime": 2.0,
		},
	}})
	ExpectSpanEvents(t, h.SpanEvents, []WantEvent{{
		Intrinsics: map[string]interface{}{
			"type":          "Span",
			"name":          "myName",
			"sampled":       true,
			"priority":      0.5,
			"category":      spanCategoryGeneric,
			"nr.entryPoint": true,
			"guid":          "guid",
			"transactionId": "txn-id",
			"traceId":       "trace-id",
		},
	}})
	ExpectErrors(t, h.ErrorTraces, []WantError{{
		TxnName: "finalName",
		Msg:     "msg",
		Klass:   "klass",
	}})

	nextHarvest := NewHarvest(start2, &DfltHarvestCfgr{})
	if start2 != nextHarvest.Metrics.metricPeriodStart {
		t.Error(nextHarvest.Metrics.metricPeriodStart)
	}
	payloads := h.Payloads(true)
	for _, p := range payloads {
		p.MergeIntoHarvest(nextHarvest)
	}

	if start1 != nextHarvest.Metrics.metricPeriodStart {
		t.Error(nextHarvest.Metrics.metricPeriodStart)
	}
	if 1 != nextHarvest.Metrics.failedHarvests {
		t.Error(nextHarvest.Metrics.failedHarvests)
	}
	if 1 != nextHarvest.CustomEvents.analyticsEvents.failedHarvests {
		t.Error(nextHarvest.CustomEvents.analyticsEvents.failedHarvests)
	}
	if 1 != nextHarvest.TxnEvents.analyticsEvents.failedHarvests {
		t.Error(nextHarvest.TxnEvents.analyticsEvents.failedHarvests)
	}
	if 1 != nextHarvest.ErrorEvents.analyticsEvents.failedHarvests {
		t.Error(nextHarvest.ErrorEvents.analyticsEvents.failedHarvests)
	}
	if 1 != nextHarvest.SpanEvents.analyticsEvents.failedHarvests {
		t.Error(nextHarvest.SpanEvents.analyticsEvents.failedHarvests)
	}
	ExpectMetrics(t, nextHarvest.Metrics, []WantMetric{
		{"zip", "", true, []float64{1, 0, 0, 0, 0, 0}},
	})
	ExpectCustomEvents(t, nextHarvest.CustomEvents, []WantEvent{{
		Intrinsics: map[string]interface{}{
			"type":      "myEvent",
			"timestamp": MatchAnything,
		},
		UserAttributes: customEventParams,
	}})
	ExpectErrorEvents(t, nextHarvest.ErrorEvents, []WantEvent{{
		Intrinsics: map[string]interface{}{
			"error.class":     "klass",
			"error.message":   "msg",
			"transactionName": "finalName",
		},
	}})
	ExpectTxnEvents(t, nextHarvest.TxnEvents, []WantEvent{{
		Intrinsics: map[string]interface{}{
			"name":      "finalName",
			"totalTime": 2.0,
		},
	}})
	// BUGFIX: this previously asserted against h.SpanEvents, which does not
	// exercise the merge at all; every other post-merge assertion checks
	// nextHarvest, so check nextHarvest.SpanEvents here too.
	ExpectSpanEvents(t, nextHarvest.SpanEvents, []WantEvent{{
		Intrinsics: map[string]interface{}{
			"type":          "Span",
			"name":          "myName",
			"sampled":       true,
			"priority":      0.5,
			"category":      spanCategoryGeneric,
			"nr.entryPoint": true,
			"guid":          "guid",
			"transactionId": "txn-id",
			"traceId":       "trace-id",
		},
	}})
	ExpectErrors(t, nextHarvest.ErrorTraces, []WantError{})
}

// TestCreateTxnMetrics covers the metric families produced for web and
// background transactions, with and without errors, with BetterCAT enabled.
func TestCreateTxnMetrics(t *testing.T) {
	txnErr := &ErrorData{}
	txnErrors := []*ErrorData{txnErr}
	webName := "WebTransaction/zip/zap"
	backgroundName := "OtherTransaction/zip/zap"
	args := &TxnData{}
	args.Duration = 123 * time.Second
	args.TotalTime = 150 * time.Second
	args.ApdexThreshold = 2 * time.Second

	args.BetterCAT.Enabled = true

	args.FinalName = webName
	args.IsWeb = true
	args.Errors = txnErrors
	args.Zone = ApdexTolerating
	metrics := newMetricTable(100, time.Now())
	CreateTxnMetrics(args, metrics)
	ExpectMetrics(t, metrics, []WantMetric{
		{webName, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}},
		{webRollup, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}},
		{dispatcherMetric, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}},
		{"WebTransactionTotalTime", "", true, []float64{1, 150, 150, 150, 150, 150 * 150}},
		{"WebTransactionTotalTime/zip/zap", "", false, []float64{1, 150, 150, 150, 150, 150 * 150}},
		{"Errors/all", "", true, []float64{1, 0, 0, 0, 0, 0}},
		{"Errors/allWeb", "", true, []float64{1, 0, 0, 0, 0, 0}},
		{"Errors/" + webName, "", true, []float64{1, 0, 0, 0, 0, 0}},
		{apdexRollup, "", true, []float64{0, 1, 0, 2, 2, 0}},
		{"Apdex/zip/zap", "", false, []float64{0, 1, 0, 2, 2, 0}},
		{"DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", "", false,
[]float64{1, 123, 123, 123, 123, 123 * 123}}, + {"DurationByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", "", false, []float64{1, 123, 123, 123, 123, 123 * 123}}, + {"ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/all", "", false, []float64{1, 0, 0, 0, 0, 0}}, + {"ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", "", false, []float64{1, 0, 0, 0, 0, 0}}, + }) + + args.FinalName = webName + args.IsWeb = true + args.Errors = nil + args.Zone = ApdexTolerating + metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {webName, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {webRollup, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {dispatcherMetric, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {"WebTransactionTotalTime", "", true, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"WebTransactionTotalTime/zip/zap", "", false, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {apdexRollup, "", true, []float64{0, 1, 0, 2, 2, 0}}, + {"Apdex/zip/zap", "", false, []float64{0, 1, 0, 2, 2, 0}}, + {"DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", "", false, []float64{1, 123, 123, 123, 123, 123 * 123}}, + {"DurationByCaller/Unknown/Unknown/Unknown/Unknown/allWeb", "", false, []float64{1, 123, 123, 123, 123, 123 * 123}}, + }) + + args.FinalName = backgroundName + args.IsWeb = false + args.Errors = txnErrors + args.Zone = ApdexNone + metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {backgroundName, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {backgroundRollup, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {"OtherTransactionTotalTime", "", true, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"OtherTransactionTotalTime/zip/zap", "", false, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"Errors/all", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/allOther", "", true, []float64{1, 0, 
0, 0, 0, 0}}, + {"Errors/" + backgroundName, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", "", false, []float64{1, 123, 123, 123, 123, 123 * 123}}, + {"DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", "", false, []float64{1, 123, 123, 123, 123, 123 * 123}}, + {"ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/all", "", false, []float64{1, 0, 0, 0, 0, 0}}, + {"ErrorsByCaller/Unknown/Unknown/Unknown/Unknown/allOther", "", false, []float64{1, 0, 0, 0, 0, 0}}, + }) + + args.FinalName = backgroundName + args.IsWeb = false + args.Errors = nil + args.Zone = ApdexNone + metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {backgroundName, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {backgroundRollup, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {"OtherTransactionTotalTime", "", true, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"OtherTransactionTotalTime/zip/zap", "", false, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"DurationByCaller/Unknown/Unknown/Unknown/Unknown/all", "", false, []float64{1, 123, 123, 123, 123, 123 * 123}}, + {"DurationByCaller/Unknown/Unknown/Unknown/Unknown/allOther", "", false, []float64{1, 123, 123, 123, 123, 123 * 123}}, + }) + +} + +func TestHarvestSplitTxnEvents(t *testing.T) { + now := time.Now() + h := NewHarvest(now, &DfltHarvestCfgr{}) + for i := 0; i < MaxTxnEvents; i++ { + h.TxnEvents.AddTxnEvent(&TxnEvent{}, Priority(float32(i))) + } + + payloadsWithSplit := h.Payloads(true) + payloadsWithoutSplit := h.Payloads(false) + + if len(payloadsWithSplit) != 9 { + t.Error(len(payloadsWithSplit)) + } + if len(payloadsWithoutSplit) != 8 { + t.Error(len(payloadsWithoutSplit)) + } +} + +func TestCreateTxnMetricsOldCAT(t *testing.T) { + txnErr := &ErrorData{} + txnErrors := []*ErrorData{txnErr} + webName := "WebTransaction/zip/zap" + backgroundName := "OtherTransaction/zip/zap" + args := 
&TxnData{} + args.Duration = 123 * time.Second + args.TotalTime = 150 * time.Second + args.ApdexThreshold = 2 * time.Second + + // When BetterCAT is disabled, affirm that the caller metrics are not created. + args.BetterCAT.Enabled = false + + args.FinalName = webName + args.IsWeb = true + args.Errors = txnErrors + args.Zone = ApdexTolerating + metrics := newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {webName, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {webRollup, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {dispatcherMetric, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {"WebTransactionTotalTime", "", true, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"WebTransactionTotalTime/zip/zap", "", false, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"Errors/all", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/allWeb", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/" + webName, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {apdexRollup, "", true, []float64{0, 1, 0, 2, 2, 0}}, + {"Apdex/zip/zap", "", false, []float64{0, 1, 0, 2, 2, 0}}, + }) + + args.FinalName = webName + args.IsWeb = true + args.Errors = nil + args.Zone = ApdexTolerating + metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {webName, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {webRollup, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {dispatcherMetric, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {"WebTransactionTotalTime", "", true, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"WebTransactionTotalTime/zip/zap", "", false, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {apdexRollup, "", true, []float64{0, 1, 0, 2, 2, 0}}, + {"Apdex/zip/zap", "", false, []float64{0, 1, 0, 2, 2, 0}}, + }) + + args.FinalName = backgroundName + args.IsWeb = false + args.Errors = txnErrors + args.Zone = ApdexNone + 
metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {backgroundName, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {backgroundRollup, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {"OtherTransactionTotalTime", "", true, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"OtherTransactionTotalTime/zip/zap", "", false, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"Errors/all", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/allOther", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/" + backgroundName, "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) + + args.FinalName = backgroundName + args.IsWeb = false + args.Errors = nil + args.Zone = ApdexNone + metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {backgroundName, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {backgroundRollup, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {"OtherTransactionTotalTime", "", true, []float64{1, 150, 150, 150, 150, 150 * 150}}, + {"OtherTransactionTotalTime/zip/zap", "", false, []float64{1, 150, 150, 150, 150, 150 * 150}}, + }) +} + +func TestNewHarvestSetsDefaultValues(t *testing.T) { + now := time.Now() + h := NewHarvest(now, &DfltHarvestCfgr{}) + + if cp := h.TxnEvents.capacity(); cp != MaxTxnEvents { + t.Error("wrong txn event capacity", cp) + } + if cp := h.CustomEvents.capacity(); cp != MaxCustomEvents { + t.Error("wrong custom event capacity", cp) + } + if cp := h.ErrorEvents.capacity(); cp != MaxErrorEvents { + t.Error("wrong error event capacity", cp) + } + if cp := h.SpanEvents.capacity(); cp != MaxSpanEvents { + t.Error("wrong span event capacity", cp) + } +} + +func TestNewHarvestUsesConnectReply(t *testing.T) { + now := time.Now() + h := NewHarvest(now, &DfltHarvestCfgr{ + reportPeriods: map[HarvestTypes]time.Duration{ + HarvestMetricsTraces: FixedHarvestPeriod, + HarvestTypesEvents: 
time.Second * 5, + }, + maxTxnEvents: &one, + maxCustomEvents: &two, + maxErrorEvents: &three, + maxSpanEvents: &four, + }) + + if cp := h.TxnEvents.capacity(); cp != 1 { + t.Error("wrong txn event capacity", cp) + } + if cp := h.CustomEvents.capacity(); cp != 2 { + t.Error("wrong custom event capacity", cp) + } + if cp := h.ErrorEvents.capacity(); cp != 3 { + t.Error("wrong error event capacity", cp) + } + if cp := h.SpanEvents.capacity(); cp != 4 { + t.Error("wrong span event capacity", cp) + } +} + +func TestConfigurableHarvestZeroHarvestLimits(t *testing.T) { + now := time.Now() + + var zero uint + h := NewHarvest(now, &DfltHarvestCfgr{ + reportPeriods: map[HarvestTypes]time.Duration{ + HarvestMetricsTraces: FixedHarvestPeriod, + HarvestTypesEvents: time.Second * 5, + }, + maxTxnEvents: &zero, + maxCustomEvents: &zero, + maxErrorEvents: &zero, + maxSpanEvents: &zero, + }) + if cp := h.TxnEvents.capacity(); cp != 0 { + t.Error("wrong txn event capacity", cp) + } + if cp := h.CustomEvents.capacity(); cp != 0 { + t.Error("wrong custom event capacity", cp) + } + if cp := h.ErrorEvents.capacity(); cp != 0 { + t.Error("wrong error event capacity", cp) + } + if cp := h.SpanEvents.capacity(); cp != 0 { + t.Error("wrong span event capacity", cp) + } + + // Add events to ensure that adding events to zero-capacity pools is + // safe. + h.TxnEvents.AddTxnEvent(&TxnEvent{}, 1.0) + h.CustomEvents.Add(&CustomEvent{}) + h.ErrorEvents.Add(&ErrorEvent{}, 1.0) + h.SpanEvents.addEventPopulated(&sampleSpanEvent) + + // Create the payloads to ensure doing so with zero-capacity pools is + // safe. + payloads := h.Ready(now.Add(2 * time.Minute)).Payloads(false) + for _, p := range payloads { + js, err := p.Data("agentRunID", now.Add(2*time.Minute)) + if nil != err { + t.Error(err) + continue + } + // Only metric data should be present. 
+ if (p.EndpointMethod() == "metric_data") != + (string(js) != "") { + t.Error(p.EndpointMethod(), string(js)) + } + } +} diff --git a/internal/integrationsupport/integrationsupport.go b/internal/integrationsupport/integrationsupport.go new file mode 100644 index 000000000..b306707ff --- /dev/null +++ b/internal/integrationsupport/integrationsupport.go @@ -0,0 +1,79 @@ +// Package integrationsupport exists to expose functionality to integration +// packages without adding noise to the public API. +package integrationsupport + +import ( + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +// AddAgentAttribute allows instrumentation packages to add agent attributes. +func AddAgentAttribute(txn newrelic.Transaction, id internal.AgentAttributeID, stringVal string, otherVal interface{}) { + if aa, ok := txn.(internal.AddAgentAttributer); ok { + aa.AddAgentAttribute(id, stringVal, otherVal) + } +} + +// AddAgentSpanAttribute allows instrumentation packages to add span attributes. +func AddAgentSpanAttribute(txn newrelic.Transaction, key internal.SpanAttribute, val string) { + internal.AddAgentSpanAttribute(txn, key, val) +} + +// This code below is used for testing and is based on the similar code in internal_test.go in +// the newrelic package. That code is not exported, though, and we frequently need something similar +// for integration packages, so it is copied here. 
+const ( + testLicenseKey = "0123456789012345678901234567890123456789" + SampleAppName = "my app" +) + +// ExpectApp combines Application and Expect, for use in validating data in test apps +type ExpectApp interface { + internal.Expect + newrelic.Application +} + +// NewTestApp creates an ExpectApp with the given ConnectReply function and Config function +func NewTestApp(replyfn func(*internal.ConnectReply), cfgFn func(*newrelic.Config)) ExpectApp { + + cfg := newrelic.NewConfig(SampleAppName, testLicenseKey) + + if nil != cfgFn { + cfgFn(&cfg) + } + + // Prevent spawning app goroutines in tests. + if !cfg.ServerlessMode.Enabled { + cfg.Enabled = false + } + + app, err := newrelic.NewApplication(cfg) + if nil != err { + panic(err) + } + + internal.HarvestTesting(app, replyfn) + + return app.(ExpectApp) +} + +// NewBasicTestApp creates an ExpectApp with the standard testing connect reply function and config +func NewBasicTestApp() ExpectApp { + return NewTestApp(nil, BasicConfigFn) +} + +// BasicConfigFn is a default config function to be used when no special settings are needed for a test app +var BasicConfigFn = func(cfg *newrelic.Config) { + cfg.Enabled = false +} + +// DTEnabledCfgFn is a reusable Config function that sets Distributed Tracing to enabled +var DTEnabledCfgFn = func(cfg *newrelic.Config) { + cfg.Enabled = false + cfg.DistributedTracer.Enabled = true +} + +// SampleEverythingReplyFn is a reusable ConnectReply function that samples everything +var SampleEverythingReplyFn = func(reply *internal.ConnectReply) { + reply.AdaptiveSampler = internal.SampleEverything{} +} diff --git a/internal/intrinsics.go b/internal/intrinsics.go new file mode 100644 index 000000000..6925e49ff --- /dev/null +++ b/internal/intrinsics.go @@ -0,0 +1,41 @@ +package internal + +import ( + "bytes" +) + +func addOptionalStringField(w *jsonFieldsWriter, key, value string) { + if value != "" { + w.stringField(key, value) + } +} + +func intrinsicsJSON(e *TxnEvent, buf 
*bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + + buf.WriteByte('{') + + w.floatField("totalTime", e.TotalTime.Seconds()) + + if e.BetterCAT.Enabled { + w.stringField("guid", e.BetterCAT.ID) + w.stringField("traceId", e.BetterCAT.TraceID()) + w.writerField("priority", e.BetterCAT.Priority) + w.boolField("sampled", e.BetterCAT.Sampled) + } + + if e.CrossProcess.Used() { + addOptionalStringField(&w, "client_cross_process_id", e.CrossProcess.ClientID) + addOptionalStringField(&w, "trip_id", e.CrossProcess.TripID) + addOptionalStringField(&w, "path_hash", e.CrossProcess.PathHash) + addOptionalStringField(&w, "referring_transaction_guid", e.CrossProcess.ReferringTxnGUID) + } + + if e.CrossProcess.IsSynthetics() { + addOptionalStringField(&w, "synthetics_resource_id", e.CrossProcess.Synthetics.ResourceID) + addOptionalStringField(&w, "synthetics_job_id", e.CrossProcess.Synthetics.JobID) + addOptionalStringField(&w, "synthetics_monitor_id", e.CrossProcess.Synthetics.MonitorID) + } + + buf.WriteByte('}') +} diff --git a/internal/json_object_writer.go b/internal/json_object_writer.go new file mode 100644 index 000000000..e9533f141 --- /dev/null +++ b/internal/json_object_writer.go @@ -0,0 +1,61 @@ +package internal + +import ( + "bytes" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type jsonWriter interface { + WriteJSON(buf *bytes.Buffer) +} + +type jsonFieldsWriter struct { + buf *bytes.Buffer + needsComma bool +} + +func (w *jsonFieldsWriter) addKey(key string) { + if w.needsComma { + w.buf.WriteByte(',') + } else { + w.needsComma = true + } + // defensively assume that the key needs escaping: + jsonx.AppendString(w.buf, key) + w.buf.WriteByte(':') +} + +func (w *jsonFieldsWriter) stringField(key string, val string) { + w.addKey(key) + jsonx.AppendString(w.buf, val) +} + +func (w *jsonFieldsWriter) intField(key string, val int64) { + w.addKey(key) + jsonx.AppendInt(w.buf, val) +} + +func (w *jsonFieldsWriter) floatField(key string, val float64) { + 
w.addKey(key) + jsonx.AppendFloat(w.buf, val) +} + +func (w *jsonFieldsWriter) boolField(key string, val bool) { + w.addKey(key) + if val { + w.buf.WriteString("true") + } else { + w.buf.WriteString("false") + } +} + +func (w *jsonFieldsWriter) rawField(key string, val JSONString) { + w.addKey(key) + w.buf.WriteString(string(val)) +} + +func (w *jsonFieldsWriter) writerField(key string, val jsonWriter) { + w.addKey(key) + val.WriteJSON(w.buf) +} diff --git a/internal/jsonx/encode.go b/internal/jsonx/encode.go new file mode 100644 index 000000000..6495829f7 --- /dev/null +++ b/internal/jsonx/encode.go @@ -0,0 +1,174 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonx extends the encoding/json package to encode JSON +// incrementally and without requiring reflection. +package jsonx + +import ( + "bytes" + "encoding/json" + "math" + "reflect" + "strconv" + "unicode/utf8" +) + +var hex = "0123456789abcdef" + +// AppendString escapes s appends it to buf. +func AppendString(buf *bytes.Buffer, s string) { + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + case '\t': + buf.WriteByte('\\') + buf.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \n and \r, + // as well as <, > and &. The latter are escaped because they + // can lead to security holes when user-controlled strings + // are rendered into JSON and served to some browsers. 
+ buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} + +// AppendStringArray appends an array of string literals to buf. +func AppendStringArray(buf *bytes.Buffer, a ...string) { + buf.WriteByte('[') + for i, s := range a { + if i > 0 { + buf.WriteByte(',') + } + AppendString(buf, s) + } + buf.WriteByte(']') +} + +// AppendFloat appends a numeric literal representing the value to buf. +func AppendFloat(buf *bytes.Buffer, x float64) error { + var scratch [64]byte + + if math.IsInf(x, 0) || math.IsNaN(x) { + return &json.UnsupportedValueError{ + Value: reflect.ValueOf(x), + Str: strconv.FormatFloat(x, 'g', -1, 64), + } + } + + buf.Write(strconv.AppendFloat(scratch[:0], x, 'g', -1, 64)) + return nil +} + +// AppendFloatArray appends an array of numeric literals to buf. 
+func AppendFloatArray(buf *bytes.Buffer, a ...float64) error { + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + if err := AppendFloat(buf, x); err != nil { + return err + } + } + buf.WriteByte(']') + return nil +} + +// AppendInt appends a numeric literal representing the value to buf. +func AppendInt(buf *bytes.Buffer, x int64) { + var scratch [64]byte + buf.Write(strconv.AppendInt(scratch[:0], x, 10)) +} + +// AppendIntArray appends an array of numeric literals to buf. +func AppendIntArray(buf *bytes.Buffer, a ...int64) { + var scratch [64]byte + + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + buf.Write(strconv.AppendInt(scratch[:0], x, 10)) + } + buf.WriteByte(']') +} + +// AppendUint appends a numeric literal representing the value to buf. +func AppendUint(buf *bytes.Buffer, x uint64) { + var scratch [64]byte + buf.Write(strconv.AppendUint(scratch[:0], x, 10)) +} + +// AppendUintArray appends an array of numeric literals to buf. 
+func AppendUintArray(buf *bytes.Buffer, a ...uint64) { + var scratch [64]byte + + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + buf.Write(strconv.AppendUint(scratch[:0], x, 10)) + } + buf.WriteByte(']') +} diff --git a/internal/jsonx/encode_test.go b/internal/jsonx/encode_test.go new file mode 100644 index 000000000..2b97c5f0d --- /dev/null +++ b/internal/jsonx/encode_test.go @@ -0,0 +1,179 @@ +package jsonx + +import ( + "bytes" + "math" + "testing" +) + +func TestAppendFloat(t *testing.T) { + buf := &bytes.Buffer{} + + err := AppendFloat(buf, math.NaN()) + if err == nil { + t.Error("AppendFloat(NaN) should return an error") + } + + err = AppendFloat(buf, math.Inf(1)) + if err == nil { + t.Error("AppendFloat(+Inf) should return an error") + } + + err = AppendFloat(buf, math.Inf(-1)) + if err == nil { + t.Error("AppendFloat(-Inf) should return an error") + } +} + +func TestAppendFloats(t *testing.T) { + buf := &bytes.Buffer{} + + AppendFloatArray(buf) + if want, got := "[]", buf.String(); want != got { + t.Errorf("AppendFloatArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendFloatArray(buf, 3.14) + if want, got := "[3.14]", buf.String(); want != got { + t.Errorf("AppendFloatArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendFloatArray(buf, 1, 2) + if want, got := "[1,2]", buf.String(); want != got { + t.Errorf("AppendFloatArray(buf)=%q want=%q", got, want) + } +} + +func TestAppendInt(t *testing.T) { + buf := &bytes.Buffer{} + + AppendInt(buf, 42) + if got := buf.String(); got != "42" { + t.Errorf("AppendUint(42) = %#q want %#q", got, "42") + } + + buf.Reset() + AppendInt(buf, -42) + if got := buf.String(); got != "-42" { + t.Errorf("AppendUint(-42) = %#q want %#q", got, "-42") + } +} + +func TestAppendIntArray(t *testing.T) { + buf := &bytes.Buffer{} + + AppendIntArray(buf) + if want, got := "[]", buf.String(); want != got { + t.Errorf("AppendIntArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + 
AppendIntArray(buf, 42) + if want, got := "[42]", buf.String(); want != got { + t.Errorf("AppendIntArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendIntArray(buf, 1, -2) + if want, got := "[1,-2]", buf.String(); want != got { + t.Errorf("AppendIntArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendIntArray(buf, 1, -2, 0) + if want, got := "[1,-2,0]", buf.String(); want != got { + t.Errorf("AppendIntArray(buf)=%q want=%q", got, want) + } +} + +func TestAppendUint(t *testing.T) { + buf := &bytes.Buffer{} + + AppendUint(buf, 42) + if got := buf.String(); got != "42" { + t.Errorf("AppendUint(42) = %#q want %#q", got, "42") + } +} + +func TestAppendUintArray(t *testing.T) { + buf := &bytes.Buffer{} + + AppendUintArray(buf) + if want, got := "[]", buf.String(); want != got { + t.Errorf("AppendUintArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendUintArray(buf, 42) + if want, got := "[42]", buf.String(); want != got { + t.Errorf("AppendUintArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendUintArray(buf, 1, 2) + if want, got := "[1,2]", buf.String(); want != got { + t.Errorf("AppendUintArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendUintArray(buf, 1, 2, 3) + if want, got := "[1,2,3]", buf.String(); want != got { + t.Errorf("AppendUintArray(buf)=%q want=%q", got, want) + } +} + +var encodeStringTests = []struct { + in string + out string +}{ + {"\x00", `"\u0000"`}, + {"\x01", `"\u0001"`}, + {"\x02", `"\u0002"`}, + {"\x03", `"\u0003"`}, + {"\x04", `"\u0004"`}, + {"\x05", `"\u0005"`}, + {"\x06", `"\u0006"`}, + {"\x07", `"\u0007"`}, + {"\x08", `"\u0008"`}, + {"\x09", `"\t"`}, + {"\x0a", `"\n"`}, + {"\x0b", `"\u000b"`}, + {"\x0c", `"\u000c"`}, + {"\x0d", `"\r"`}, + {"\x0e", `"\u000e"`}, + {"\x0f", `"\u000f"`}, + {"\x10", `"\u0010"`}, + {"\x11", `"\u0011"`}, + {"\x12", `"\u0012"`}, + {"\x13", `"\u0013"`}, + {"\x14", `"\u0014"`}, + {"\x15", `"\u0015"`}, + {"\x16", `"\u0016"`}, + {"\x17", `"\u0017"`}, + {"\x18", 
`"\u0018"`}, + {"\x19", `"\u0019"`}, + {"\x1a", `"\u001a"`}, + {"\x1b", `"\u001b"`}, + {"\x1c", `"\u001c"`}, + {"\x1d", `"\u001d"`}, + {"\x1e", `"\u001e"`}, + {"\x1f", `"\u001f"`}, + {"\\", `"\\"`}, + {`"`, `"\""`}, + {"the\u2028quick\t\nbrown\u2029fox", `"the\u2028quick\t\nbrown\u2029fox"`}, +} + +func TestAppendString(t *testing.T) { + buf := &bytes.Buffer{} + + for _, tt := range encodeStringTests { + buf.Reset() + + AppendString(buf, tt.in) + if got := buf.String(); got != tt.out { + t.Errorf("AppendString(%q) = %#q, want %#q", tt.in, got, tt.out) + } + } +} diff --git a/internal/labels.go b/internal/labels.go new file mode 100644 index 000000000..b3671c65c --- /dev/null +++ b/internal/labels.go @@ -0,0 +1,23 @@ +package internal + +import "encoding/json" + +// Labels is used for connect JSON formatting. +type Labels map[string]string + +// MarshalJSON requires a comment for golint? +func (l Labels) MarshalJSON() ([]byte, error) { + ls := make([]struct { + Key string `json:"label_type"` + Value string `json:"label_value"` + }, len(l)) + + i := 0 + for key, val := range l { + ls[i].Key = key + ls[i].Value = val + i++ + } + + return json.Marshal(ls) +} diff --git a/internal/limits.go b/internal/limits.go new file mode 100644 index 000000000..31362d73e --- /dev/null +++ b/internal/limits.go @@ -0,0 +1,75 @@ +package internal + +import "time" + +const ( + // app behavior + + // FixedHarvestPeriod is the period that fixed period data (metrics, + // traces, and span events) is sent to New Relic. + FixedHarvestPeriod = 60 * time.Second + // DefaultConfigurableEventHarvestMs is the period for custom, error, + // and transaction events if the connect response's + // "event_harvest_config.report_period_ms" is missing or invalid. + DefaultConfigurableEventHarvestMs = 60 * 1000 + // CollectorTimeout is the timeout used in the client for communication + // with New Relic's servers. 
+ CollectorTimeout = 20 * time.Second + // AppDataChanSize is the size of the channel that contains data sent + the app processor. + AppDataChanSize = 200 + failedMetricAttemptsLimit = 5 + failedEventsAttemptsLimit = 10 + // maxPayloadSizeInBytes specifies the maximum payload size in bytes that + // should be sent to any endpoint + maxPayloadSizeInBytes = 1000 * 1000 + + // transaction behavior + maxStackTraceFrames = 100 + // MaxTxnErrors is the maximum number of errors captured per + // transaction. + MaxTxnErrors = 5 + maxTxnSlowQueries = 10 + + startingTxnTraceNodes = 16 + maxTxnTraceNodes = 256 + + // harvest data + maxMetrics = 2 * 1000 + // MaxCustomEvents is the maximum number of Custom Events that can be captured + // per 60-second harvest cycle + MaxCustomEvents = 10 * 1000 + // MaxTxnEvents is the maximum number of Transaction Events that can be captured + // per 60-second harvest cycle + MaxTxnEvents = 10 * 1000 + maxRegularTraces = 1 + maxSyntheticsTraces = 20 + // MaxErrorEvents is the maximum number of Error Events that can be captured + // per 60-second harvest cycle + MaxErrorEvents = 100 + maxHarvestErrors = 20 + maxHarvestSlowSQLs = 10 + // MaxSpanEvents is the maximum number of Span Events that can be captured + // per 60-second harvest cycle + MaxSpanEvents = 1000 + + // attributes + attributeKeyLengthLimit = 255 + attributeValueLengthLimit = 255 + attributeUserLimit = 64 + // AttributeErrorLimit limits the number of extra attributes that can be + // provided when noticing an error. + AttributeErrorLimit = 32 + customEventAttributeLimit = 64 + + // Limits affecting Config validation are found in the config package. + + // RuntimeSamplerPeriod is the period of the runtime sampler. Runtime + // metrics should not depend on the sampler period, but the period must + // be the same across instances. 
For that reason, this value should not + // be changed without notifying customers that they must update all + // instance simultaneously for valid runtime metrics. + RuntimeSamplerPeriod = 60 * time.Second + + txnNameCacheLimit = 40 +) diff --git a/internal/logger/logger.go b/internal/logger/logger.go new file mode 100644 index 000000000..9fda99da5 --- /dev/null +++ b/internal/logger/logger.go @@ -0,0 +1,93 @@ +package logger + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" +) + +// Logger matches newrelic.Logger to allow implementations to be passed to +// internal packages. +type Logger interface { + Error(msg string, context map[string]interface{}) + Warn(msg string, context map[string]interface{}) + Info(msg string, context map[string]interface{}) + Debug(msg string, context map[string]interface{}) + DebugEnabled() bool +} + +// ShimLogger implements Logger and does nothing. +type ShimLogger struct { + // IsDebugEnabled is useful as it allows DebugEnabled code paths to be + // tested. + IsDebugEnabled bool +} + +// Error allows ShimLogger to implement Logger. +func (s ShimLogger) Error(string, map[string]interface{}) {} + +// Warn allows ShimLogger to implement Logger. +func (s ShimLogger) Warn(string, map[string]interface{}) {} + +// Info allows ShimLogger to implement Logger. +func (s ShimLogger) Info(string, map[string]interface{}) {} + +// Debug allows ShimLogger to implement Logger. +func (s ShimLogger) Debug(string, map[string]interface{}) {} + +// DebugEnabled allows ShimLogger to implement Logger. +func (s ShimLogger) DebugEnabled() bool { return s.IsDebugEnabled } + +type logFile struct { + l *log.Logger + doDebug bool +} + +// New creates a basic Logger. 
+func New(w io.Writer, doDebug bool) Logger { + return &logFile{ + l: log.New(w, logPid, logFlags), + doDebug: doDebug, + } +} + +const logFlags = log.Ldate | log.Ltime | log.Lmicroseconds + +var ( + logPid = fmt.Sprintf("(%d) ", os.Getpid()) +) + +func (f *logFile) fire(level, msg string, ctx map[string]interface{}) { + js, err := json.Marshal(struct { + Level string `json:"level"` + Event string `json:"msg"` + Context map[string]interface{} `json:"context"` + }{ + level, + msg, + ctx, + }) + if nil == err { + f.l.Print(string(js)) + } else { + f.l.Printf("unable to marshal log entry: %v", err) + } +} + +func (f *logFile) Error(msg string, ctx map[string]interface{}) { + f.fire("error", msg, ctx) +} +func (f *logFile) Warn(msg string, ctx map[string]interface{}) { + f.fire("warn", msg, ctx) +} +func (f *logFile) Info(msg string, ctx map[string]interface{}) { + f.fire("info", msg, ctx) +} +func (f *logFile) Debug(msg string, ctx map[string]interface{}) { + if f.doDebug { + f.fire("debug", msg, ctx) + } +} +func (f *logFile) DebugEnabled() bool { return f.doDebug } diff --git a/internal/metric_names.go b/internal/metric_names.go new file mode 100644 index 000000000..97daf9c22 --- /dev/null +++ b/internal/metric_names.go @@ -0,0 +1,302 @@ +package internal + +const ( + apdexRollup = "Apdex" + apdexPrefix = "Apdex/" + + webRollup = "WebTransaction" + backgroundRollup = "OtherTransaction/all" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Total-Time-Async.md + totalTimeWeb = "WebTransactionTotalTime" + totalTimeBackground = "OtherTransactionTotalTime" + + errorsPrefix = "Errors/" + + // "HttpDispatcher" metric is used for the overview graph, and + // therefore should only be made for web transactions. 
+ dispatcherMetric = "HttpDispatcher" + + queueMetric = "WebFrontend/QueueTime" + + webMetricPrefix = "WebTransaction/Go" + backgroundMetricPrefix = "OtherTransaction/Go" + + instanceReporting = "Instance/Reporting" + + // https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents + customEventsSeen = "Supportability/Events/Customer/Seen" + customEventsSent = "Supportability/Events/Customer/Sent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md + txnEventsSeen = "Supportability/AnalyticsEvents/TotalEventsSeen" + txnEventsSent = "Supportability/AnalyticsEvents/TotalEventsSent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md + errorEventsSeen = "Supportability/Events/TransactionError/Seen" + errorEventsSent = "Supportability/Events/TransactionError/Sent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Span-Events.md + spanEventsSeen = "Supportability/SpanEvent/TotalEventsSeen" + spanEventsSent = "Supportability/SpanEvent/TotalEventsSent" + + supportabilityDropped = "Supportability/MetricsDropped" + + // Runtime/System Metrics + memoryPhysical = "Memory/Physical" + heapObjectsAllocated = "Memory/Heap/AllocatedObjects" + cpuUserUtilization = "CPU/User/Utilization" + cpuSystemUtilization = "CPU/System/Utilization" + cpuUserTime = "CPU/User Time" + cpuSystemTime = "CPU/System Time" + runGoroutine = "Go/Runtime/Goroutines" + gcPauseFraction = "GC/System/Pause Fraction" + gcPauses = "GC/System/Pauses" + + // Distributed Tracing Supportability Metrics + supportTracingAcceptSuccess = "Supportability/DistributedTrace/AcceptPayload/Success" + supportTracingAcceptException = "Supportability/DistributedTrace/AcceptPayload/Exception" + supportTracingAcceptParseException = "Supportability/DistributedTrace/AcceptPayload/ParseException" + supportTracingCreateBeforeAccept = "Supportability/DistributedTrace/AcceptPayload/Ignored/CreateBeforeAccept" + 
supportTracingIgnoredMultiple = "Supportability/DistributedTrace/AcceptPayload/Ignored/Multiple" + supportTracingIgnoredVersion = "Supportability/DistributedTrace/AcceptPayload/Ignored/MajorVersion" + supportTracingAcceptUntrustedAccount = "Supportability/DistributedTrace/AcceptPayload/Ignored/UntrustedAccount" + supportTracingAcceptNull = "Supportability/DistributedTrace/AcceptPayload/Ignored/Null" + supportTracingCreatePayloadSuccess = "Supportability/DistributedTrace/CreatePayload/Success" + supportTracingCreatePayloadException = "Supportability/DistributedTrace/CreatePayload/Exception" + + // Configurable event harvest supportability metrics + supportReportPeriod = "Supportability/EventHarvest/ReportPeriod" + supportTxnEventLimit = "Supportability/EventHarvest/AnalyticEventData/HarvestLimit" + supportCustomEventLimit = "Supportability/EventHarvest/CustomEventData/HarvestLimit" + supportErrorEventLimit = "Supportability/EventHarvest/ErrorEventData/HarvestLimit" + supportSpanEventLimit = "Supportability/EventHarvest/SpanEventData/HarvestLimit" +) + +// DistributedTracingSupport is used to track distributed tracing activity for +// supportability. 
+type DistributedTracingSupport struct { + AcceptPayloadSuccess bool // AcceptPayload was called successfully + AcceptPayloadException bool // AcceptPayload had a generic exception + AcceptPayloadParseException bool // AcceptPayload had a parsing exception + AcceptPayloadCreateBeforeAccept bool // AcceptPayload was ignored because CreatePayload had already been called + AcceptPayloadIgnoredMultiple bool // AcceptPayload was ignored because AcceptPayload had already been called + AcceptPayloadIgnoredVersion bool // AcceptPayload was ignored because the payload's major version was greater than the agent's + AcceptPayloadUntrustedAccount bool // AcceptPayload was ignored because the payload was untrusted + AcceptPayloadNullPayload bool // AcceptPayload was ignored because the payload was nil + CreatePayloadSuccess bool // CreatePayload was called successfully + CreatePayloadException bool // CreatePayload had a generic exception +} + +type rollupMetric struct { + all string + allWeb string + allOther string +} + +func newRollupMetric(s string) rollupMetric { + return rollupMetric{ + all: s + "all", + allWeb: s + "allWeb", + allOther: s + "allOther", + } +} + +func (r rollupMetric) webOrOther(isWeb bool) string { + if isWeb { + return r.allWeb + } + return r.allOther +} + +var ( + errorsRollupMetric = newRollupMetric("Errors/") + + // source.datanerd.us/agents/agent-specs/blob/master/APIs/external_segment.md + // source.datanerd.us/agents/agent-specs/blob/master/APIs/external_cat.md + // source.datanerd.us/agents/agent-specs/blob/master/Cross-Application-Tracing-PORTED.md + externalRollupMetric = newRollupMetric("External/") + + // source.datanerd.us/agents/agent-specs/blob/master/Datastore-Metrics-PORTED.md + datastoreRollupMetric = newRollupMetric("Datastore/") + + datastoreProductMetricsCache = map[string]rollupMetric{ + "Cassandra": newRollupMetric("Datastore/Cassandra/"), + "Derby": newRollupMetric("Datastore/Derby/"), + "Elasticsearch": 
newRollupMetric("Datastore/Elasticsearch/"), + "Firebird": newRollupMetric("Datastore/Firebird/"), + "IBMDB2": newRollupMetric("Datastore/IBMDB2/"), + "Informix": newRollupMetric("Datastore/Informix/"), + "Memcached": newRollupMetric("Datastore/Memcached/"), + "MongoDB": newRollupMetric("Datastore/MongoDB/"), + "MySQL": newRollupMetric("Datastore/MySQL/"), + "MSSQL": newRollupMetric("Datastore/MSSQL/"), + "Oracle": newRollupMetric("Datastore/Oracle/"), + "Postgres": newRollupMetric("Datastore/Postgres/"), + "Redis": newRollupMetric("Datastore/Redis/"), + "Solr": newRollupMetric("Datastore/Solr/"), + "SQLite": newRollupMetric("Datastore/SQLite/"), + "CouchDB": newRollupMetric("Datastore/CouchDB/"), + "Riak": newRollupMetric("Datastore/Riak/"), + "VoltDB": newRollupMetric("Datastore/VoltDB/"), + } +) + +func customSegmentMetric(s string) string { + return "Custom/" + s +} + +// customMetric is used to construct custom metrics from the input given to +// Application.RecordCustomMetric. Note that the "Custom/" prefix helps prevent +// collision with other agent metrics, but does not eliminate the possibility +// since "Custom/" is also used for segments. +func customMetric(customerInput string) string { + return "Custom/" + customerInput +} + +// DatastoreMetricKey contains the fields by which datastore metrics are +// aggregated. +type DatastoreMetricKey struct { + Product string + Collection string + Operation string + Host string + PortPathOrID string +} + +type externalMetricKey struct { + Host string + Library string + Method string + ExternalCrossProcessID string + ExternalTransactionName string +} + +// MessageMetricKey is the key to use for message segments. +type MessageMetricKey struct { + Library string + DestinationType string + Consumer bool + DestinationName string + DestinationTemp bool +} + +// Name returns the metric name value for this MessageMetricKey to be used for +// scoped and unscoped metrics. 
+// +// Producers +// MessageBroker/{Library}/{Destination Type}/{Action}/Named/{Destination Name} +// MessageBroker/{Library}/{Destination Type}/{Action}/Temp +// +// Consumers +// OtherTransaction/Message/{Library}/{DestinationType}/Named/{Destination Name} +// OtherTransaction/Message/{Library}/{DestinationType}/Temp +func (key MessageMetricKey) Name() string { + var destination string + if key.DestinationTemp { + destination = "Temp" + } else if key.DestinationName == "" { + destination = "Named/Unknown" + } else { + destination = "Named/" + key.DestinationName + } + + if key.Consumer { + return "Message/" + key.Library + + "/" + key.DestinationType + + "/" + destination + } + return "MessageBroker/" + key.Library + + "/" + key.DestinationType + + "/Produce/" + destination +} + +func datastoreScopedMetric(key DatastoreMetricKey) string { + if "" != key.Collection { + return datastoreStatementMetric(key) + } + return datastoreOperationMetric(key) +} + +// Datastore/{datastore}/* +func datastoreProductMetric(key DatastoreMetricKey) rollupMetric { + d, ok := datastoreProductMetricsCache[key.Product] + if ok { + return d + } + return newRollupMetric("Datastore/" + key.Product + "/") +} + +// Datastore/operation/{datastore}/{operation} +func datastoreOperationMetric(key DatastoreMetricKey) string { + return "Datastore/operation/" + key.Product + + "/" + key.Operation +} + +// Datastore/statement/{datastore}/{table}/{operation} +func datastoreStatementMetric(key DatastoreMetricKey) string { + return "Datastore/statement/" + key.Product + + "/" + key.Collection + + "/" + key.Operation +} + +// Datastore/instance/{datastore}/{host}/{port_path_or_id} +func datastoreInstanceMetric(key DatastoreMetricKey) string { + return "Datastore/instance/" + key.Product + + "/" + key.Host + + "/" + key.PortPathOrID +} + +func (key externalMetricKey) scopedMetric() string { + if "" != key.ExternalCrossProcessID && "" != key.ExternalTransactionName { + return 
externalTransactionMetric(key) + } + + if key.Method == "" { + // External/{host}/{library} + return "External/" + key.Host + "/" + key.Library + } + // External/{host}/{library}/{method} + return "External/" + key.Host + "/" + key.Library + "/" + key.Method +} + +// External/{host}/all +func externalHostMetric(key externalMetricKey) string { + return "External/" + key.Host + "/all" +} + +// ExternalApp/{host}/{external_id}/all +func externalAppMetric(key externalMetricKey) string { + return "ExternalApp/" + key.Host + + "/" + key.ExternalCrossProcessID + "/all" +} + +// ExternalTransaction/{host}/{external_id}/{external_txnname} +func externalTransactionMetric(key externalMetricKey) string { + return "ExternalTransaction/" + key.Host + + "/" + key.ExternalCrossProcessID + + "/" + key.ExternalTransactionName +} + +func callerFields(c payloadCaller) string { + return "/" + c.Type + + "/" + c.Account + + "/" + c.App + + "/" + c.TransportType + + "/" +} + +// DurationByCaller/{type}/{account}/{app}/{transport}/* +func durationByCallerMetric(c payloadCaller) rollupMetric { + return newRollupMetric("DurationByCaller" + callerFields(c)) +} + +// ErrorsByCaller/{type}/{account}/{app}/{transport}/* +func errorsByCallerMetric(c payloadCaller) rollupMetric { + return newRollupMetric("ErrorsByCaller" + callerFields(c)) +} + +// TransportDuration/{type}/{account}/{app}/{transport}/* +func transportDurationMetric(c payloadCaller) rollupMetric { + return newRollupMetric("TransportDuration" + callerFields(c)) +} diff --git a/internal/metric_rules.go b/internal/metric_rules.go new file mode 100644 index 000000000..b634a8b5b --- /dev/null +++ b/internal/metric_rules.go @@ -0,0 +1,164 @@ +package internal + +import ( + "encoding/json" + "regexp" + "sort" + "strings" +) + +type ruleResult int + +const ( + ruleMatched ruleResult = iota + ruleUnmatched + ruleIgnore +) + +type metricRule struct { + // 'Ignore' indicates if the entire transaction should be discarded if + // there is a 
match. This field is only used by "url_rules" and + // "transaction_name_rules", not "metric_name_rules". + Ignore bool `json:"ignore"` + EachSegment bool `json:"each_segment"` + ReplaceAll bool `json:"replace_all"` + Terminate bool `json:"terminate_chain"` + Order int `json:"eval_order"` + OriginalReplacement string `json:"replacement"` + RawExpr string `json:"match_expression"` + + // Go's regexp backreferences use '${1}' instead of the Perlish '\1', so + // we transform the replacement string into the Go syntax and store it + // here. + TransformedReplacement string + re *regexp.Regexp +} + +type metricRules []*metricRule + +// Go's regexp backreferences use `${1}` instead of the Perlish `\1`, so we must +// transform the replacement string. This is non-trivial: `\1` is a +// backreference but `\\1` is not. Rather than count the number of back slashes +// preceding the digit, we simply skip rules with tricky replacements. +var ( + transformReplacementAmbiguous = regexp.MustCompile(`\\\\([0-9]+)`) + transformReplacementRegex = regexp.MustCompile(`\\([0-9]+)`) + transformReplacementReplacement = "$${${1}}" +) + +func (rules *metricRules) UnmarshalJSON(data []byte) (err error) { + var raw []*metricRule + + if err := json.Unmarshal(data, &raw); nil != err { + return err + } + + valid := make(metricRules, 0, len(raw)) + + for _, r := range raw { + re, err := regexp.Compile("(?i)" + r.RawExpr) + if err != nil { + // TODO + // Warn("unable to compile rule", { + // "match_expression": r.RawExpr, + // "error": err.Error(), + // }) + continue + } + + if transformReplacementAmbiguous.MatchString(r.OriginalReplacement) { + // TODO + // Warn("unable to transform replacement", { + // "match_expression": r.RawExpr, + // "replacement": r.OriginalReplacement, + // }) + continue + } + + r.re = re + r.TransformedReplacement = transformReplacementRegex.ReplaceAllString(r.OriginalReplacement, + transformReplacementReplacement) + valid = append(valid, r) + } + + sort.Sort(valid) + + 
*rules = valid + return nil +} + +func (rules metricRules) Len() int { + return len(rules) +} + +// Rules should be applied in increasing order +func (rules metricRules) Less(i, j int) bool { + return rules[i].Order < rules[j].Order +} +func (rules metricRules) Swap(i, j int) { + rules[i], rules[j] = rules[j], rules[i] +} + +func replaceFirst(re *regexp.Regexp, s string, replacement string) (ruleResult, string) { + // Note that ReplaceAllStringFunc cannot be used here since it does + // not replace $1 placeholders. + loc := re.FindStringIndex(s) + if nil == loc { + return ruleUnmatched, s + } + firstMatch := s[loc[0]:loc[1]] + firstMatchReplaced := re.ReplaceAllString(firstMatch, replacement) + return ruleMatched, s[0:loc[0]] + firstMatchReplaced + s[loc[1]:] +} + +func (r *metricRule) apply(s string) (ruleResult, string) { + // Rules are strange, and there is no spec. + // This code attempts to duplicate the logic of the PHP agent. + // Ambiguity abounds. + + if r.Ignore { + if r.re.MatchString(s) { + return ruleIgnore, "" + } + return ruleUnmatched, s + } + + if r.ReplaceAll { + if r.re.MatchString(s) { + return ruleMatched, r.re.ReplaceAllString(s, r.TransformedReplacement) + } + return ruleUnmatched, s + } else if r.EachSegment { + segments := strings.Split(s, "/") + applied := make([]string, len(segments)) + result := ruleUnmatched + for i, segment := range segments { + var segmentMatched ruleResult + segmentMatched, applied[i] = replaceFirst(r.re, segment, r.TransformedReplacement) + if segmentMatched == ruleMatched { + result = ruleMatched + } + } + return result, strings.Join(applied, "/") + } else { + return replaceFirst(r.re, s, r.TransformedReplacement) + } +} + +func (rules metricRules) Apply(input string) string { + var res ruleResult + s := input + + for _, rule := range rules { + res, s = rule.apply(s) + + if ruleIgnore == res { + return "" + } + if (ruleMatched == res) && rule.Terminate { + break + } + } + + return s +} diff --git 
a/internal/metric_rules_test.go b/internal/metric_rules_test.go new file mode 100644 index 000000000..fb13adee6 --- /dev/null +++ b/internal/metric_rules_test.go @@ -0,0 +1,96 @@ +package internal + +import ( + "encoding/json" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestMetricRules(t *testing.T) { + var tcs []struct { + Testname string `json:"testname"` + Rules metricRules `json:"rules"` + Tests []struct { + Input string `json:"input"` + Expected string `json:"expected"` + } `json:"tests"` + } + + err := crossagent.ReadJSON("rules.json", &tcs) + if err != nil { + t.Fatal(err) + } + + for _, tc := range tcs { + // This test relies upon Perl-specific regex syntax (negative + // lookahead assertions) which are not implemented in Go's + // regexp package. We believe these types of rules are + // exceedingly rare in practice, so we're skipping + // implementation of this exotic syntax for now. + if tc.Testname == "saxon's test" { + continue + } + + for _, x := range tc.Tests { + out := tc.Rules.Apply(x.Input) + if out != x.Expected { + t.Fatal(tc.Testname, x.Input, out, x.Expected) + } + } + } +} + +func TestMetricRuleWithNegativeLookaheadAssertion(t *testing.T) { + js := `[{ + "match_expression":"^(?!account|application).*", + "replacement":"*", + "ignore":false, + "eval_order":0, + "each_segment":true + }]` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil != err { + t.Fatal(err) + } + if 0 != rules.Len() { + t.Fatal(rules) + } +} + +func TestNilApplyRules(t *testing.T) { + var rules metricRules + + input := "hello" + out := rules.Apply(input) + if input != out { + t.Fatal(input, out) + } +} + +func TestAmbiguousReplacement(t *testing.T) { + js := `[{ + "match_expression":"(.*)/[^/]*.(bmp|css|gif|ico|jpg|jpeg|js|png)", + "replacement":"\\\\1/*.\\2", + "ignore":false, + "eval_order":0 + }]` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil != err { + t.Fatal(err) + } + if 0 != 
rules.Len() { + t.Fatal(rules) + } +} + +func TestBadMetricRulesJSON(t *testing.T) { + js := `{}` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil == err { + t.Fatal("missing bad json error") + } +} diff --git a/internal/metrics.go b/internal/metrics.go new file mode 100644 index 000000000..1cbc5fcf4 --- /dev/null +++ b/internal/metrics.go @@ -0,0 +1,261 @@ +package internal + +import ( + "bytes" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type metricForce int + +const ( + forced metricForce = iota + unforced +) + +type metricID struct { + Name string `json:"name"` + Scope string `json:"scope,omitempty"` +} + +type metricData struct { + // These values are in the units expected by the collector. + countSatisfied float64 // Seconds, or count for Apdex + totalTolerated float64 // Seconds, or count for Apdex + exclusiveFailed float64 // Seconds, or count for Apdex + min float64 // Seconds + max float64 // Seconds + sumSquares float64 // Seconds**2, or 0 for Apdex +} + +func metricDataFromDuration(duration, exclusive time.Duration) metricData { + ds := duration.Seconds() + return metricData{ + countSatisfied: 1, + totalTolerated: ds, + exclusiveFailed: exclusive.Seconds(), + min: ds, + max: ds, + sumSquares: ds * ds, + } +} + +type metric struct { + forced metricForce + data metricData +} + +type metricTable struct { + metricPeriodStart time.Time + failedHarvests int + maxTableSize int // After this max is reached, only forced metrics are added + metrics map[metricID]*metric +} + +func newMetricTable(maxTableSize int, now time.Time) *metricTable { + return &metricTable{ + metricPeriodStart: now, + metrics: make(map[metricID]*metric), + maxTableSize: maxTableSize, + failedHarvests: 0, + } +} + +func (mt *metricTable) full() bool { + return len(mt.metrics) >= mt.maxTableSize +} + +func (data *metricData) aggregate(src metricData) { + data.countSatisfied += src.countSatisfied + data.totalTolerated += src.totalTolerated + 
data.exclusiveFailed += src.exclusiveFailed + + if src.min < data.min { + data.min = src.min + } + if src.max > data.max { + data.max = src.max + } + + data.sumSquares += src.sumSquares +} + +func (mt *metricTable) mergeMetric(id metricID, m metric) { + if to := mt.metrics[id]; nil != to { + to.data.aggregate(m.data) + return + } + + if mt.full() && (unforced == m.forced) { + mt.addSingleCount(supportabilityDropped, forced) + return + } + // NOTE: `new` is used in place of `&m` since the latter will make `m` + // get heap allocated regardless of whether or not this line gets + // reached (running go version go1.5 darwin/amd64). See + // BenchmarkAddingSameMetrics. + alloc := new(metric) + *alloc = m + mt.metrics[id] = alloc +} + +func (mt *metricTable) mergeFailed(from *metricTable) { + fails := from.failedHarvests + 1 + if fails >= failedMetricAttemptsLimit { + return + } + if from.metricPeriodStart.Before(mt.metricPeriodStart) { + mt.metricPeriodStart = from.metricPeriodStart + } + mt.failedHarvests = fails + mt.merge(from, "") +} + +func (mt *metricTable) merge(from *metricTable, newScope string) { + if "" == newScope { + for id, m := range from.metrics { + mt.mergeMetric(id, *m) + } + } else { + for id, m := range from.metrics { + mt.mergeMetric(metricID{Name: id.Name, Scope: newScope}, *m) + } + } +} + +func (mt *metricTable) add(name, scope string, data metricData, force metricForce) { + mt.mergeMetric(metricID{Name: name, Scope: scope}, metric{data: data, forced: force}) +} + +func (mt *metricTable) addCount(name string, count float64, force metricForce) { + mt.add(name, "", metricData{countSatisfied: count}, force) +} + +func (mt *metricTable) addSingleCount(name string, force metricForce) { + mt.addCount(name, float64(1), force) +} + +func (mt *metricTable) addDuration(name, scope string, duration, exclusive time.Duration, force metricForce) { + mt.add(name, scope, metricDataFromDuration(duration, exclusive), force) +} + +func (mt *metricTable) 
addValueExclusive(name, scope string, total, exclusive float64, force metricForce) { + data := metricData{ + countSatisfied: 1, + totalTolerated: total, + exclusiveFailed: exclusive, + min: total, + max: total, + sumSquares: total * total, + } + mt.add(name, scope, data, force) +} + +func (mt *metricTable) addValue(name, scope string, total float64, force metricForce) { + mt.addValueExclusive(name, scope, total, total, force) +} + +func (mt *metricTable) addApdex(name, scope string, apdexThreshold time.Duration, zone ApdexZone, force metricForce) { + apdexSeconds := apdexThreshold.Seconds() + data := metricData{min: apdexSeconds, max: apdexSeconds} + + switch zone { + case ApdexSatisfying: + data.countSatisfied = 1 + case ApdexTolerating: + data.totalTolerated = 1 + case ApdexFailing: + data.exclusiveFailed = 1 + } + + mt.add(name, scope, data, force) +} + +func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, error) { + if 0 == len(mt.metrics) { + return nil, nil + } + estimatedBytesPerMetric := 128 + estimatedLen := len(mt.metrics) * estimatedBytesPerMetric + buf := bytes.NewBuffer(make([]byte, 0, estimatedLen)) + buf.WriteByte('[') + + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + jsonx.AppendInt(buf, mt.metricPeriodStart.Unix()) + buf.WriteByte(',') + jsonx.AppendInt(buf, now.Unix()) + buf.WriteByte(',') + + buf.WriteByte('[') + first := true + for id, metric := range mt.metrics { + if first { + first = false + } else { + buf.WriteByte(',') + } + buf.WriteByte('[') + buf.WriteByte('{') + buf.WriteString(`"name":`) + jsonx.AppendString(buf, id.Name) + if id.Scope != "" { + buf.WriteString(`,"scope":`) + jsonx.AppendString(buf, id.Scope) + } + buf.WriteByte('}') + buf.WriteByte(',') + + jsonx.AppendFloatArray(buf, + metric.data.countSatisfied, + metric.data.totalTolerated, + metric.data.exclusiveFailed, + metric.data.min, + metric.data.max, + metric.data.sumSquares) + + buf.WriteByte(']') + } + buf.WriteByte(']') + + 
buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (mt *metricTable) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return mt.CollectorJSON(agentRunID, harvestStart) +} +func (mt *metricTable) MergeIntoHarvest(h *Harvest) { + h.Metrics.mergeFailed(mt) +} + +func (mt *metricTable) ApplyRules(rules metricRules) *metricTable { + if nil == rules { + return mt + } + if len(rules) == 0 { + return mt + } + + applied := newMetricTable(mt.maxTableSize, mt.metricPeriodStart) + cache := make(map[string]string) + + for id, m := range mt.metrics { + out, ok := cache[id.Name] + if !ok { + out = rules.Apply(id.Name) + cache[id.Name] = out + } + + if "" != out { + applied.mergeMetric(metricID{Name: out, Scope: id.Scope}, *m) + } + } + + return applied +} + +func (mt *metricTable) EndpointMethod() string { + return cmdMetrics +} diff --git a/internal/metrics_test.go b/internal/metrics_test.go new file mode 100644 index 000000000..926a7e3b0 --- /dev/null +++ b/internal/metrics_test.go @@ -0,0 +1,335 @@ +package internal + +import ( + "encoding/json" + "fmt" + "testing" + "time" +) + +var ( + start = time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + end = time.Date(2014, time.November, 28, 1, 2, 0, 0, time.UTC) +) + +func TestEmptyMetrics(t *testing.T) { + mt := newMetricTable(20, start) + js, err := mt.CollectorJSON(`12345`, end) + if nil != err { + t.Fatal(err) + } + if nil != js { + t.Error(string(js)) + } +} + +func isValidJSON(data []byte) error { + var v interface{} + + return json.Unmarshal(data, &v) +} + +func TestMetrics(t *testing.T) { + mt := newMetricTable(20, start) + + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("two", "my_scope", 4*time.Second, 2*time.Second, unforced) + mt.addDuration("one", "my_scope", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + + mt.addApdex("apdex satisfied", "", 9*time.Second, ApdexSatisfying, unforced) + 
mt.addApdex("apdex satisfied", "", 8*time.Second, ApdexSatisfying, unforced) + mt.addApdex("apdex tolerated", "", 7*time.Second, ApdexTolerating, unforced) + mt.addApdex("apdex tolerated", "", 8*time.Second, ApdexTolerating, unforced) + mt.addApdex("apdex failed", "my_scope", 1*time.Second, ApdexFailing, unforced) + + mt.addCount("count 123", float64(123), unforced) + mt.addSingleCount("count 1", unforced) + + ExpectMetrics(t, mt, []WantMetric{ + {"apdex satisfied", "", false, []float64{2, 0, 0, 8, 9, 0}}, + {"apdex tolerated", "", false, []float64{0, 2, 0, 7, 8, 0}}, + {"one", "", false, []float64{2, 4, 2, 2, 2, 8}}, + {"apdex failed", "my_scope", false, []float64{0, 0, 1, 1, 1, 0}}, + {"one", "my_scope", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "my_scope", false, []float64{1, 4, 2, 4, 4, 16}}, + {"count 123", "", false, []float64{123, 0, 0, 0, 0, 0}}, + {"count 1", "", false, []float64{1, 0, 0, 0, 0, 0}}, + }) + + js, err := mt.Data("12345", end) + if nil != err { + t.Error(err) + } + // The JSON metric order is not deterministic, so we merely test that it + // is valid JSON. 
+ if err := isValidJSON(js); nil != err { + t.Error(err, string(js)) + } +} + +func TestApplyRules(t *testing.T) { + js := `[ + { + "ignore":false, + "each_segment":false, + "terminate_chain":true, + "replacement":"been_renamed", + "replace_all":false, + "match_expression":"one$", + "eval_order":1 + }, + { + "ignore":true, + "each_segment":false, + "terminate_chain":true, + "replace_all":false, + "match_expression":"ignore_me", + "eval_order":1 + }, + { + "ignore":false, + "each_segment":false, + "terminate_chain":true, + "replacement":"merge_me", + "replace_all":false, + "match_expression":"merge_me[0-9]+$", + "eval_order":1 + } + ]` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil != err { + t.Fatal(err) + } + + mt := newMetricTable(20, start) + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "scope1", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "scope2", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("ignore_me", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("ignore_me", "scope1", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("ignore_me", "scope2", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("merge_me1", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("merge_me2", "", 2*time.Second, 1*time.Second, unforced) + + applied := mt.ApplyRules(rules) + ExpectMetrics(t, applied, []WantMetric{ + {"been_renamed", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"been_renamed", "scope1", false, []float64{1, 2, 1, 2, 2, 4}}, + {"been_renamed", "scope2", false, []float64{1, 2, 1, 2, 2, 4}}, + {"merge_me", "", false, []float64{2, 4, 2, 2, 2, 8}}, + }) +} + +func TestApplyEmptyRules(t *testing.T) { + js := `[]` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil != err { + t.Fatal(err) + } + mt := newMetricTable(20, start) + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + 
mt.addDuration("one", "my_scope", 2*time.Second, 1*time.Second, unforced) + applied := mt.ApplyRules(rules) + ExpectMetrics(t, applied, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"one", "my_scope", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestApplyNilRules(t *testing.T) { + var rules metricRules + + mt := newMetricTable(20, start) + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "my_scope", 2*time.Second, 1*time.Second, unforced) + applied := mt.ApplyRules(rules) + ExpectMetrics(t, applied, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"one", "my_scope", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestForced(t *testing.T) { + mt := newMetricTable(0, start) + + mt.addDuration("unforced", "", 1*time.Second, 1*time.Second, unforced) + mt.addDuration("forced", "", 2*time.Second, 2*time.Second, forced) + + ExpectMetrics(t, mt, []WantMetric{ + {"forced", "", true, []float64{1, 2, 2, 2, 2, 4}}, + {supportabilityDropped, "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) + +} + +func TestMetricsMergeIntoEmpty(t *testing.T) { + src := newMetricTable(20, start) + src.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + dest := newMetricTable(20, start) + dest.merge(src, "") + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestMetricsMergeFromEmpty(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, start) + dest.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + dest.merge(src, "") + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestMetricsMerge(t *testing.T) { + 
src := newMetricTable(20, start) + dest := newMetricTable(20, start) + dest.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("three", "", 2*time.Second, 1*time.Second, unforced) + + dest.merge(src, "") + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{2, 4, 2, 2, 2, 8}}, + {"three", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestMergeFailedSuccess(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, end) + dest.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("three", "", 2*time.Second, 1*time.Second, unforced) + + if 0 != dest.failedHarvests { + t.Fatal(dest.failedHarvests) + } + + dest.mergeFailed(src) + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{2, 4, 2, 2, 2, 8}}, + {"three", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestMergeFailedLimitReached(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, end) + dest.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("three", "", 2*time.Second, 1*time.Second, unforced) + + src.failedHarvests = failedMetricAttemptsLimit + + dest.mergeFailed(src) + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func BenchmarkMetricTableCollectorJSON(b *testing.B) { + mt := newMetricTable(2000, time.Now()) 
+ md := metricData{ + countSatisfied: 1234567812345678.1234567812345678, + totalTolerated: 1234567812345678.1234567812345678, + exclusiveFailed: 1234567812345678.1234567812345678, + min: 1234567812345678.1234567812345678, + max: 1234567812345678.1234567812345678, + sumSquares: 1234567812345678.1234567812345678, + } + + for i := 0; i < 20; i++ { + scope := fmt.Sprintf("WebTransaction/Uri/myblog2/%d", i) + + for j := 0; j < 20; j++ { + name := fmt.Sprintf("Datastore/statement/MySQL/City%d/insert", j) + mt.add(name, "", md, forced) + mt.add(name, scope, md, forced) + + name = fmt.Sprintf("WebTransaction/Uri/myblog2/newPost_rum_%d.php", j) + mt.add(name, "", md, forced) + mt.add(name, scope, md, forced) + } + } + + data, err := mt.CollectorJSON("12345", time.Now()) + if nil != err { + b.Fatal(err) + } + if err := isValidJSON(data); nil != err { + b.Fatal(err, string(data)) + } + + b.ResetTimer() + b.ReportAllocs() + + id := "12345" + now := time.Now() + for i := 0; i < b.N; i++ { + mt.CollectorJSON(id, now) + } +} + +func BenchmarkAddingSameMetrics(b *testing.B) { + name := "my_name" + scope := "my_scope" + duration := 2 * time.Second + exclusive := 1 * time.Second + + mt := newMetricTable(2000, time.Now()) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + mt.addDuration(name, scope, duration, exclusive, forced) + mt.addSingleCount(name, forced) + } +} + +func TestMergedMetricsAreCopied(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, start) + + src.addSingleCount("zip", unforced) + dest.merge(src, "") + src.addSingleCount("zip", unforced) + ExpectMetrics(t, dest, []WantMetric{ + {"zip", "", false, []float64{1, 0, 0, 0, 0, 0}}, + }) +} + +func TestMergedWithScope(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, start) + + src.addSingleCount("one", unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "my_scope", 2*time.Second, 
1*time.Second, unforced) + dest.merge(src, "my_scope") + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "my_scope", false, []float64{1, 0, 0, 0, 0, 0}}, + {"two", "my_scope", false, []float64{2, 4, 2, 2, 2, 8}}, + }) +} diff --git a/internal/obfuscate.go b/internal/obfuscate.go new file mode 100644 index 000000000..0fcf85966 --- /dev/null +++ b/internal/obfuscate.go @@ -0,0 +1,39 @@ +package internal + +import ( + "encoding/base64" + "errors" +) + +// Deobfuscate deobfuscates a byte array. +func Deobfuscate(in string, key []byte) ([]byte, error) { + if len(key) == 0 { + return nil, errors.New("key cannot be zero length") + } + + decoded, err := base64.StdEncoding.DecodeString(in) + if err != nil { + return nil, err + } + + out := make([]byte, len(decoded)) + for i, c := range decoded { + out[i] = c ^ key[i%len(key)] + } + + return out, nil +} + +// Obfuscate obfuscates a byte array for transmission in CAT and RUM. +func Obfuscate(in, key []byte) (string, error) { + if len(key) == 0 { + return "", errors.New("key cannot be zero length") + } + + out := make([]byte, len(in)) + for i, c := range in { + out[i] = c ^ key[i%len(key)] + } + + return base64.StdEncoding.EncodeToString(out), nil +} diff --git a/internal/obfuscate_test.go b/internal/obfuscate_test.go new file mode 100644 index 000000000..b714d218f --- /dev/null +++ b/internal/obfuscate_test.go @@ -0,0 +1,79 @@ +package internal + +import ( + "testing" +) + +func TestDeobfuscate(t *testing.T) { + var out []byte + var err error + + for _, in := range []string{"", "foo"} { + out, err = Deobfuscate(in, []byte("")) + if err == nil { + t.Error("error is nil for an empty key") + } + if out != nil { + t.Errorf("out is not nil; got: %s", out) + } + } + + for _, in := range []string{"invalid_base64", "=moreinvalidbase64", "xx"} { + out, err = Deobfuscate(in, []byte("")) + if err == nil { + t.Error("error is nil for invalid base64") + } + if out != nil { + t.Errorf("out is not nil; got: %s", out) + } + } + + for _, 
test := range []struct { + input string + key string + expected string + }{ + {"", "BLAHHHH", ""}, + {"NikyPBs8OisiJg==", "BLAHHHH", "testString"}, + } { + out, err = Deobfuscate(test.input, []byte(test.key)) + if err != nil { + t.Errorf("error expected to be nil; got: %v", err) + } + if string(out) != test.expected { + t.Errorf("output mismatch; expected: %s; got: %s", test.expected, out) + } + } +} + +func TestObfuscate(t *testing.T) { + var out string + var err error + + for _, in := range []string{"", "foo"} { + out, err = Obfuscate([]byte(in), []byte("")) + if err == nil { + t.Error("error is nil for an empty key") + } + if out != "" { + t.Errorf("out is not an empty string; got: %s", out) + } + } + + for _, test := range []struct { + input string + key string + expected string + }{ + {"", "BLAHHHH", ""}, + {"testString", "BLAHHHH", "NikyPBs8OisiJg=="}, + } { + out, err = Obfuscate([]byte(test.input), []byte(test.key)) + if err != nil { + t.Errorf("error expected to be nil; got: %v", err) + } + if out != test.expected { + t.Errorf("output mismatch; expected: %s; got: %s", test.expected, out) + } + } +} diff --git a/internal/priority.go b/internal/priority.go new file mode 100644 index 000000000..e7aae796e --- /dev/null +++ b/internal/priority.go @@ -0,0 +1,27 @@ +package internal + +// Priority allows for a priority sampling of events. When an event +// is created it is given a Priority. Whenever an event pool is +// full and events need to be dropped, the events with the lowest priority +// are dropped. +type Priority float32 + +// According to spec, Agents SHOULD truncate the value to at most 6 +// digits past the decimal point. +const ( + priorityFormat = "%.6f" +) + +// NewPriority returns a new priority. +func NewPriority() Priority { + return Priority(RandFloat32()) +} + +// Float32 returns the priority as a float32. 
+func (p Priority) Float32() float32 { + return float32(p) +} + +func (p Priority) isLowerPriority(y Priority) bool { + return p < y +} diff --git a/internal/priority_test.go b/internal/priority_test.go new file mode 100644 index 000000000..82294c70c --- /dev/null +++ b/internal/priority_test.go @@ -0,0 +1,23 @@ +package internal + +import ( + "testing" +) + +func TestIsLowerPriority(t *testing.T) { + low := Priority(0.0) + middle := Priority(0.1) + high := Priority(0.999999) + + if !low.isLowerPriority(middle) { + t.Error(low, middle) + } + + if high.isLowerPriority(middle) { + t.Error(high, middle) + } + + if high.isLowerPriority(high) { + t.Error(high, high) + } +} diff --git a/internal/queuing.go b/internal/queuing.go new file mode 100644 index 000000000..cc361f820 --- /dev/null +++ b/internal/queuing.go @@ -0,0 +1,72 @@ +package internal + +import ( + "net/http" + "strconv" + "strings" + "time" +) + +const ( + xRequestStart = "X-Request-Start" + xQueueStart = "X-Queue-Start" +) + +var ( + earliestAcceptableSeconds = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix() + latestAcceptableSeconds = time.Date(2050, time.January, 1, 0, 0, 0, 0, time.UTC).Unix() +) + +func checkQueueTimeSeconds(secondsFloat float64) time.Time { + seconds := int64(secondsFloat) + nanos := int64((secondsFloat - float64(seconds)) * (1000.0 * 1000.0 * 1000.0)) + if seconds > earliestAcceptableSeconds && seconds < latestAcceptableSeconds { + return time.Unix(seconds, nanos) + } + return time.Time{} +} + +func parseQueueTime(s string) time.Time { + f, err := strconv.ParseFloat(s, 64) + if nil != err { + return time.Time{} + } + if f <= 0 { + return time.Time{} + } + + // try microseconds + if t := checkQueueTimeSeconds(f / (1000.0 * 1000.0)); !t.IsZero() { + return t + } + // try milliseconds + if t := checkQueueTimeSeconds(f / (1000.0)); !t.IsZero() { + return t + } + // try seconds + if t := checkQueueTimeSeconds(f); !t.IsZero() { + return t + } + return time.Time{} +} + +// 
QueueDuration returns the amount of time the request spent in the app
+// server's request queue.  The queue start time is read from the
+// X-Queue-Start header (or X-Request-Start as a fallback), with an optional
+// "t=" prefix, and may be expressed in seconds, milliseconds, or
+// microseconds.  Zero is returned if neither header is present, the value is
+// unparsable, or the parsed start time is after txnStart.
+func QueueDuration(hdr http.Header, txnStart time.Time) time.Duration {
+	s := hdr.Get(xQueueStart)
+	if "" == s {
+		s = hdr.Get(xRequestStart)
+	}
+	if "" == s {
+		return 0
+	}
+
+	s = strings.TrimPrefix(s, "t=")
+	qt := parseQueueTime(s)
+	if qt.IsZero() {
+		return 0
+	}
+	if qt.After(txnStart) {
+		return 0
+	}
+	return txnStart.Sub(qt)
+}
diff --git a/internal/queuing_test.go b/internal/queuing_test.go
new file mode 100644
index 000000000..54baf3f63
--- /dev/null
+++ b/internal/queuing_test.go
@@ -0,0 +1,93 @@
+package internal
+
+import (
+	"net/http"
+	"testing"
+	"time"
+)
+
+func TestParseQueueTime(t *testing.T) {
+	badInput := []string{
+		"",
+		"nope",
+		"t",
+		"0",
+		"0.0",
+		"9999999999999999999999999999999999999999999999999",
+		"-1368811467146000",
+		"3000000000",
+		"3000000000000",
+		"900000000",
+		"900000000000",
+	}
+	for _, s := range badInput {
+		if qt := parseQueueTime(s); !qt.IsZero() {
+			t.Error(s, qt)
+		}
+	}
+
+	testcases := []struct {
+		input  string
+		expect int64
+	}{
+		// Microseconds
+		{"1368811467146000", 1368811467},
+		// Milliseconds
+		{"1368811467146.000", 1368811467},
+		{"1368811467146", 1368811467},
+		// Seconds
+		{"1368811467.146000", 1368811467},
+		{"1368811467.146", 1368811467},
+		{"1368811467", 1368811467},
+	}
+	for _, tc := range testcases {
+		qt := parseQueueTime(tc.input)
+		if qt.Unix() != tc.expect {
+			t.Error(tc.input, tc.expect, qt, qt.UnixNano())
+		}
+	}
+}
+
+func TestQueueDuration(t *testing.T) {
+	hdr := make(http.Header)
+	hdr.Set("X-Queue-Start", "1465798814")
+	qd := QueueDuration(hdr, time.Unix(1465798816, 0))
+	if qd != 2*time.Second {
+		t.Error(qd)
+	}
+
+	hdr = make(http.Header)
+	hdr.Set("X-Request-Start", "1465798814")
+	qd = QueueDuration(hdr, time.Unix(1465798816, 0))
+	if qd != 2*time.Second {
+		t.Error(qd)
+	}
+
+	hdr = make(http.Header)
+	qd = QueueDuration(hdr, time.Unix(1465798816, 0))
+	if qd != 0 {
+		t.Error(qd)
+	}
+
+	hdr = make(http.Header)
+	hdr.Set("X-Request-Start",
"invalid-time") + qd = QueueDuration(hdr, time.Unix(1465798816, 0)) + if qd != 0 { + t.Error(qd) + } + + hdr = make(http.Header) + hdr.Set("X-Queue-Start", "t=1465798814") + qd = QueueDuration(hdr, time.Unix(1465798816, 0)) + if qd != 2*time.Second { + t.Error(qd) + } + + // incorrect time order + hdr = make(http.Header) + hdr.Set("X-Queue-Start", "t=1465798816") + qd = QueueDuration(hdr, time.Unix(1465798814, 0)) + if qd != 0 { + t.Error(qd) + } +} diff --git a/internal/rand.go b/internal/rand.go new file mode 100644 index 000000000..7e76d7d58 --- /dev/null +++ b/internal/rand.go @@ -0,0 +1,59 @@ +package internal + +import ( + "math/rand" + "sync" + "time" +) + +var ( + seededRand = struct { + sync.Mutex + *rand.Rand + }{ + Rand: rand.New(rand.NewSource(int64(time.Now().UnixNano()))), + } +) + +// RandUint64 returns a random uint64. +// +// IMPORTANT! The default rand package functions are not used, since we want to +// minimize the chance that different Go processes duplicate the same +// transaction id. (Note that the rand top level functions "use a default +// shared Source that produces a deterministic sequence of values each time a +// program is run" (and we don't seed the shared Source to avoid changing +// customer apps' behavior)). +func RandUint64() uint64 { + seededRand.Lock() + defer seededRand.Unlock() + + u1 := seededRand.Uint32() + u2 := seededRand.Uint32() + return (uint64(u1) << 32) | uint64(u2) +} + +// RandUint32 returns a random uint32. +func RandUint32() uint32 { + seededRand.Lock() + defer seededRand.Unlock() + + return seededRand.Uint32() +} + +// RandFloat32 returns a random float32 between 0.0 and 1.0. 
+func RandFloat32() float32 {
+	seededRand.Lock()
+	defer seededRand.Unlock()
+
+	// Loop until a non-zero value is produced so that 0.0 is never
+	// returned; rand.Float32 yields values in [0.0, 1.0).
+	for {
+		if r := seededRand.Float32(); 0.0 != r {
+			return r
+		}
+	}
+}
+
+// RandUint64N returns a random uint64 that's
+// between 0 and the passed in max, non-inclusive
+func RandUint64N(max uint64) uint64 {
+	return RandUint64() % max
+}
diff --git a/internal/rules_cache.go b/internal/rules_cache.go
new file mode 100644
index 000000000..d83570753
--- /dev/null
+++ b/internal/rules_cache.go
@@ -0,0 +1,52 @@
+package internal
+
+import "sync"
+
+// rulesCache is designed to avoid applying url-rules, txn-name-rules, and
+// segment-rules since regexes are expensive!
+type rulesCache struct {
+	sync.RWMutex
+	cache        map[rulesCacheKey]string
+	maxCacheSize int
+}
+
+type rulesCacheKey struct {
+	isWeb     bool
+	inputName string
+}
+
+func newRulesCache(maxCacheSize int) *rulesCache {
+	return &rulesCache{
+		cache:        make(map[rulesCacheKey]string, maxCacheSize),
+		maxCacheSize: maxCacheSize,
+	}
+}
+
+// find returns the cached final name for the input, or "" on a miss.  It is
+// safe to call on a nil receiver.
+func (cache *rulesCache) find(inputName string, isWeb bool) string {
+	if nil == cache {
+		return ""
+	}
+	cache.RLock()
+	defer cache.RUnlock()
+
+	return cache.cache[rulesCacheKey{
+		inputName: inputName,
+		isWeb:     isWeb,
+	}]
+}
+
+// set stores a final name, silently dropping the entry once the cache has
+// reached maxCacheSize.  It is safe to call on a nil receiver.
+func (cache *rulesCache) set(inputName string, isWeb bool, finalName string) {
+	if nil == cache {
+		return
+	}
+	cache.Lock()
+	defer cache.Unlock()
+
+	if len(cache.cache) >= cache.maxCacheSize {
+		return
+	}
+	cache.cache[rulesCacheKey{
+		inputName: inputName,
+		isWeb:     isWeb,
+	}] = finalName
+}
diff --git a/internal/rules_cache_test.go b/internal/rules_cache_test.go
new file mode 100644
index 000000000..fd4857ef7
--- /dev/null
+++ b/internal/rules_cache_test.go
@@ -0,0 +1,56 @@
+package internal
+
+import "testing"
+
+func TestRulesCache(t *testing.T) {
+	testcases := []struct {
+		input  string
+		isWeb  bool
+		output string
+	}{
+		{input: "name1", isWeb: true, output: "WebTransaction/Go/name1"},
+		{input: "name1", isWeb: false, output: "OtherTransaction/Go/name1"},
+ {input: "name2", isWeb: true, output: "WebTransaction/Go/name2"}, + {input: "name3", isWeb: true, output: "WebTransaction/Go/name3"}, + {input: "zap/123/zip", isWeb: false, output: "OtherTransaction/Go/zap/*/zip"}, + {input: "zap/45/zip", isWeb: false, output: "OtherTransaction/Go/zap/*/zip"}, + } + + cache := newRulesCache(len(testcases)) + for _, tc := range testcases { + // Test that nothing is in the cache before population. + if out := cache.find(tc.input, tc.isWeb); out != "" { + t.Error(out, tc.input, tc.isWeb) + } + } + for _, tc := range testcases { + cache.set(tc.input, tc.isWeb, tc.output) + } + for _, tc := range testcases { + // Test that everything is now in the cache as expected. + if out := cache.find(tc.input, tc.isWeb); out != tc.output { + t.Error(out, tc.input, tc.isWeb, tc.output) + } + } +} + +func TestRulesCacheLimit(t *testing.T) { + cache := newRulesCache(1) + cache.set("name1", true, "WebTransaction/Go/name1") + cache.set("name1", false, "OtherTransaction/Go/name1") + if out := cache.find("name1", true); out != "WebTransaction/Go/name1" { + t.Error(out) + } + if out := cache.find("name1", false); out != "" { + t.Error(out) + } +} + +func TestRulesCacheNil(t *testing.T) { + var cache *rulesCache + // No panics should happen if the rules cache pointer is nil. + if out := cache.find("name1", true); "" != out { + t.Error(out) + } + cache.set("name1", false, "OtherTransaction/Go/name1") +} diff --git a/internal/sampler.go b/internal/sampler.go new file mode 100644 index 000000000..d78cdc640 --- /dev/null +++ b/internal/sampler.go @@ -0,0 +1,145 @@ +package internal + +import ( + "runtime" + "time" + + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/sysinfo" +) + +// Sample is a system/runtime snapshot. 
+type Sample struct { + when time.Time + memStats runtime.MemStats + usage sysinfo.Usage + numGoroutine int + numCPU int +} + +func bytesToMebibytesFloat(bts uint64) float64 { + return float64(bts) / (1024 * 1024) +} + +// GetSample gathers a new Sample. +func GetSample(now time.Time, lg logger.Logger) *Sample { + s := Sample{ + when: now, + numGoroutine: runtime.NumGoroutine(), + numCPU: runtime.NumCPU(), + } + + if usage, err := sysinfo.GetUsage(); err == nil { + s.usage = usage + } else { + lg.Warn("unable to usage", map[string]interface{}{ + "error": err.Error(), + }) + } + + runtime.ReadMemStats(&s.memStats) + + return &s +} + +type cpuStats struct { + used time.Duration + fraction float64 // used / (elapsed * numCPU) +} + +// Stats contains system information for a period of time. +type Stats struct { + numGoroutine int + allocBytes uint64 + heapObjects uint64 + user cpuStats + system cpuStats + gcPauseFraction float64 + deltaNumGC uint32 + deltaPauseTotal time.Duration + minPause time.Duration + maxPause time.Duration +} + +// Samples is used as the parameter to GetStats to avoid mixing up the previous +// and current sample. +type Samples struct { + Previous *Sample + Current *Sample +} + +// GetStats combines two Samples into a Stats. 
+func GetStats(ss Samples) Stats { + cur := ss.Current + prev := ss.Previous + elapsed := cur.when.Sub(prev.when) + + s := Stats{ + numGoroutine: cur.numGoroutine, + allocBytes: cur.memStats.Alloc, + heapObjects: cur.memStats.HeapObjects, + } + + // CPU Utilization + totalCPUSeconds := elapsed.Seconds() * float64(cur.numCPU) + if prev.usage.User != 0 && cur.usage.User > prev.usage.User { + s.user.used = cur.usage.User - prev.usage.User + s.user.fraction = s.user.used.Seconds() / totalCPUSeconds + } + if prev.usage.System != 0 && cur.usage.System > prev.usage.System { + s.system.used = cur.usage.System - prev.usage.System + s.system.fraction = s.system.used.Seconds() / totalCPUSeconds + } + + // GC Pause Fraction + deltaPauseTotalNs := cur.memStats.PauseTotalNs - prev.memStats.PauseTotalNs + frac := float64(deltaPauseTotalNs) / float64(elapsed.Nanoseconds()) + s.gcPauseFraction = frac + + // GC Pauses + if deltaNumGC := cur.memStats.NumGC - prev.memStats.NumGC; deltaNumGC > 0 { + // In case more than 256 pauses have happened between samples + // and we are examining a subset of the pauses, we ensure that + // the min and max are not on the same side of the average by + // using the average as the starting min and max. + maxPauseNs := deltaPauseTotalNs / uint64(deltaNumGC) + minPauseNs := deltaPauseTotalNs / uint64(deltaNumGC) + for i := prev.memStats.NumGC + 1; i <= cur.memStats.NumGC; i++ { + pause := cur.memStats.PauseNs[(i+255)%256] + if pause > maxPauseNs { + maxPauseNs = pause + } + if pause < minPauseNs { + minPauseNs = pause + } + } + s.deltaPauseTotal = time.Duration(deltaPauseTotalNs) * time.Nanosecond + s.deltaNumGC = deltaNumGC + s.minPause = time.Duration(minPauseNs) * time.Nanosecond + s.maxPause = time.Duration(maxPauseNs) * time.Nanosecond + } + + return s +} + +// MergeIntoHarvest implements Harvestable. 
+func (s Stats) MergeIntoHarvest(h *Harvest) { + h.Metrics.addValue(heapObjectsAllocated, "", float64(s.heapObjects), forced) + h.Metrics.addValue(runGoroutine, "", float64(s.numGoroutine), forced) + h.Metrics.addValueExclusive(memoryPhysical, "", bytesToMebibytesFloat(s.allocBytes), 0, forced) + h.Metrics.addValueExclusive(cpuUserUtilization, "", s.user.fraction, 0, forced) + h.Metrics.addValueExclusive(cpuSystemUtilization, "", s.system.fraction, 0, forced) + h.Metrics.addValue(cpuUserTime, "", s.user.used.Seconds(), forced) + h.Metrics.addValue(cpuSystemTime, "", s.system.used.Seconds(), forced) + h.Metrics.addValueExclusive(gcPauseFraction, "", s.gcPauseFraction, 0, forced) + if s.deltaNumGC > 0 { + h.Metrics.add(gcPauses, "", metricData{ + countSatisfied: float64(s.deltaNumGC), + totalTolerated: s.deltaPauseTotal.Seconds(), + exclusiveFailed: 0, + min: s.minPause.Seconds(), + max: s.maxPause.Seconds(), + sumSquares: s.deltaPauseTotal.Seconds() * s.deltaPauseTotal.Seconds(), + }, forced) + } +} diff --git a/internal/sampler_test.go b/internal/sampler_test.go new file mode 100644 index 000000000..d84ddc491 --- /dev/null +++ b/internal/sampler_test.go @@ -0,0 +1,85 @@ +package internal + +import ( + "testing" + "time" + + "github.com/newrelic/go-agent/internal/logger" +) + +func TestGetSample(t *testing.T) { + now := time.Now() + sample := GetSample(now, logger.ShimLogger{}) + if nil == sample { + t.Fatal(sample) + } + if now != sample.when { + t.Error(now, sample.when) + } + if sample.numGoroutine <= 0 { + t.Error(sample.numGoroutine) + } + if sample.numCPU <= 0 { + t.Error(sample.numCPU) + } + if sample.memStats.HeapObjects == 0 { + t.Error(sample.memStats.HeapObjects) + } +} + +func TestMetricsCreated(t *testing.T) { + now := time.Now() + h := NewHarvest(now, &DfltHarvestCfgr{}) + + stats := Stats{ + heapObjects: 5 * 1000, + numGoroutine: 23, + allocBytes: 37 * 1024 * 1024, + user: cpuStats{ + used: 20 * time.Millisecond, + fraction: 0.01, + }, + system: 
cpuStats{ + used: 40 * time.Millisecond, + fraction: 0.02, + }, + gcPauseFraction: 3e-05, + deltaNumGC: 2, + deltaPauseTotal: 500 * time.Microsecond, + minPause: 100 * time.Microsecond, + maxPause: 400 * time.Microsecond, + } + + stats.MergeIntoHarvest(h) + + ExpectMetrics(t, h.Metrics, []WantMetric{ + {"Memory/Heap/AllocatedObjects", "", true, []float64{1, 5000, 5000, 5000, 5000, 25000000}}, + {"Memory/Physical", "", true, []float64{1, 37, 0, 37, 37, 1369}}, + {"CPU/User Time", "", true, []float64{1, 0.02, 0.02, 0.02, 0.02, 0.0004}}, + {"CPU/System Time", "", true, []float64{1, 0.04, 0.04, 0.04, 0.04, 0.0016}}, + {"CPU/User/Utilization", "", true, []float64{1, 0.01, 0, 0.01, 0.01, 0.0001}}, + {"CPU/System/Utilization", "", true, []float64{1, 0.02, 0, 0.02, 0.02, 0.0004}}, + {"Go/Runtime/Goroutines", "", true, []float64{1, 23, 23, 23, 23, 529}}, + {"GC/System/Pause Fraction", "", true, []float64{1, 3e-05, 0, 3e-05, 3e-05, 9e-10}}, + {"GC/System/Pauses", "", true, []float64{2, 0.0005, 0, 0.0001, 0.0004, 2.5e-7}}, + }) +} + +func TestMetricsCreatedEmpty(t *testing.T) { + now := time.Now() + h := NewHarvest(now, &DfltHarvestCfgr{}) + stats := Stats{} + + stats.MergeIntoHarvest(h) + + ExpectMetrics(t, h.Metrics, []WantMetric{ + {"Memory/Heap/AllocatedObjects", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Memory/Physical", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"CPU/User Time", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"CPU/System Time", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"CPU/User/Utilization", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"CPU/System/Utilization", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Go/Runtime/Goroutines", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"GC/System/Pause Fraction", "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) +} diff --git a/internal/security_policies.go b/internal/security_policies.go new file mode 100644 index 000000000..d8d119b77 --- /dev/null +++ b/internal/security_policies.go @@ -0,0 +1,111 @@ +package internal + 
+import ( + "encoding/json" + "fmt" + "reflect" +) + +// Security policies documentation: +// https://source.datanerd.us/agents/agent-specs/blob/master/Language-Agent-Security-Policies.md + +// SecurityPolicies contains the security policies. +type SecurityPolicies struct { + RecordSQL securityPolicy `json:"record_sql"` + AttributesInclude securityPolicy `json:"attributes_include"` + AllowRawExceptionMessages securityPolicy `json:"allow_raw_exception_messages"` + CustomEvents securityPolicy `json:"custom_events"` + CustomParameters securityPolicy `json:"custom_parameters"` +} + +// PointerIfPopulated returns a reference to the security policies if they have +// been populated from JSON. +func (sp *SecurityPolicies) PointerIfPopulated() *SecurityPolicies { + emptyPolicies := SecurityPolicies{} + if nil != sp && *sp != emptyPolicies { + return sp + } + return nil +} + +type securityPolicy struct { + EnabledVal *bool `json:"enabled"` +} + +func (p *securityPolicy) Enabled() bool { return nil == p.EnabledVal || *p.EnabledVal } +func (p *securityPolicy) SetEnabled(enabled bool) { p.EnabledVal = &enabled } +func (p *securityPolicy) IsSet() bool { return nil != p.EnabledVal } + +type policyer interface { + SetEnabled(bool) + IsSet() bool +} + +// UnmarshalJSON decodes security policies sent from the preconnect endpoint. +func (sp *SecurityPolicies) UnmarshalJSON(data []byte) (er error) { + defer func() { + // Zero out all fields if there is an error to ensure that the + // populated check works. 
+ if er != nil { + *sp = SecurityPolicies{} + } + }() + + var raw map[string]struct { + Enabled bool `json:"enabled"` + Required bool `json:"required"` + } + err := json.Unmarshal(data, &raw) + if err != nil { + return fmt.Errorf("unable to unmarshal security policies: %v", err) + } + + knownPolicies := make(map[string]policyer) + + spv := reflect.ValueOf(sp).Elem() + for i := 0; i < spv.NumField(); i++ { + fieldAddress := spv.Field(i).Addr() + field := fieldAddress.Interface().(policyer) + name := spv.Type().Field(i).Tag.Get("json") + knownPolicies[name] = field + } + + for name, policy := range raw { + p, ok := knownPolicies[name] + if !ok { + if policy.Required { + return errUnknownRequiredPolicy{name: name} + } + } else { + p.SetEnabled(policy.Enabled) + } + } + for name, policy := range knownPolicies { + if !policy.IsSet() { + return errUnsetPolicy{name: name} + } + } + return nil +} + +type errUnknownRequiredPolicy struct{ name string } + +func (err errUnknownRequiredPolicy) Error() string { + return fmt.Sprintf("policy '%s' is unrecognized, please check for a newer agent version or contact support", err.name) +} + +type errUnsetPolicy struct{ name string } + +func (err errUnsetPolicy) Error() string { + return fmt.Sprintf("policy '%s' not received, please contact support", err.name) +} + +func isDisconnectSecurityPolicyError(e error) bool { + if _, ok := e.(errUnknownRequiredPolicy); ok { + return true + } + if _, ok := e.(errUnsetPolicy); ok { + return true + } + return false +} diff --git a/internal/security_policies_test.go b/internal/security_policies_test.go new file mode 100644 index 000000000..7c65193ad --- /dev/null +++ b/internal/security_policies_test.go @@ -0,0 +1,109 @@ +package internal + +import ( + "encoding/json" + "testing" +) + +func testBool(t *testing.T, name string, expected, got bool) { + if expected != got { + t.Errorf("%v: expected=%v got=%v", name, expected, got) + } +} + +func TestSecurityPoliciesPresent(t *testing.T) { + inputJSON 
:= []byte(`{ + "record_sql": { "enabled": false, "required": false }, + "attributes_include": { "enabled": false, "required": false }, + "allow_raw_exception_messages": { "enabled": false, "required": false }, + "custom_events": { "enabled": false, "required": false }, + "custom_parameters": { "enabled": false, "required": false }, + "custom_instrumentation_editor": { "enabled": false, "required": false }, + "message_parameters": { "enabled": false, "required": false }, + "job_arguments": { "enabled": false, "required": false } + }`) + var policies SecurityPolicies + err := json.Unmarshal(inputJSON, &policies) + if nil != err { + t.Fatal(err) + } + connectJSON, err := json.Marshal(policies) + if nil != err { + t.Fatal(err) + } + expectJSON := CompactJSONString(`{ + "record_sql": { "enabled": false }, + "attributes_include": { "enabled": false }, + "allow_raw_exception_messages": { "enabled": false }, + "custom_events": { "enabled": false }, + "custom_parameters": { "enabled": false } + }`) + if string(connectJSON) != expectJSON { + t.Error(string(connectJSON), expectJSON) + } + testBool(t, "PointerIfPopulated", true, nil != policies.PointerIfPopulated()) + testBool(t, "RecordSQLEnabled", false, policies.RecordSQL.Enabled()) + testBool(t, "AttributesIncludeEnabled", false, policies.AttributesInclude.Enabled()) + testBool(t, "AllowRawExceptionMessages", false, policies.AllowRawExceptionMessages.Enabled()) + testBool(t, "CustomEventsEnabled", false, policies.CustomEvents.Enabled()) + testBool(t, "CustomParametersEnabled", false, policies.CustomParameters.Enabled()) +} + +func TestNilSecurityPolicies(t *testing.T) { + var policies SecurityPolicies + testBool(t, "PointerIfPopulated", false, nil != policies.PointerIfPopulated()) + testBool(t, "RecordSQLEnabled", true, policies.RecordSQL.Enabled()) + testBool(t, "AttributesIncludeEnabled", true, policies.AttributesInclude.Enabled()) + testBool(t, "AllowRawExceptionMessages", true, 
policies.AllowRawExceptionMessages.Enabled()) + testBool(t, "CustomEventsEnabled", true, policies.CustomEvents.Enabled()) + testBool(t, "CustomParametersEnabled", true, policies.CustomParameters.Enabled()) +} + +func TestUnknownRequiredPolicy(t *testing.T) { + inputJSON := []byte(`{ + "record_sql": { "enabled": false, "required": false }, + "attributes_include": { "enabled": false, "required": false }, + "allow_raw_exception_messages": { "enabled": false, "required": false }, + "custom_events": { "enabled": false, "required": false }, + "custom_parameters": { "enabled": false, "required": false }, + "custom_instrumentation_editor": { "enabled": false, "required": false }, + "message_parameters": { "enabled": false, "required": false }, + "job_arguments": { "enabled": false, "required": false }, + "unknown_policy": { "enabled": false, "required": true } + }`) + var policies SecurityPolicies + err := json.Unmarshal(inputJSON, &policies) + if nil == err { + t.Fatal(err) + } + testBool(t, "PointerIfPopulated", false, nil != policies.PointerIfPopulated()) + testBool(t, "unknown required policy should be disconnect", true, isDisconnectSecurityPolicyError(err)) +} + +func TestSecurityPolicyMissing(t *testing.T) { + inputJSON := []byte(`{ + "record_sql": { "enabled": false, "required": false }, + "attributes_include": { "enabled": false, "required": false }, + "allow_raw_exception_messages": { "enabled": false, "required": false }, + "custom_events": { "enabled": false, "required": false }, + "request_parameters": { "enabled": false, "required": false } + }`) + var policies SecurityPolicies + err := json.Unmarshal(inputJSON, &policies) + _, ok := err.(errUnsetPolicy) + if !ok { + t.Fatal(err) + } + testBool(t, "PointerIfPopulated", false, nil != policies.PointerIfPopulated()) + testBool(t, "missing policy should be disconnect", true, isDisconnectSecurityPolicyError(err)) +} + +func TestMalformedPolicies(t *testing.T) { + inputJSON := []byte(`{`) + var policies 
SecurityPolicies + err := json.Unmarshal(inputJSON, &policies) + if nil == err { + t.Fatal(err) + } + testBool(t, "malformed policies should not be disconnect", false, isDisconnectSecurityPolicyError(err)) +} diff --git a/internal/segment_terms.go b/internal/segment_terms.go new file mode 100644 index 000000000..a0fd1f2e6 --- /dev/null +++ b/internal/segment_terms.go @@ -0,0 +1,145 @@ +package internal + +// https://newrelic.atlassian.net/wiki/display/eng/Language+agent+transaction+segment+terms+rules + +import ( + "encoding/json" + "strings" +) + +const ( + placeholder = "*" + separator = "/" +) + +type segmentRule struct { + Prefix string `json:"prefix"` + Terms []string `json:"terms"` + TermsMap map[string]struct{} +} + +// segmentRules is keyed by each segmentRule's Prefix field with any trailing +// slash removed. +type segmentRules map[string]*segmentRule + +func buildTermsMap(terms []string) map[string]struct{} { + m := make(map[string]struct{}, len(terms)) + for _, t := range terms { + m[t] = struct{}{} + } + return m +} + +func (rules *segmentRules) UnmarshalJSON(b []byte) error { + var raw []*segmentRule + + if err := json.Unmarshal(b, &raw); nil != err { + return err + } + + rs := make(map[string]*segmentRule) + + for _, rule := range raw { + prefix := strings.TrimSuffix(rule.Prefix, "/") + if len(strings.Split(prefix, "/")) != 2 { + // TODO + // Warn("invalid segment term rule prefix", + // {"prefix": rule.Prefix}) + continue + } + + if nil == rule.Terms { + // TODO + // Warn("segment term rule has missing terms", + // {"prefix": rule.Prefix}) + continue + } + + rule.TermsMap = buildTermsMap(rule.Terms) + + rs[prefix] = rule + } + + *rules = rs + return nil +} + +func (rule *segmentRule) apply(name string) string { + if !strings.HasPrefix(name, rule.Prefix) { + return name + } + + s := strings.TrimPrefix(name, rule.Prefix) + + leadingSlash := "" + if strings.HasPrefix(s, separator) { + leadingSlash = separator + s = strings.TrimPrefix(s, separator) + } 
+ + if "" != s { + segments := strings.Split(s, separator) + + for i, segment := range segments { + _, whitelisted := rule.TermsMap[segment] + if whitelisted { + segments[i] = segment + } else { + segments[i] = placeholder + } + } + + segments = collapsePlaceholders(segments) + s = strings.Join(segments, separator) + } + + return rule.Prefix + leadingSlash + s +} + +func (rules segmentRules) apply(name string) string { + if nil == rules { + return name + } + + rule, ok := rules[firstTwoSegments(name)] + if !ok { + return name + } + + return rule.apply(name) +} + +func firstTwoSegments(name string) string { + firstSlashIdx := strings.Index(name, separator) + if firstSlashIdx == -1 { + return name + } + + secondSlashIdx := strings.Index(name[firstSlashIdx+1:], separator) + if secondSlashIdx == -1 { + return name + } + + return name[0 : firstSlashIdx+secondSlashIdx+1] +} + +func collapsePlaceholders(segments []string) []string { + j := 0 + prevStar := false + for i := 0; i < len(segments); i++ { + segment := segments[i] + if placeholder == segment { + if prevStar { + continue + } + segments[j] = placeholder + j++ + prevStar = true + } else { + segments[j] = segment + j++ + prevStar = false + } + } + return segments[0:j] +} diff --git a/internal/segment_terms_test.go b/internal/segment_terms_test.go new file mode 100644 index 000000000..2e40812fb --- /dev/null +++ b/internal/segment_terms_test.go @@ -0,0 +1,134 @@ +package internal + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestCrossAgentSegmentTerms(t *testing.T) { + var tcs []struct { + Testname string `json:"testname"` + Rules segmentRules `json:"transaction_segment_terms"` + Tests []struct { + Input string `json:"input"` + Expected string `json:"expected"` + } `json:"tests"` + } + + err := crossagent.ReadJSON("transaction_segment_terms.json", &tcs) + if err != nil { + t.Fatal(err) + } + + for _, tc := range tcs { + for _, test := range 
tc.Tests { + out := tc.Rules.apply(test.Input) + if out != test.Expected { + t.Fatal(tc.Testname, test.Input, out, test.Expected) + } + } + } +} + +func TestSegmentTerms(t *testing.T) { + js := `[ + { + "prefix":"WebTransaction\/Uri", + "terms":[ + "two", + "Users", + "willhf", + "dev", + "php", + "one", + "alpha", + "zap" + ] + } + ]` + var rules segmentRules + if err := json.Unmarshal([]byte(js), &rules); nil != err { + t.Fatal(err) + } + + out := rules.apply("WebTransaction/Uri/pen/two/pencil/dev/paper") + if out != "WebTransaction/Uri/*/two/*/dev/*" { + t.Fatal(out) + } +} + +func TestEmptySegmentTerms(t *testing.T) { + var rules segmentRules + + input := "my/name" + out := rules.apply(input) + if out != input { + t.Error(input, out) + } +} + +func BenchmarkSegmentTerms(b *testing.B) { + js := `[ + { + "prefix":"WebTransaction\/Uri", + "terms":[ + "two", + "Users", + "willhf", + "dev", + "php", + "one", + "alpha", + "zap" + ] + } + ]` + var rules segmentRules + if err := json.Unmarshal([]byte(js), &rules); nil != err { + b.Fatal(err) + } + + b.ResetTimer() + b.ReportAllocs() + + input := "WebTransaction/Uri/pen/two/pencil/dev/paper" + expected := "WebTransaction/Uri/*/two/*/dev/*" + for i := 0; i < b.N; i++ { + out := rules.apply(input) + if out != expected { + b.Fatal(out, expected) + } + } +} + +func TestCollapsePlaceholders(t *testing.T) { + testcases := []struct { + input string + expect string + }{ + {input: "", expect: ""}, + {input: "/", expect: "/"}, + {input: "*", expect: "*"}, + {input: "*/*", expect: "*"}, + {input: "a/b/c", expect: "a/b/c"}, + {input: "*/*/*", expect: "*"}, + {input: "a/*/*/*/b", expect: "a/*/b"}, + {input: "a/b/*/*/*/", expect: "a/b/*/"}, + {input: "a/b/*/*/*", expect: "a/b/*"}, + {input: "*/*/a/b/*/*/*", expect: "*/a/b/*"}, + {input: "*/*/a/b/*/c/*/*/d/e/*/*/*", expect: "*/a/b/*/c/*/d/e/*"}, + {input: "a/*/b", expect: "a/*/b"}, + } + + for _, tc := range testcases { + segments := strings.Split(tc.input, "/") + segments = 
collapsePlaceholders(segments) + out := strings.Join(segments, "/") + if out != tc.expect { + t.Error(tc.input, tc.expect, out) + } + } +} diff --git a/internal/serverless.go b/internal/serverless.go new file mode 100644 index 000000000..628797178 --- /dev/null +++ b/internal/serverless.go @@ -0,0 +1,217 @@ +package internal + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/newrelic/go-agent/internal/logger" +) + +const ( + lambdaMetadataVersion = 2 + + // AgentLanguage is used in the connect JSON and the Lambda JSON. + AgentLanguage = "go" +) + +// ServerlessHarvest is used to store and log data when the agent is running in +// serverless mode. +type ServerlessHarvest struct { + logger logger.Logger + version string + awsExecutionEnv string + + // The Lambda handler could be using multiple goroutines so we use a + // mutex to prevent race conditions. + sync.Mutex + harvest *Harvest +} + +// NewServerlessHarvest creates a new ServerlessHarvest. +func NewServerlessHarvest(logger logger.Logger, version string, getEnv func(string) string) *ServerlessHarvest { + return &ServerlessHarvest{ + logger: logger, + version: version, + awsExecutionEnv: getEnv("AWS_EXECUTION_ENV"), + + // We can use a default HarvestConfigured parameter because + // serverless mode doesn't have a connect, and therefore won't + // have custom event limits from the server. + harvest: NewHarvest(time.Now(), &DfltHarvestCfgr{}), + } +} + +// Consume adds data to the harvest. 
+func (sh *ServerlessHarvest) Consume(data Harvestable) { + if nil == sh { + return + } + sh.Lock() + defer sh.Unlock() + + data.MergeIntoHarvest(sh.harvest) +} + +func (sh *ServerlessHarvest) swapHarvest() *Harvest { + sh.Lock() + defer sh.Unlock() + + h := sh.harvest + sh.harvest = NewHarvest(time.Now(), &DfltHarvestCfgr{}) + return h +} + +// Write logs the data in the format described by: +// https://source.datanerd.us/agents/agent-specs/blob/master/Lambda.md +func (sh *ServerlessHarvest) Write(arn string, writer io.Writer) { + if nil == sh { + return + } + harvest := sh.swapHarvest() + payloads := harvest.Payloads(false) + // Note that *json.RawMessage (instead of json.RawMessage) is used to + // support older Go versions: https://go-review.googlesource.com/c/go/+/21811/ + harvestPayloads := make(map[string]*json.RawMessage, len(payloads)) + for _, p := range payloads { + agentRunID := "" + cmd := p.EndpointMethod() + data, err := p.Data(agentRunID, time.Now()) + if err != nil { + sh.logger.Error("error creating payload json", map[string]interface{}{ + "command": cmd, + "error": err.Error(), + }) + continue + } + if nil == data { + continue + } + // NOTE! This code relies on the fact that each payload is + // using a different endpoint method. Sometimes the transaction + // events payload might be split, but since there is only one + // transaction event per serverless transaction, that's not an + // issue. Likewise, if we ever split normal transaction events + // apart from synthetics events, the transaction will either be + // normal or synthetic, so that won't be an issue. Log an error + // if this happens for future defensiveness. 
+ if _, ok := harvestPayloads[cmd]; ok { + sh.logger.Error("data with duplicate command name lost", map[string]interface{}{ + "command": cmd, + }) + } + d := json.RawMessage(data) + harvestPayloads[cmd] = &d + } + + if len(harvestPayloads) == 0 { + // The harvest may not contain any data if the serverless + // transaction was ignored. + return + } + + data, err := json.Marshal(harvestPayloads) + if nil != err { + sh.logger.Error("error creating serverless data json", map[string]interface{}{ + "error": err.Error(), + }) + return + } + + var dataBuf bytes.Buffer + gz := gzip.NewWriter(&dataBuf) + gz.Write(data) + gz.Flush() + gz.Close() + + js, err := json.Marshal([]interface{}{ + lambdaMetadataVersion, + "NR_LAMBDA_MONITORING", + struct { + MetadataVersion int `json:"metadata_version"` + ARN string `json:"arn,omitempty"` + ProtocolVersion int `json:"protocol_version"` + ExecutionEnvironment string `json:"execution_environment,omitempty"` + AgentVersion string `json:"agent_version"` + AgentLanguage string `json:"agent_language"` + }{ + MetadataVersion: lambdaMetadataVersion, + ProtocolVersion: ProcotolVersion, + AgentVersion: sh.version, + ExecutionEnvironment: sh.awsExecutionEnv, + ARN: arn, + AgentLanguage: AgentLanguage, + }, + base64.StdEncoding.EncodeToString(dataBuf.Bytes()), + }) + + if err != nil { + sh.logger.Error("error creating serverless json", map[string]interface{}{ + "error": err.Error(), + }) + return + } + + fmt.Fprintln(writer, string(js)) +} + +// ParseServerlessPayload exists for testing. 
+func ParseServerlessPayload(data []byte) (metadata, uncompressedData map[string]json.RawMessage, err error) { + var arr [4]json.RawMessage + if err = json.Unmarshal(data, &arr); nil != err { + err = fmt.Errorf("unable to unmarshal serverless data array: %v", err) + return + } + var dataJSON []byte + compressed := strings.Trim(string(arr[3]), `"`) + if dataJSON, err = decodeUncompress(compressed); nil != err { + err = fmt.Errorf("unable to uncompress serverless data: %v", err) + return + } + if err = json.Unmarshal(dataJSON, &uncompressedData); nil != err { + err = fmt.Errorf("unable to unmarshal uncompressed serverless data: %v", err) + return + } + if err = json.Unmarshal(arr[2], &metadata); nil != err { + err = fmt.Errorf("unable to unmarshal serverless metadata: %v", err) + return + } + return +} + +func decodeUncompress(input string) ([]byte, error) { + decoded, err := base64.StdEncoding.DecodeString(input) + if nil != err { + return nil, err + } + + buf := bytes.NewBuffer(decoded) + gz, err := gzip.NewReader(buf) + if nil != err { + return nil, err + } + var out bytes.Buffer + io.Copy(&out, gz) + gz.Close() + + return out.Bytes(), nil +} + +// ServerlessWriter is implemented by newrelic.Application. +type ServerlessWriter interface { + ServerlessWrite(arn string, writer io.Writer) +} + +// ServerlessWrite exists to avoid type assertion in the nrlambda integration +// package. 
+func ServerlessWrite(app interface{}, arn string, writer io.Writer) { + if s, ok := app.(ServerlessWriter); ok { + s.ServerlessWrite(arn, writer) + } +} diff --git a/internal/serverless_test.go b/internal/serverless_test.go new file mode 100644 index 000000000..4ed506fd1 --- /dev/null +++ b/internal/serverless_test.go @@ -0,0 +1,110 @@ +package internal + +import ( + "bytes" + "strings" + "testing" + "time" + + "github.com/newrelic/go-agent/internal/logger" +) + +func serverlessGetenvShim(s string) string { + if s == "AWS_EXECUTION_ENV" { + return "the-execution-env" + } + return "" +} + +func TestServerlessHarvest(t *testing.T) { + // Test the expected ServerlessHarvest use. + sh := NewServerlessHarvest(logger.ShimLogger{}, "the-version", serverlessGetenvShim) + event, err := CreateCustomEvent("myEvent", nil, time.Now()) + if nil != err { + t.Fatal(err) + } + sh.Consume(event) + buf := &bytes.Buffer{} + sh.Write("arn", buf) + metadata, data, err := ParseServerlessPayload(buf.Bytes()) + if nil != err { + t.Fatal(err) + } + if v := string(metadata["metadata_version"]); v != `2` { + t.Error(v) + } + if v := string(metadata["arn"]); v != `"arn"` { + t.Error(v) + } + if v := string(metadata["protocol_version"]); v != `17` { + t.Error(v) + } + if v := string(metadata["execution_environment"]); v != `"the-execution-env"` { + t.Error(v) + } + if v := string(metadata["agent_version"]); v != `"the-version"` { + t.Error(v) + } + if v := string(metadata["agent_language"]); v != `"go"` { + t.Error(v) + } + eventData := string(data["custom_event_data"]) + if !strings.Contains(eventData, `"type":"myEvent"`) { + t.Error(eventData) + } + if len(data) != 1 { + t.Fatal(data) + } + // Test that the harvest was replaced with a new harvest. + buf = &bytes.Buffer{} + sh.Write("arn", buf) + if 0 != buf.Len() { + t.Error(buf.String()) + } +} + +func TestServerlessHarvestNil(t *testing.T) { + // The public ServerlessHarvest methods should not panic if the + // receiver is nil. 
+ var sh *ServerlessHarvest + event, err := CreateCustomEvent("myEvent", nil, time.Now()) + if nil != err { + t.Fatal(err) + } + sh.Consume(event) + buf := &bytes.Buffer{} + sh.Write("arn", buf) +} + +func TestServerlessHarvestEmpty(t *testing.T) { + // Test that ServerlessHarvest.Write doesn't do anything if the harvest + // is empty. + sh := NewServerlessHarvest(logger.ShimLogger{}, "the-version", serverlessGetenvShim) + buf := &bytes.Buffer{} + sh.Write("arn", buf) + if 0 != buf.Len() { + t.Error(buf.String()) + } +} + +func BenchmarkServerless(b *testing.B) { + // The JSON creation in ServerlessHarvest.Write has not been optimized. + // This benchmark would be useful for doing so. + sh := NewServerlessHarvest(logger.ShimLogger{}, "the-version", serverlessGetenvShim) + event, err := CreateCustomEvent("myEvent", nil, time.Now()) + if nil != err { + b.Fatal(err) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + sh.Consume(event) + buf := &bytes.Buffer{} + sh.Write("arn", buf) + if buf.Len() == 0 { + b.Fatal(buf.String()) + } + } +} diff --git a/internal/slow_queries.go b/internal/slow_queries.go new file mode 100644 index 000000000..36f435fcd --- /dev/null +++ b/internal/slow_queries.go @@ -0,0 +1,261 @@ +package internal + +import ( + "bytes" + "container/heap" + "hash/fnv" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type queryParameters map[string]interface{} + +func vetQueryParameters(params map[string]interface{}) (queryParameters, error) { + if nil == params { + return nil, nil + } + // Copying the parameters into a new map is safer than modifying the map + // from the customer. 
+	// Copy into a fresh map so the caller's map is never mutated; the first
+	// validation error encountered is reported, but valid entries are still
+	// kept.
+	vetted := make(map[string]interface{})
+	var retErr error
+	for key, val := range params {
+		val, err := ValidateUserAttribute(key, val)
+		if nil != err {
+			retErr = err
+			continue
+		}
+		vetted[key] = val
+	}
+	return queryParameters(vetted), retErr
+}
+
+// WriteJSON writes the query parameters as a JSON object.
+func (q queryParameters) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('{')
+	w := jsonFieldsWriter{buf: buf}
+	for key, val := range q {
+		writeAttributeValueJSON(&w, key, val)
+	}
+	buf.WriteByte('}')
+}
+
+// https://source.datanerd.us/agents/agent-specs/blob/master/Slow-SQLs-LEGACY.md
+
+// slowQueryInstance represents a single datastore call.
+type slowQueryInstance struct {
+	// Fields populated right after the datastore segment finishes:
+
+	Duration           time.Duration
+	DatastoreMetric    string
+	ParameterizedQuery string
+	QueryParameters    queryParameters
+	Host               string
+	PortPathOrID       string
+	DatabaseName       string
+	StackTrace         StackTrace
+
+	TxnEvent
+}
+
+// Aggregation is performed to avoid reporting multiple slow queries with the
+// same query string.  Since some datastore segments may be below the slow
+// query threshold, the aggregation fields Count, Total, and Min should be
+// taken with a grain of salt.
+type slowQuery struct {
+	Count int32         // number of times the query has been observed
+	Total time.Duration // cumulative duration
+	Min   time.Duration // minimum observed duration
+
+	// When Count > 1, slowQueryInstance contains values from the slowest
+	// observation.
+ slowQueryInstance +} + +type slowQueries struct { + priorityQueue []*slowQuery + // lookup maps query strings to indices in the priorityQueue + lookup map[string]int +} + +func (slows *slowQueries) Len() int { + return len(slows.priorityQueue) +} +func (slows *slowQueries) Less(i, j int) bool { + pq := slows.priorityQueue + return pq[i].Duration < pq[j].Duration +} +func (slows *slowQueries) Swap(i, j int) { + pq := slows.priorityQueue + si := pq[i] + sj := pq[j] + pq[i], pq[j] = pq[j], pq[i] + slows.lookup[si.ParameterizedQuery] = j + slows.lookup[sj.ParameterizedQuery] = i +} + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (slows *slowQueries) Push(x interface{}) {} +func (slows *slowQueries) Pop() interface{} { return nil } + +func newSlowQueries(max int) *slowQueries { + return &slowQueries{ + lookup: make(map[string]int, max), + priorityQueue: make([]*slowQuery, 0, max), + } +} + +// Merge is used to merge slow queries from the transaction into the harvest. +func (slows *slowQueries) Merge(other *slowQueries, txnEvent TxnEvent) { + for _, s := range other.priorityQueue { + cp := *s + cp.TxnEvent = txnEvent + slows.observe(cp) + } +} + +// merge aggregates the observations from two slow queries with the same Query. 
+func (slow *slowQuery) merge(other slowQuery) {
+	slow.Count += other.Count
+	slow.Total += other.Total
+
+	if other.Min < slow.Min {
+		slow.Min = other.Min
+	}
+	// Keep the instance details (query, host, stack trace, ...) from the
+	// slowest observation.
+	if other.Duration > slow.Duration {
+		slow.slowQueryInstance = other.slowQueryInstance
+	}
+}
+
+// observeInstance records a single datastore call as a slowQuery with a count
+// of one.
+func (slows *slowQueries) observeInstance(slow slowQueryInstance) {
+	slows.observe(slowQuery{
+		Count:             1,
+		Total:             slow.Duration,
+		Min:               slow.Duration,
+		slowQueryInstance: slow,
+	})
+}
+
+// insertAtIndex places a copy of slow at idx, records its position in the
+// lookup map, and restores the heap invariant.
+func (slows *slowQueries) insertAtIndex(slow slowQuery, idx int) {
+	cpy := new(slowQuery)
+	*cpy = slow
+	slows.priorityQueue[idx] = cpy
+	slows.lookup[slow.ParameterizedQuery] = idx
+	heap.Fix(slows, idx)
+}
+
+func (slows *slowQueries) observe(slow slowQuery) {
+	// Has the query been previously observed?
+	if idx, ok := slows.lookup[slow.ParameterizedQuery]; ok {
+		slows.priorityQueue[idx].merge(slow)
+		heap.Fix(slows, idx)
+		return
+	}
+	// Has the collection reached max capacity?
+	if len(slows.priorityQueue) < cap(slows.priorityQueue) {
+		idx := len(slows.priorityQueue)
+		slows.priorityQueue = slows.priorityQueue[0 : idx+1]
+		slows.insertAtIndex(slow, idx)
+		return
+	}
+	// Is this query slower than the existing fastest?  The heap is a
+	// min-heap on Duration, so index 0 holds the fastest entry.
+	fastest := slows.priorityQueue[0]
+	if slow.Duration > fastest.Duration {
+		delete(slows.lookup, fastest.ParameterizedQuery)
+		slows.insertAtIndex(slow, 0)
+		return
+	}
+}
+
+// The third element of the slow query JSON should be a hash of the query
+// string.  This hash may be used by backend services to aggregate queries
+// which have the same query string.  It is unknown if this is actually used.
+func makeSlowQueryID(query string) uint32 {
+	h := fnv.New32a()
+	h.Write([]byte(query))
+	return h.Sum32()
+}
+
+func (slow *slowQuery) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('[')
+	jsonx.AppendString(buf, slow.TxnEvent.FinalName)
+	buf.WriteByte(',')
+	// Include request.uri if it is included in any destination.
+ // TODO: Change this to the transaction trace segment destination + // once transaction trace segment attribute configuration has been + // added. + uri, _ := slow.TxnEvent.Attrs.GetAgentValue(attributeRequestURI, DestAll) + jsonx.AppendString(buf, uri) + buf.WriteByte(',') + jsonx.AppendInt(buf, int64(makeSlowQueryID(slow.ParameterizedQuery))) + buf.WriteByte(',') + jsonx.AppendString(buf, slow.ParameterizedQuery) + buf.WriteByte(',') + jsonx.AppendString(buf, slow.DatastoreMetric) + buf.WriteByte(',') + jsonx.AppendInt(buf, int64(slow.Count)) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Total.Seconds()*1000.0) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Min.Seconds()*1000.0) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Duration.Seconds()*1000.0) + buf.WriteByte(',') + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + if "" != slow.Host { + w.stringField("host", slow.Host) + } + if "" != slow.PortPathOrID { + w.stringField("port_path_or_id", slow.PortPathOrID) + } + if "" != slow.DatabaseName { + w.stringField("database_name", slow.DatabaseName) + } + if nil != slow.StackTrace { + w.writerField("backtrace", slow.StackTrace) + } + if nil != slow.QueryParameters { + w.writerField("query_parameters", slow.QueryParameters) + } + + sharedBetterCATIntrinsics(&slow.TxnEvent, &w) + + buf.WriteByte('}') + buf.WriteByte(']') +} + +// WriteJSON marshals the collection of slow queries into JSON according to the +// schema expected by the collector. +// +// Note: This JSON does not contain the agentRunID. This is for unknown +// historical reasons. Since the agentRunID is included in the url, +// its use in the other commands' JSON is redundant (although required). 
+func (slows *slowQueries) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + buf.WriteByte('[') + for idx, s := range slows.priorityQueue { + if idx > 0 { + buf.WriteByte(',') + } + s.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') +} + +func (slows *slowQueries) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if 0 == len(slows.priorityQueue) { + return nil, nil + } + estimate := 1024 * len(slows.priorityQueue) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + slows.WriteJSON(buf) + return buf.Bytes(), nil +} + +func (slows *slowQueries) MergeIntoHarvest(newHarvest *Harvest) { +} + +func (slows *slowQueries) EndpointMethod() string { + return cmdSlowSQLs +} diff --git a/internal/slow_queries_test.go b/internal/slow_queries_test.go new file mode 100644 index 000000000..e73a88508 --- /dev/null +++ b/internal/slow_queries_test.go @@ -0,0 +1,287 @@ +package internal + +import ( + "math/rand" + "strconv" + "strings" + "testing" + "time" +) + +func TestEmptySlowQueriesData(t *testing.T) { + slows := newSlowQueries(maxHarvestSlowSQLs) + js, err := slows.Data("agentRunID", time.Now()) + if nil != js || nil != err { + t.Error(string(js), err) + } +} + +func TestSlowQueriesBasic(t *testing.T) { + acfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attr := NewAttributes(acfg) + attr.Agent.Add(attributeRequestURI, "/zip/zap", nil) + txnEvent := TxnEvent{ + FinalName: "WebTransaction/Go/hello", + Duration: 3 * time.Second, + Attrs: attr, + BetterCAT: BetterCAT{ + Enabled: false, + }, + } + + txnSlows := newSlowQueries(maxTxnSlowQueries) + qParams, err := vetQueryParameters(map[string]interface{}{ + strings.Repeat("X", attributeKeyLengthLimit+1): "invalid-key", + "invalid-value": struct{}{}, + "valid": 123, + }) + if nil == err { + t.Error("expected error") + } + txnSlows.observeInstance(slowQueryInstance{ + Duration: 2 * time.Second, + DatastoreMetric: "Datastore/statement/MySQL/users/INSERT", + ParameterizedQuery: "INSERT 
INTO users (name, age) VALUES ($1, $2)", + Host: "db-server-1", + PortPathOrID: "3306", + DatabaseName: "production", + StackTrace: nil, + QueryParameters: qParams, + }) + harvestSlows := newSlowQueries(maxHarvestSlowSQLs) + harvestSlows.Merge(txnSlows, txnEvent) + js, err := harvestSlows.Data("agentRunID", time.Now()) + expect := CompactJSONString(`[[ + [ + "WebTransaction/Go/hello", + "/zip/zap", + 3722056893, + "INSERT INTO users (name, age) VALUES ($1, $2)", + "Datastore/statement/MySQL/users/INSERT", + 1, + 2000, + 2000, + 2000, + { + "host":"db-server-1", + "port_path_or_id":"3306", + "database_name":"production", + "query_parameters":{ + "valid":123 + } + } + ] +]]`) + if nil != err { + t.Error(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestSlowQueriesExcludeURI(t *testing.T) { + c := sampleAttributeConfigInput + c.Attributes.Exclude = []string{"request.uri"} + acfg := CreateAttributeConfig(c, true) + attr := NewAttributes(acfg) + attr.Agent.Add(attributeRequestURI, "/zip/zap", nil) + txnEvent := TxnEvent{ + FinalName: "WebTransaction/Go/hello", + Duration: 3 * time.Second, + Attrs: attr, + BetterCAT: BetterCAT{ + Enabled: false, + }, + } + txnSlows := newSlowQueries(maxTxnSlowQueries) + qParams, err := vetQueryParameters(map[string]interface{}{ + strings.Repeat("X", attributeKeyLengthLimit+1): "invalid-key", + "invalid-value": struct{}{}, + "valid": 123, + }) + if nil == err { + t.Error("expected error") + } + txnSlows.observeInstance(slowQueryInstance{ + Duration: 2 * time.Second, + DatastoreMetric: "Datastore/statement/MySQL/users/INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + Host: "db-server-1", + PortPathOrID: "3306", + DatabaseName: "production", + StackTrace: nil, + QueryParameters: qParams, + }) + harvestSlows := newSlowQueries(maxHarvestSlowSQLs) + harvestSlows.Merge(txnSlows, txnEvent) + js, err := harvestSlows.Data("agentRunID", time.Now()) + expect := CompactJSONString(`[[ 
+ [ + "WebTransaction/Go/hello", + "", + 3722056893, + "INSERT INTO users (name, age) VALUES ($1, $2)", + "Datastore/statement/MySQL/users/INSERT", + 1, + 2000, + 2000, + 2000, + { + "host":"db-server-1", + "port_path_or_id":"3306", + "database_name":"production", + "query_parameters":{ + "valid":123 + } + } + ] +]]`) + if nil != err { + t.Error(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestSlowQueriesAggregation(t *testing.T) { + max := 50 + slows := make([]slowQueryInstance, 3*max) + for i := 0; i < max; i++ { + num := i + 1 + str := strconv.Itoa(num) + duration := time.Duration(num) * time.Second + slow := slowQueryInstance{ + DatastoreMetric: "Datastore/" + str, + ParameterizedQuery: str, + } + slow.Duration = duration + slow.TxnEvent = TxnEvent{ + FinalName: "Txn/0" + str, + } + slows[i*3+0] = slow + slow.Duration = duration + (100 * time.Second) + slow.TxnEvent = TxnEvent{ + FinalName: "Txn/1" + str, + } + slows[i*3+1] = slow + slow.Duration = duration + (200 * time.Second) + slow.TxnEvent = TxnEvent{ + FinalName: "Txn/2" + str, + } + slows[i*3+2] = slow + } + sq := newSlowQueries(10) + seed := int64(99) // arbitrary fixed seed + r := rand.New(rand.NewSource(seed)) + perm := r.Perm(max * 3) + for _, idx := range perm { + sq.observeInstance(slows[idx]) + } + js, err := sq.Data("agentRunID", time.Now()) + expect := CompactJSONString(`[[ + ["Txn/241","",2296612630,"41","Datastore/41",1,241000,241000,241000,{}], + ["Txn/242","",2279835011,"42","Datastore/42",2,384000,142000,242000,{}], + ["Txn/243","",2263057392,"43","Datastore/43",2,386000,143000,243000,{}], + ["Txn/244","",2380500725,"44","Datastore/44",3,432000,44000,244000,{}], + ["Txn/247","",2330167868,"47","Datastore/47",2,394000,147000,247000,{}], + ["Txn/245","",2363723106,"45","Datastore/45",2,290000,45000,245000,{}], + ["Txn/250","",2212577440,"50","Datastore/50",1,250000,250000,250000,{}], + 
["Txn/246","",2346945487,"46","Datastore/46",2,392000,146000,246000,{}], + ["Txn/249","",2430833582,"49","Datastore/49",3,447000,49000,249000,{}], + ["Txn/248","",2447611201,"48","Datastore/48",3,444000,48000,248000,{}] +]]`) + if nil != err { + t.Error(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestSlowQueriesBetterCAT(t *testing.T) { + acfg := CreateAttributeConfig(sampleAttributeConfigInput, true) + attr := NewAttributes(acfg) + attr.Agent.Add(attributeRequestURI, "/zip/zap", nil) + txnEvent := TxnEvent{ + FinalName: "WebTransaction/Go/hello", + Duration: 3 * time.Second, + Attrs: attr, + BetterCAT: BetterCAT{ + Enabled: true, + ID: "txn-id", + Priority: 0.5, + }, + } + + txnEvent.BetterCAT.Inbound = &Payload{ + payloadCaller: payloadCaller{ + TransportType: "HTTP", + Type: "Browser", + App: "caller-app", + Account: "caller-account", + }, + ID: "caller-id", + TransactionID: "caller-parent-id", + TracedID: "trace-id", + TransportDuration: 2 * time.Second, + } + + txnSlows := newSlowQueries(maxTxnSlowQueries) + qParams, err := vetQueryParameters(map[string]interface{}{ + strings.Repeat("X", attributeKeyLengthLimit+1): "invalid-key", + "invalid-value": struct{}{}, + "valid": 123, + }) + if nil == err { + t.Error("expected error") + } + txnSlows.observeInstance(slowQueryInstance{ + Duration: 2 * time.Second, + DatastoreMetric: "Datastore/statement/MySQL/users/INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + Host: "db-server-1", + PortPathOrID: "3306", + DatabaseName: "production", + StackTrace: nil, + QueryParameters: qParams, + }) + harvestSlows := newSlowQueries(maxHarvestSlowSQLs) + harvestSlows.Merge(txnSlows, txnEvent) + js, err := harvestSlows.Data("agentRunID", time.Now()) + expect := CompactJSONString(`[[ + [ + "WebTransaction/Go/hello", + "/zip/zap", + 3722056893, + "INSERT INTO users (name, age) VALUES ($1, $2)", + "Datastore/statement/MySQL/users/INSERT", + 1, + 2000, + 2000, + 2000, 
+ { + "host":"db-server-1", + "port_path_or_id":"3306", + "database_name":"production", + "query_parameters":{"valid":123}, + "parent.type": "Browser", + "parent.app": "caller-app", + "parent.account": "caller-account", + "parent.transportType": "HTTP", + "parent.transportDuration": 2, + "guid":"txn-id", + "traceId":"trace-id", + "priority":0.500000, + "sampled":false + } + ] +]]`) + if nil != err { + t.Error(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} diff --git a/internal/span_events.go b/internal/span_events.go new file mode 100644 index 000000000..6a670008b --- /dev/null +++ b/internal/span_events.go @@ -0,0 +1,143 @@ +package internal + +import ( + "bytes" + "time" +) + +// https://source.datanerd.us/agents/agent-specs/blob/master/Span-Events.md + +type spanCategory string + +const ( + spanCategoryHTTP spanCategory = "http" + spanCategoryDatastore = "datastore" + spanCategoryGeneric = "generic" +) + +// SpanEvent represents a span event, necessary to support Distributed Tracing. +type SpanEvent struct { + TraceID string + GUID string + ParentID string + TransactionID string + Sampled bool + Priority Priority + Timestamp time.Time + Duration time.Duration + Name string + Category spanCategory + Component string + Kind string + IsEntrypoint bool + Attributes spanAttributeMap +} + +// WriteJSON prepares JSON in the format expected by the collector. 
+func (e *SpanEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", "Span") + w.stringField("traceId", e.TraceID) + w.stringField("guid", e.GUID) + if "" != e.ParentID { + w.stringField("parentId", e.ParentID) + } + w.stringField("transactionId", e.TransactionID) + w.boolField("sampled", e.Sampled) + w.writerField("priority", e.Priority) + w.intField("timestamp", e.Timestamp.UnixNano()/(1000*1000)) // in milliseconds + w.floatField("duration", e.Duration.Seconds()) + w.stringField("name", e.Name) + w.stringField("category", string(e.Category)) + if e.IsEntrypoint { + w.boolField("nr.entryPoint", true) + } + if e.Component != "" { + w.stringField("component", e.Component) + } + if e.Kind != "" { + w.stringField("span.kind", e.Kind) + } + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteByte('{') + // user attributes section is unused + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteByte('{') + + w = jsonFieldsWriter{buf: buf} + for key, val := range e.Attributes { + w.writerField(key.String(), val) + } + + buf.WriteByte('}') + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. +func (e *SpanEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +type spanEvents struct { + *analyticsEvents +} + +func newSpanEvents(max int) *spanEvents { + return &spanEvents{ + analyticsEvents: newAnalyticsEvents(max), + } +} + +func (events *spanEvents) addEvent(e *SpanEvent, cat *BetterCAT) { + e.TraceID = cat.TraceID() + e.TransactionID = cat.ID + e.Sampled = cat.Sampled + e.Priority = cat.Priority + events.addEventPopulated(e) +} + +func (events *spanEvents) addEventPopulated(e *SpanEvent) { + events.analyticsEvents.addEvent(analyticsEvent{priority: e.Priority, jsonWriter: e}) +} + +// MergeFromTransaction merges the span events from a transaction into the +// harvest's span events. 
This should only be called if the transaction was +// sampled and span events are enabled. +func (events *spanEvents) MergeFromTransaction(txndata *TxnData) { + root := &SpanEvent{ + GUID: txndata.getRootSpanID(), + Timestamp: txndata.Start, + Duration: txndata.Duration, + Name: txndata.FinalName, + Category: spanCategoryGeneric, + IsEntrypoint: true, + } + if nil != txndata.BetterCAT.Inbound { + root.ParentID = txndata.BetterCAT.Inbound.ID + } + events.addEvent(root, &txndata.BetterCAT) + + for _, evt := range txndata.spanEvents { + events.addEvent(evt, &txndata.BetterCAT) + } +} + +func (events *spanEvents) MergeIntoHarvest(h *Harvest) { + h.SpanEvents.mergeFailed(events.analyticsEvents) +} + +func (events *spanEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.CollectorJSON(agentRunID) +} + +func (events *spanEvents) EndpointMethod() string { + return cmdSpanEvents +} diff --git a/internal/span_events_test.go b/internal/span_events_test.go new file mode 100644 index 000000000..21776c926 --- /dev/null +++ b/internal/span_events_test.go @@ -0,0 +1,259 @@ +package internal + +import ( + "encoding/json" + "testing" + "time" +) + +func testSpanEventJSON(t *testing.T, e *SpanEvent, expect string) { + js, err := json.Marshal(e) + if nil != err { + t.Error(err) + return + } + expect = CompactJSONString(expect) + if string(js) != expect { + t.Errorf("\nexpect=%s\nactual=%s\n", expect, string(js)) + } +} + +var ( + sampleSpanEvent = SpanEvent{ + TraceID: "trace-id", + GUID: "guid", + TransactionID: "txn-id", + Sampled: true, + Priority: 0.5, + Timestamp: timeFromUnixMilliseconds(1488393111000), + Duration: 2 * time.Second, + Name: "myName", + Category: spanCategoryGeneric, + IsEntrypoint: true, + } +) + +func TestSpanEventGenericRootMarshal(t *testing.T) { + e := sampleSpanEvent + testSpanEventJSON(t, &e, `[ + { + "type":"Span", + "traceId":"trace-id", + "guid":"guid", + "transactionId":"txn-id", + "sampled":true, + 
"priority":0.500000, + "timestamp":1488393111000, + "duration":2, + "name":"myName", + "category":"generic", + "nr.entryPoint":true + }, + {}, + {}]`) +} + +func TestSpanEventDatastoreMarshal(t *testing.T) { + e := sampleSpanEvent + + // Alter sample span event for this test case + e.IsEntrypoint = false + e.ParentID = "parent-id" + e.Category = spanCategoryDatastore + e.Kind = "client" + e.Component = "mySql" + e.Attributes.addString(spanAttributeDBStatement, "SELECT * from foo") + e.Attributes.addString(spanAttributeDBInstance, "123") + e.Attributes.addString(spanAttributePeerAddress, "{host}:{portPathOrId}") + e.Attributes.addString(spanAttributePeerHostname, "host") + + expectEvent(t, &e, WantEvent{ + Intrinsics: map[string]interface{}{ + "type": "Span", + "traceId": "trace-id", + "guid": "guid", + "parentId": "parent-id", + "transactionId": "txn-id", + "sampled": true, + "priority": 0.500000, + "timestamp": 1.488393111e+12, + "duration": 2, + "name": "myName", + "category": "datastore", + "component": "mySql", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "db.statement": "SELECT * from foo", + "db.instance": "123", + "peer.address": "{host}:{portPathOrId}", + "peer.hostname": "host", + }, + }) +} + +func TestSpanEventDatastoreWithoutHostMarshal(t *testing.T) { + e := sampleSpanEvent + + // Alter sample span event for this test case + e.IsEntrypoint = false + e.ParentID = "parent-id" + e.Category = spanCategoryDatastore + e.Kind = "client" + e.Component = "mySql" + e.Attributes.addString(spanAttributeDBStatement, "SELECT * from foo") + e.Attributes.addString(spanAttributeDBInstance, "123") + + // According to CHANGELOG.md, as of version 1.5, if `Host` and + // `PortPathOrID` are not provided in a Datastore segment, they + // do not appear as `"unknown"` in transaction traces and slow + // query traces. 
To maintain parity with the other offerings of + // the Go Agent, neither do Span Events. + expectEvent(t, &e, WantEvent{ + Intrinsics: map[string]interface{}{ + "type": "Span", + "traceId": "trace-id", + "guid": "guid", + "parentId": "parent-id", + "transactionId": "txn-id", + "sampled": true, + "priority": 0.500000, + "timestamp": 1.488393111e+12, + "duration": 2, + "name": "myName", + "category": "datastore", + "component": "mySql", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "db.statement": "SELECT * from foo", + "db.instance": "123", + }, + }) +} + +func TestSpanEventExternalMarshal(t *testing.T) { + e := sampleSpanEvent + + // Alter sample span event for this test case + e.ParentID = "parent-id" + e.IsEntrypoint = false + e.Category = spanCategoryHTTP + e.Kind = "client" + e.Component = "http" + e.Attributes.addString(spanAttributeHTTPURL, "http://url.com") + e.Attributes.addString(spanAttributeHTTPMethod, "GET") + + expectEvent(t, &e, WantEvent{ + Intrinsics: map[string]interface{}{ + "type": "Span", + "traceId": "trace-id", + "guid": "guid", + "parentId": "parent-id", + "transactionId": "txn-id", + "sampled": true, + "priority": 0.500000, + "timestamp": 1.488393111e+12, + "duration": 2, + "name": "myName", + "category": "http", + "component": "http", + "span.kind": "client", + }, + UserAttributes: map[string]interface{}{}, + AgentAttributes: map[string]interface{}{ + "http.url": "http://url.com", + "http.method": "GET", + }, + }) +} + +func TestSpanEventsEndpointMethod(t *testing.T) { + events := &spanEvents{} + m := events.EndpointMethod() + if m != cmdSpanEvents { + t.Error(m) + } +} + +func TestSpanEventsMergeFromTransaction(t *testing.T) { + args := &TxnData{} + args.Start = time.Now() + args.Duration = 1 * time.Second + args.FinalName = "finalName" + args.BetterCAT.Sampled = true + args.BetterCAT.Priority = 0.7 + args.BetterCAT.Enabled = true + args.BetterCAT.ID = "txn-id" + 
args.BetterCAT.Inbound = &Payload{ + ID: "inbound-id", + TracedID: "inbound-trace-id", + } + args.rootSpanID = "root-span-id" + + args.spanEvents = []*SpanEvent{ + { + GUID: "span-1-id", + ParentID: "root-span-id", + Timestamp: time.Now(), + Duration: 3 * time.Millisecond, + Name: "span1", + Category: spanCategoryGeneric, + IsEntrypoint: false, + }, + { + GUID: "span-2-id", + ParentID: "span-1-id", + Timestamp: time.Now(), + Duration: 3 * time.Millisecond, + Name: "span2", + Category: spanCategoryGeneric, + IsEntrypoint: false, + }, + } + + spanEvents := newSpanEvents(10) + spanEvents.MergeFromTransaction(args) + + ExpectSpanEvents(t, spanEvents, []WantEvent{ + { + Intrinsics: map[string]interface{}{ + "name": "finalName", + "sampled": true, + "priority": 0.7, + "category": spanCategoryGeneric, + "parentId": "inbound-id", + "nr.entryPoint": true, + "guid": "root-span-id", + "transactionId": "txn-id", + "traceId": "inbound-trace-id", + }, + }, + { + Intrinsics: map[string]interface{}{ + "name": "span1", + "sampled": true, + "priority": 0.7, + "category": spanCategoryGeneric, + "parentId": "root-span-id", + "guid": "span-1-id", + "transactionId": "txn-id", + "traceId": "inbound-trace-id", + }, + }, + { + Intrinsics: map[string]interface{}{ + "name": "span2", + "sampled": true, + "priority": 0.7, + "category": spanCategoryGeneric, + "parentId": "span-1-id", + "guid": "span-2-id", + "transactionId": "txn-id", + "traceId": "inbound-trace-id", + }, + }, + }) +} diff --git a/internal/sqlparse/sqlparse.go b/internal/sqlparse/sqlparse.go new file mode 100644 index 000000000..34adb3a40 --- /dev/null +++ b/internal/sqlparse/sqlparse.go @@ -0,0 +1,60 @@ +package sqlparse + +import ( + "regexp" + "strings" + + newrelic "github.com/newrelic/go-agent" +) + +func extractTable(s string) string { + s = extractTableRegex.ReplaceAllString(s, "") + if idx := strings.Index(s, "."); idx > 0 { + s = s[idx+1:] + } + return s +} + +var ( + basicTable = `[^)(\]\[\}\{\s,;]+` + enclosedTable = 
`[\[\(\{]` + `\s*` + basicTable + `\s*` + `[\]\)\}]` + tablePattern = `(` + `\s+` + basicTable + `|` + `\s*` + enclosedTable + `)` + extractTableRegex = regexp.MustCompile(`[\s` + "`" + `"'\(\)\{\}\[\]]*`) + updateRegex = regexp.MustCompile(`(?is)^update(?:\s+(?:low_priority|ignore|or|rollback|abort|replace|fail|only))*` + tablePattern) + sqlOperations = map[string]*regexp.Regexp{ + "select": regexp.MustCompile(`(?is)^.*?\sfrom` + tablePattern), + "delete": regexp.MustCompile(`(?is)^.*?\sfrom` + tablePattern), + "insert": regexp.MustCompile(`(?is)^.*?\sinto?` + tablePattern), + "update": updateRegex, + "call": nil, + "create": nil, + "drop": nil, + "show": nil, + "set": nil, + "exec": nil, + "execute": nil, + "alter": nil, + "commit": nil, + "rollback": nil, + } + firstWordRegex = regexp.MustCompile(`^\w+`) + cCommentRegex = regexp.MustCompile(`(?is)/\*.*?\*/`) + lineCommentRegex = regexp.MustCompile(`(?im)(?:--|#).*?$`) + sqlPrefixRegex = regexp.MustCompile(`^[\s;]*`) +) + +// ParseQuery parses table and operation from the SQL query string. 
+func ParseQuery(segment *newrelic.DatastoreSegment, query string) { + s := cCommentRegex.ReplaceAllString(query, "") + s = lineCommentRegex.ReplaceAllString(s, "") + s = sqlPrefixRegex.ReplaceAllString(s, "") + op := strings.ToLower(firstWordRegex.FindString(s)) + if rg, ok := sqlOperations[op]; ok { + segment.Operation = op + if nil != rg { + if m := rg.FindStringSubmatch(s); len(m) > 1 { + segment.Collection = extractTable(m[1]) + } + } + } +} diff --git a/internal/sqlparse/sqlparse_test.go b/internal/sqlparse/sqlparse_test.go new file mode 100644 index 000000000..b4ef2a5a8 --- /dev/null +++ b/internal/sqlparse/sqlparse_test.go @@ -0,0 +1,191 @@ +package sqlparse + +import ( + "testing" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal/crossagent" +) + +type sqlTestcase struct { + Input string `json:"input"` + Operation string `json:"operation"` + Table string `json:"table"` +} + +func (tc sqlTestcase) test(t *testing.T) { + var segment newrelic.DatastoreSegment + ParseQuery(&segment, tc.Input) + if tc.Operation == "other" { + // Allow for matching of Operation "other" to "" + if segment.Operation != "" { + t.Errorf("operation mismatch query='%s' wanted='%s' got='%s'", + tc.Input, tc.Operation, segment.Operation) + } + } else if segment.Operation != tc.Operation { + t.Errorf("operation mismatch query='%s' wanted='%s' got='%s'", + tc.Input, tc.Operation, segment.Operation) + } + // The Go agent subquery behavior does not match the PHP Agent. 
+ if tc.Table == "(subquery)" { + return + } + if tc.Table != segment.Collection { + t.Errorf("table mismatch query='%s' wanted='%s' got='%s'", + tc.Input, tc.Table, segment.Collection) + } +} + +func TestParseSQLCrossAgent(t *testing.T) { + var tcs []sqlTestcase + err := crossagent.ReadJSON("sql_parsing.json", &tcs) + if err != nil { + t.Fatal(err) + } + + for _, tc := range tcs { + tc.test(t) + } +} + +func TestParseSQLSubQuery(t *testing.T) { + for _, tc := range []sqlTestcase{ + {Input: "SELECT * FROM (SELECT * FROM foobar)", Operation: "select", Table: "foobar"}, + {Input: "SELECT * FROM (SELECT * FROM foobar) WHERE x > y", Operation: "select", Table: "foobar"}, + {Input: "SELECT * FROM(SELECT * FROM foobar) WHERE x > y", Operation: "select", Table: "foobar"}, + } { + tc.test(t) + } +} + +func TestParseSQLOther(t *testing.T) { + for _, tc := range []sqlTestcase{ + // Test that we handle table names enclosed in brackets. + {Input: "SELECT * FROM [foo]", Operation: "select", Table: "foo"}, + {Input: "SELECT * FROM[foo]", Operation: "select", Table: "foo"}, + {Input: "SELECT * FROM [ foo ]", Operation: "select", Table: "foo"}, + {Input: "SELECT * FROM [ 'foo' ]", Operation: "select", Table: "foo"}, + {Input: "SELECT * FROM[ `something`.'foo' ]", Operation: "select", Table: "foo"}, + // Test that we handle the cheese. 
+ {Input: "SELECT fromage FROM fromagier", Operation: "select", Table: "fromagier"}, + } { + tc.test(t) + } +} + +func TestParseSQLUpdateExtraKeywords(t *testing.T) { + for _, tc := range []sqlTestcase{ + {Input: "update or rollback foo", Operation: "update", Table: "foo"}, + {Input: "update only foo", Operation: "update", Table: "foo"}, + {Input: "update low_priority ignore{foo}", Operation: "update", Table: "foo"}, + } { + tc.test(t) + } +} + +func TestLineComment(t *testing.T) { + for _, tc := range []sqlTestcase{ + { + Input: `SELECT -- * FROM tricky + * FROM foo`, + Operation: "select", + Table: "foo", + }, + { + Input: `SELECT # * FROM tricky + * FROM foo`, + Operation: "select", + Table: "foo", + }, + { + Input: ` -- SELECT * FROM tricky + SELECT * FROM foo`, + Operation: "select", + Table: "foo", + }, + { + Input: ` # SELECT * FROM tricky + SELECT * FROM foo`, + Operation: "select", + Table: "foo", + }, + { + Input: `SELECT * FROM -- tricky + foo`, + Operation: "select", + Table: "foo", + }, + } { + tc.test(t) + } +} + +func TestSemicolonPrefix(t *testing.T) { + for _, tc := range []sqlTestcase{ + { + Input: `;select * from foo`, + Operation: "select", + Table: "foo", + }, + { + Input: ` ;; ; select * from foo`, + Operation: "select", + Table: "foo", + }, + { + Input: ` ; + SELECT * FROM foo`, + Operation: "select", + Table: "foo", + }, + } { + tc.test(t) + } +} + +func TestDollarSignTable(t *testing.T) { + for _, tc := range []sqlTestcase{ + { + Input: `select * from $dollar_100_$`, + Operation: "select", + Table: "$dollar_100_$", + }, + } { + tc.test(t) + } +} + +func TestPriorityQuery(t *testing.T) { + // Test that we handle: + // https://dev.mysql.com/doc/refman/8.0/en/insert.html + // INSERT [LOW_PRIORITY | DELAYED | HIGH_PRIORITY] [IGNORE] [INTO] tbl_name + for _, tc := range []sqlTestcase{ + { + Input: `INSERT HIGH_PRIORITY INTO employee VALUES('Tom',12345,'Sales',100)`, + Operation: "insert", + Table: "employee", + }, + } { + tc.test(t) + } +} + 
+func TestExtractTable(t *testing.T) { + for idx, tc := range []string{ + "table", + "`table`", + `"table"`, + "`database.table`", + "`database`.table", + "database.`table`", + "`database`.`table`", + " { table }", + "\n[table]", + "\t ( 'database'.`table` ) ", + } { + table := extractTable(tc) + if table != "table" { + t.Error(idx, table) + } + } +} diff --git a/internal/stack_frame.go b/internal/stack_frame.go new file mode 100644 index 000000000..837bcb707 --- /dev/null +++ b/internal/stack_frame.go @@ -0,0 +1,24 @@ +// +build go1.7 + +package internal + +import "runtime" + +func (st StackTrace) frames() []stacktraceFrame { + if len(st) == 0 { + return nil + } + frames := runtime.CallersFrames(st) // CallersFrames is only available in Go 1.7+ + fs := make([]stacktraceFrame, 0, maxStackTraceFrames) + var frame runtime.Frame + more := true + for more { + frame, more = frames.Next() + fs = append(fs, stacktraceFrame{ + Name: frame.Function, + File: frame.File, + Line: int64(frame.Line), + }) + } + return fs +} diff --git a/internal/stack_frame_pre_1_7.go b/internal/stack_frame_pre_1_7.go new file mode 100644 index 000000000..b9d824788 --- /dev/null +++ b/internal/stack_frame_pre_1_7.go @@ -0,0 +1,34 @@ +// +build !go1.7 + +package internal + +import "runtime" + +func (st StackTrace) frames() []stacktraceFrame { + fs := make([]stacktraceFrame, len(st)) + for idx, pc := range st { + fs[idx] = lookupFrame(pc) + } + return fs +} + +func lookupFrame(pc uintptr) stacktraceFrame { + // The Golang runtime package documentation says "To look up the file + // and line number of the call itself, use pc[i]-1. As an exception to + // this rule, if pc[i-1] corresponds to the function runtime.sigpanic, + // then pc[i] is the program counter of a faulting instruction and + // should be used without any subtraction." + // + // TODO: Fully understand when this subtraction is necessary. 
	// Note this is a contains conditional rather than a prefix
	// conditional in order to handle anonymous functions like:
	// "go.(*struct { github.com/newrelic/go-agent.threadWithExtras }).NoticeError"
+ for len(frames) > 0 && frames[0].isAgent() { + frames = frames[1:] + } + // Truncate excessively long stack traces (they may be provided by the + // customer). + if len(frames) > maxStackTraceFrames { + frames = frames[0:maxStackTraceFrames] + } + + buf.WriteByte('[') + for idx, frame := range frames { + if idx > 0 { + buf.WriteByte(',') + } + frame.WriteJSON(buf) + } + buf.WriteByte(']') +} + +// WriteJSON adds the stack trace to the buffer in the JSON form expected by the +// collector. +func (st StackTrace) WriteJSON(buf *bytes.Buffer) { + frames := st.frames() + writeFrames(buf, frames) +} + +// MarshalJSON prepares JSON in the format expected by the collector. +func (st StackTrace) MarshalJSON() ([]byte, error) { + estimate := 256 * len(st) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + st.WriteJSON(buf) + + return buf.Bytes(), nil +} diff --git a/internal/stacktrace_test.go b/internal/stacktrace_test.go new file mode 100644 index 000000000..5651231b3 --- /dev/null +++ b/internal/stacktrace_test.go @@ -0,0 +1,148 @@ +package internal + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/stacktracetest" +) + +func TestGetStackTrace(t *testing.T) { + stack := GetStackTrace() + js, err := json.Marshal(stack) + if nil != err { + t.Fatal(err) + } + if nil == js { + t.Fatal(string(js)) + } +} + +func TestLongStackTraceLimitsFrames(t *testing.T) { + st := stacktracetest.CountedCall(maxStackTraceFrames+20, func() []uintptr { + return GetStackTrace() + }) + if len(st) != maxStackTraceFrames { + t.Error("Unexpected size of stacktrace", maxStackTraceFrames, len(st)) + } + l := len(StackTrace(st).frames()) + if l != maxStackTraceFrames { + t.Error("Unexpected number of frames", maxStackTraceFrames, l) + } +} + +func TestManyStackTraceFramesLimitsOutput(t *testing.T) { + frames := make([]stacktraceFrame, maxStackTraceFrames+20) + expect := `[ + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + 
{},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{} + ]` + estimate := 256 * len(frames) + output := bytes.NewBuffer(make([]byte, 0, estimate)) + writeFrames(output, frames) + if CompactJSONString(expect) != output.String() { + t.Error("Unexpected JSON output", CompactJSONString(expect), output.String()) + } +} + +func TestStacktraceFrames(t *testing.T) { + // This stacktrace taken from Go 1.11 + inputFrames := []stacktraceFrame{ + { + File: "/Users/will/Desktop/gopath/src/github.com/newrelic/go-agent/internal/stacktrace.go", + Name: "github.com/newrelic/go-agent/internal.GetStackTrace", + Line: 17, + }, + { + File: "/Users/will/Desktop/gopath/src/github.com/newrelic/go-agent/internal_txn.go", + Name: "github.com/newrelic/go-agent.(*txn).NoticeError", + Line: 696, + }, + { + File: "\u003cautogenerated\u003e", + Name: "go.(*struct { github.com/newrelic/go-agent.threadWithExtras }).NoticeError", + Line: 1, + }, + { + File: "/Users/will/Desktop/gopath/src/github.com/newrelic/go-agent/internal_attributes_test.go", + Name: "github.com/newrelic/go-agent.TestAddAttributeSecurityPolicyDisablesInclude", + Line: 68, + }, + { + File: "/Users/will/.gvm/gos/go1.11/src/testing/testing.go", + Name: "testing.tRunner", + Line: 827, + }, + { + File: "/Users/will/.gvm/gos/go1.11/src/runtime/asm_amd64.s", + Name: "runtime.goexit", + Line: 1333, + }, + } + buf := &bytes.Buffer{} + writeFrames(buf, inputFrames) + expectedJSON := `[ + { + "name":"testing.tRunner", + "filepath":"/Users/will/.gvm/gos/go1.11/src/testing/testing.go", + "line":827 + }, + { + "name":"runtime.goexit", + "filepath":"/Users/will/.gvm/gos/go1.11/src/runtime/asm_amd64.s", + "line":1333 + } + ]` + testExpectedJSON(t, expectedJSON, buf.String()) +} + +func TestStackTraceTopFrame(t *testing.T) { + // This test 
// TopStackFrame is a function that will appear in the stacktrace.
The parameter f should be a function that returns a StackTrace (but it is referred to as []uintptr +// in order to not create a circular dependency on the internal package) +func CountedCall(i int, f func() []uintptr) []uintptr { + if i > 0 { + return CountedCall(i-1, f) + } + return f() +} diff --git a/internal/synthetics_test.go b/internal/synthetics_test.go new file mode 100644 index 000000000..700a906c8 --- /dev/null +++ b/internal/synthetics_test.go @@ -0,0 +1,234 @@ +package internal + +import ( + "encoding/json" + "fmt" + "net/http" + "testing" + + "github.com/newrelic/go-agent/internal/cat" + "github.com/newrelic/go-agent/internal/crossagent" +) + +type harvestedTxnTrace struct { + startTimeMs float64 + durationToResponse float64 + transactionName string + requestURL string + traceDetails struct { + attributes struct { + agentAttributes eventAttributes + userAttributes eventAttributes + intrinsics eventAttributes + } + } + catGUID string + forcePersistFlag bool + xraySessionID string + syntheticsResourceID string +} + +func (h *harvestedTxnTrace) UnmarshalJSON(data []byte) error { + var arr []interface{} + + if err := json.Unmarshal(data, &arr); err != nil { + return err + } + + if len(arr) != 10 { + return fmt.Errorf("unexpected number of transaction trace items: %d", len(arr)) + } + + h.startTimeMs = arr[0].(float64) + h.durationToResponse = arr[1].(float64) + h.transactionName = arr[2].(string) + if nil != arr[3] { + h.requestURL = arr[3].(string) + } + // Item 4 -- the trace -- will be dealt with shortly. + h.catGUID = arr[5].(string) + // Item 6 intentionally ignored. 
+ h.forcePersistFlag = arr[7].(bool) + if arr[8] != nil { + h.xraySessionID = arr[8].(string) + } + h.syntheticsResourceID = arr[9].(string) + + traceDetails := arr[4].([]interface{}) + attributes := traceDetails[4].(map[string]interface{}) + + h.traceDetails.attributes.agentAttributes = attributes["agentAttributes"].(map[string]interface{}) + h.traceDetails.attributes.userAttributes = attributes["userAttributes"].(map[string]interface{}) + h.traceDetails.attributes.intrinsics = attributes["intrinsics"].(map[string]interface{}) + + return nil +} + +func harvestTxnDataTrace(t *TxnData) (*harvestedTxnTrace, error) { + // Since transaction trace JSON is built using string manipulation, we have + // to do an awkward marshal/unmarshal shuffle to be able to verify the + // intrinsics. + ht := HarvestTrace{ + TxnEvent: t.TxnEvent, + Trace: t.TxnTrace, + } + js, err := ht.MarshalJSON() + if err != nil { + return nil, err + } + + trace := &harvestedTxnTrace{} + if err := json.Unmarshal(js, trace); err != nil { + return nil, err + } + + return trace, nil +} + +func TestSynthetics(t *testing.T) { + var testcases []struct { + Name string `json:"name"` + Settings struct { + AgentEncodingKey string `json:"agentEncodingKey"` + SyntheticsEncodingKey string `json:"syntheticsEncodingKey"` + TransactionGUID string `json:"transactionGuid"` + TrustedAccountIDs []int `json:"trustedAccountIds"` + } `json:"settings"` + InputHeaderPayload json.RawMessage `json:"inputHeaderPayload"` + InputObfuscatedHeader map[string]string `json:"inputObfuscatedHeader"` + OutputTransactionTrace struct { + Header struct { + Field9 string `json:"field_9"` + } `json:"header"` + ExpectedIntrinsics map[string]string `json:"expectedIntrinsics"` + NonExpectedIntrinsics []string `json:"nonExpectedIntrinsics"` + } `json:"outputTransactionTrace"` + OutputTransactionEvent struct { + ExpectedAttributes map[string]string `json:"expectedAttributes"` + NonExpectedAttributes []string `json:"nonExpectedAttributes"` + } 
`json:"outputTransactionEvent"` + OutputExternalRequestHeader struct { + ExpectedHeader map[string]string `json:"expectedHeader"` + NonExpectedHeader []string `json:"nonExpectedHeader"` + } `json:"outputExternalRequestHeader"` + } + + err := crossagent.ReadJSON("synthetics/synthetics.json", &testcases) + if err != nil { + t.Fatal(err) + } + + for _, tc := range testcases { + // Fake enough transaction data to run the test. + tr := &TxnData{ + Name: "txn", + } + + tr.CrossProcess.Init(false, false, &ConnectReply{ + CrossProcessID: "1#1", + TrustedAccounts: make(map[int]struct{}), + EncodingKey: tc.Settings.AgentEncodingKey, + }) + + // Set up the trusted accounts. + for _, account := range tc.Settings.TrustedAccountIDs { + tr.CrossProcess.TrustedAccounts[account] = struct{}{} + } + + // Set up the GUID. + if tc.Settings.TransactionGUID != "" { + tr.CrossProcess.GUID = tc.Settings.TransactionGUID + } + + // Parse the input header, ignoring any errors. + inputHeaders := make(http.Header) + for k, v := range tc.InputObfuscatedHeader { + inputHeaders.Add(k, v) + } + + tr.CrossProcess.handleInboundRequestEncodedSynthetics(inputHeaders.Get(cat.NewRelicSyntheticsName)) + + // Get the headers for an external request. + metadata, err := tr.CrossProcess.CreateCrossProcessMetadata("txn", "app") + if err != nil { + t.Fatalf("%s: error creating outbound request headers: %v", tc.Name, err) + } + + // Verify that the header either exists or doesn't exist, depending on the + // test case. 
+ headers := MetadataToHTTPHeader(metadata) + for key, value := range tc.OutputExternalRequestHeader.ExpectedHeader { + obfuscated := headers.Get(key) + if obfuscated == "" { + t.Errorf("%s: expected output header %s not found", tc.Name, key) + } else if value != obfuscated { + t.Errorf("%s: expected output header %s mismatch: expected=%s; got=%s", tc.Name, key, value, obfuscated) + } + } + + for _, key := range tc.OutputExternalRequestHeader.NonExpectedHeader { + if value := headers.Get(key); value != "" { + t.Errorf("%s: output header %s expected to be missing; got %s", tc.Name, key, value) + } + } + + // Harvest the trace. + trace, err := harvestTxnDataTrace(tr) + if err != nil { + t.Errorf("%s: error harvesting trace data: %v", tc.Name, err) + } + + // Check the synthetics resource ID. + if trace.syntheticsResourceID != tc.OutputTransactionTrace.Header.Field9 { + t.Errorf("%s: unexpected field 9: expected=%s; got=%s", tc.Name, tc.OutputTransactionTrace.Header.Field9, trace.syntheticsResourceID) + } + + // Check for expected intrinsics. + for key, value := range tc.OutputTransactionTrace.ExpectedIntrinsics { + // First, check if the key exists at all. + if !trace.traceDetails.attributes.intrinsics.has(key) { + t.Fatalf("%s: missing intrinsic %s", tc.Name, key) + } + + // Everything we're looking for is a string, so we can be a little lazy + // here. + if err := trace.traceDetails.attributes.intrinsics.isString(key, value); err != nil { + t.Errorf("%s: %v", tc.Name, err) + } + } + + // Now we verify that the unexpected intrinsics didn't miraculously appear. + for _, key := range tc.OutputTransactionTrace.NonExpectedIntrinsics { + if trace.traceDetails.attributes.intrinsics.has(key) { + t.Errorf("%s: expected intrinsic %s to be missing; instead, got value %v", tc.Name, key, trace.traceDetails.attributes.intrinsics[key]) + } + } + + // Harvest the event. 
+ event, err := harvestTxnDataEvent(tr) + if err != nil { + t.Errorf("%s: error harvesting event data: %v", tc.Name, err) + } + + // Now we have the event, let's look for the expected intrinsics. + for key, value := range tc.OutputTransactionEvent.ExpectedAttributes { + // First, check if the key exists at all. + if !event.intrinsics.has(key) { + t.Fatalf("%s: missing intrinsic %s", tc.Name, key) + } + + // Everything we're looking for is a string, so we can be a little lazy + // here. + if err := event.intrinsics.isString(key, value); err != nil { + t.Errorf("%s: %v", tc.Name, err) + } + } + + // Now we verify that the unexpected intrinsics didn't miraculously appear. + for _, key := range tc.OutputTransactionEvent.NonExpectedAttributes { + if event.intrinsics.has(key) { + t.Errorf("%s: expected intrinsic %s to be missing; instead, got value %v", tc.Name, key, event.intrinsics[key]) + } + } + } +} diff --git a/internal/sysinfo/bootid.go b/internal/sysinfo/bootid.go new file mode 100644 index 000000000..780058d5e --- /dev/null +++ b/internal/sysinfo/bootid.go @@ -0,0 +1,50 @@ +package sysinfo + +import ( + "bytes" + "fmt" + "io/ioutil" + "runtime" +) + +// BootID returns the boot ID of the executing kernel. 
+func BootID() (string, error) { + if "linux" != runtime.GOOS { + return "", ErrFeatureUnsupported + } + data, err := ioutil.ReadFile("/proc/sys/kernel/random/boot_id") + if err != nil { + return "", err + } + + return validateBootID(data) +} + +type invalidBootID string + +func (e invalidBootID) Error() string { + return fmt.Sprintf("Boot id has unrecognized format, id=%q", string(e)) +} + +func isASCIIByte(b byte) bool { + return (b >= 0x20 && b <= 0x7f) +} + +func validateBootID(data []byte) (string, error) { + // We're going to go for the permissive reading of + // https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md: + // any ASCII (excluding control characters, because I'm pretty sure that's not + // in the spirit of the spec) string will be sent up to and including 128 + // bytes in length. + trunc := bytes.TrimSpace(data) + if len(trunc) > 128 { + trunc = trunc[:128] + } + for _, b := range trunc { + if !isASCIIByte(b) { + return "", invalidBootID(data) + } + } + + return string(trunc), nil +} diff --git a/internal/sysinfo/docker.go b/internal/sysinfo/docker.go new file mode 100644 index 000000000..a4f7c004b --- /dev/null +++ b/internal/sysinfo/docker.go @@ -0,0 +1,114 @@ +package sysinfo + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" +) + +var ( + // ErrDockerNotFound is returned if a Docker ID is not found in + // /proc/self/cgroup + ErrDockerNotFound = errors.New("Docker ID not found") +) + +// DockerID attempts to detect Docker. 
	// The DockerID must be a 64-character lowercase hex string.
	// Be greedy and match anything 64 characters or longer to spot invalid IDs.
+ return "", err + } + return id, nil + } + + return "", ErrDockerNotFound +} + +func isCPUCol(col []byte) bool { + // Sometimes we have multiple subsystems in one line, as in this example + // from: + // https://source.datanerd.us/newrelic/cross_agent_tests/blob/master/docker_container_id/docker-1.1.2-native-driver-systemd.txt + // + // 3:cpuacct,cpu:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope + splitCSV := func(r rune) bool { return r == ',' } + subsysCPU := []byte("cpu") + + for _, subsys := range bytes.FieldsFunc(col, splitCSV) { + if bytes.Equal(subsysCPU, subsys) { + return true + } + } + return false +} + +func isHex(r rune) bool { + return ('0' <= r && r <= '9') || ('a' <= r && r <= 'f') +} + +func validateDockerID(id string) error { + if len(id) != 64 { + return fmt.Errorf("%s is not %d characters long", id, dockerIDLength) + } + + for _, c := range id { + if !isHex(c) { + return fmt.Errorf("Character: %c is not hex in string %s", c, id) + } + } + + return nil +} diff --git a/internal/sysinfo/docker_test.go b/internal/sysinfo/docker_test.go new file mode 100644 index 000000000..b4c6ffbe9 --- /dev/null +++ b/internal/sysinfo/docker_test.go @@ -0,0 +1,51 @@ +package sysinfo + +import ( + "bytes" + "path/filepath" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestDockerIDCrossAgent(t *testing.T) { + var testCases []struct { + File string `json:"filename"` + ID string `json:"containerId"` + } + + dir := "docker_container_id" + err := crossagent.ReadJSON(filepath.Join(dir, "cases.json"), &testCases) + if err != nil { + t.Fatal(err) + } + + for _, test := range testCases { + file := filepath.Join(dir, test.File) + input, err := crossagent.ReadFile(file) + if err != nil { + t.Error(err) + continue + } + + got, _ := parseDockerID(bytes.NewReader(input)) + if got != test.ID { + t.Errorf("%s != %s", got, test.ID) + } + } +} + +func TestDockerIDValidation(t *testing.T) { + err := 
validateDockerID("baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1239") + if nil != err { + t.Error("Validation should pass with a 64-character hex string.") + } + err = validateDockerID("39ffbba") + if nil == err { + t.Error("Validation should have failed with short string.") + } + err = validateDockerID("z000000000000000000000000000000000000000000000000100000000000000") + if nil == err { + t.Error("Validation should have failed with non-hex characters.") + } +} diff --git a/internal/sysinfo/errors.go b/internal/sysinfo/errors.go new file mode 100644 index 000000000..d4b684b5f --- /dev/null +++ b/internal/sysinfo/errors.go @@ -0,0 +1,10 @@ +package sysinfo + +import ( + "errors" +) + +var ( + // ErrFeatureUnsupported indicates unsupported platform. + ErrFeatureUnsupported = errors.New("That feature is not supported on this platform") +) diff --git a/internal/sysinfo/hostname_generic.go b/internal/sysinfo/hostname_generic.go new file mode 100644 index 000000000..ccef4fcab --- /dev/null +++ b/internal/sysinfo/hostname_generic.go @@ -0,0 +1,10 @@ +// +build !linux + +package sysinfo + +import "os" + +// Hostname returns the host name. +func Hostname() (string, error) { + return os.Hostname() +} diff --git a/internal/sysinfo/hostname_linux.go b/internal/sysinfo/hostname_linux.go new file mode 100644 index 000000000..e2300854d --- /dev/null +++ b/internal/sysinfo/hostname_linux.go @@ -0,0 +1,50 @@ +package sysinfo + +import ( + "os" + "syscall" +) + +// Hostname returns the host name. +func Hostname() (string, error) { + // Try the builtin API first, which is designed to match the output of + // /bin/hostname, and fallback to uname(2) if that fails to match the + // behavior of gethostname(2) as implemented by glibc. On Linux, all + // these method should result in the same value because sethostname(2) + // limits the hostname to 64 bytes, the same size of the nodename field + // returned by uname(2). 
Note that this correspondence is not true on
	// other platforms.
+func parseProcMeminfo(f io.Reader) (uint64, error) { + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if m := meminfoRe.FindSubmatch(scanner.Bytes()); m != nil { + kb, err := strconv.ParseUint(string(m[1]), 10, 64) + if err != nil { + return 0, err + } + return kb * 1024, nil + } + } + + err := scanner.Err() + if err == nil { + err = errMemTotalNotFound + } + return 0, err +} diff --git a/internal/sysinfo/memtotal_darwin.go b/internal/sysinfo/memtotal_darwin.go new file mode 100644 index 000000000..3c40f42d5 --- /dev/null +++ b/internal/sysinfo/memtotal_darwin.go @@ -0,0 +1,29 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + mib := []int32{6 /* CTL_HW */, 24 /* HW_MEMSIZE */} + + buf := make([]byte, 8) + bufLen := uintptr(8) + + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), + uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)), + uintptr(0), uintptr(0)) + + if e1 != 0 { + return 0, e1 + } + + if bufLen != 8 { + return 0, syscall.EIO + } + + return *(*uint64)(unsafe.Pointer(&buf[0])), nil +} diff --git a/internal/sysinfo/memtotal_darwin_test.go b/internal/sysinfo/memtotal_darwin_test.go new file mode 100644 index 000000000..3c928b5c1 --- /dev/null +++ b/internal/sysinfo/memtotal_darwin_test.go @@ -0,0 +1,46 @@ +package sysinfo + +import ( + "errors" + "os/exec" + "regexp" + "strconv" + "testing" +) + +var re = regexp.MustCompile(`hw\.memsize:\s*(\d+)`) + +func darwinSysctlMemoryBytes() (uint64, error) { + out, err := exec.Command("/usr/sbin/sysctl", "hw.memsize").Output() + if err != nil { + return 0, err + } + + match := re.FindSubmatch(out) + if match == nil { + return 0, errors.New("memory size not found in sysctl output") + } + + bts, err := strconv.ParseUint(string(match[1]), 10, 64) + if err != nil { + return 0, err + } + + return bts, nil +} + +func 
TestPhysicalMemoryBytes(t *testing.T) { + mem, err := PhysicalMemoryBytes() + if err != nil { + t.Fatal(err) + } + + mem2, err := darwinSysctlMemoryBytes() + if nil != err { + t.Fatal(err) + } + + if mem != mem2 { + t.Error(mem, mem2) + } +} diff --git a/internal/sysinfo/memtotal_freebsd.go b/internal/sysinfo/memtotal_freebsd.go new file mode 100644 index 000000000..2e82320ac --- /dev/null +++ b/internal/sysinfo/memtotal_freebsd.go @@ -0,0 +1,32 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + mib := []int32{6 /* CTL_HW */, 5 /* HW_PHYSMEM */} + + buf := make([]byte, 8) + bufLen := uintptr(8) + + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), + uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)), + uintptr(0), uintptr(0)) + + if e1 != 0 { + return 0, e1 + } + + switch bufLen { + case 4: + return uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), nil + case 8: + return *(*uint64)(unsafe.Pointer(&buf[0])), nil + default: + return 0, syscall.EIO + } +} diff --git a/internal/sysinfo/memtotal_freebsd_test.go b/internal/sysinfo/memtotal_freebsd_test.go new file mode 100644 index 000000000..649953026 --- /dev/null +++ b/internal/sysinfo/memtotal_freebsd_test.go @@ -0,0 +1,46 @@ +package sysinfo + +import ( + "errors" + "os/exec" + "regexp" + "strconv" + "testing" +) + +var re = regexp.MustCompile(`hw\.physmem:\s*(\d+)`) + +func freebsdSysctlMemoryBytes() (uint64, error) { + out, err := exec.Command("/sbin/sysctl", "hw.physmem").Output() + if err != nil { + return 0, err + } + + match := re.FindSubmatch(out) + if match == nil { + return 0, errors.New("memory size not found in sysctl output") + } + + bts, err := strconv.ParseUint(string(match[1]), 10, 64) + if err != nil { + return 0, err + } + + return bts, nil +} + +func TestPhysicalMemoryBytes(t *testing.T) { + mem, err := 
PhysicalMemoryBytes() + if err != nil { + t.Fatal(err) + } + + mem2, err := freebsdSysctlMemoryBytes() + if nil != err { + t.Fatal(err) + } + + if mem != mem2 { + t.Error(mem, mem2) + } +} diff --git a/internal/sysinfo/memtotal_linux.go b/internal/sysinfo/memtotal_linux.go new file mode 100644 index 000000000..958e56993 --- /dev/null +++ b/internal/sysinfo/memtotal_linux.go @@ -0,0 +1,14 @@ +package sysinfo + +import "os" + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + f, err := os.Open("/proc/meminfo") + if err != nil { + return 0, err + } + defer f.Close() + + return parseProcMeminfo(f) +} diff --git a/internal/sysinfo/memtotal_solaris.go b/internal/sysinfo/memtotal_solaris.go new file mode 100644 index 000000000..4f1c818e5 --- /dev/null +++ b/internal/sysinfo/memtotal_solaris.go @@ -0,0 +1,26 @@ +package sysinfo + +/* +#include