
New Relic support

DarthSim committed 6 years ago
commit 0f92ff2532
100 changed files with 12446 additions and 7 deletions
  1. Gopkg.lock (+17 -0)
  2. README.md (+5 -3)
  3. config.go (+7 -0)
  4. docs/configuration.md (+9 -0)
  5. docs/new_relic.md (+15 -0)
  6. download.go (+5 -0)
  7. newrelic.go (+67 -0)
  8. process.go (+5 -0)
  9. server.go (+15 -1)
  10. timer.go (+9 -3)
  11. vendor/github.com/newrelic/go-agent/.travis.yml (+32 -0)
  12. vendor/github.com/newrelic/go-agent/CHANGELOG.md (+317 -0)
  13. vendor/github.com/newrelic/go-agent/CONTRIBUTING.md (+9 -0)
  14. vendor/github.com/newrelic/go-agent/GUIDE.md (+614 -0)
  15. vendor/github.com/newrelic/go-agent/LICENSE.txt (+49 -0)
  16. vendor/github.com/newrelic/go-agent/README.md (+162 -0)
  17. vendor/github.com/newrelic/go-agent/application.go (+64 -0)
  18. vendor/github.com/newrelic/go-agent/attributes.go (+42 -0)
  19. vendor/github.com/newrelic/go-agent/config.go (+297 -0)
  20. vendor/github.com/newrelic/go-agent/context.go (+31 -0)
  21. vendor/github.com/newrelic/go-agent/context_stub.go (+10 -0)
  22. vendor/github.com/newrelic/go-agent/datastore.go (+27 -0)
  23. vendor/github.com/newrelic/go-agent/errors.go (+51 -0)
  24. vendor/github.com/newrelic/go-agent/instrumentation.go (+70 -0)
  25. vendor/github.com/newrelic/go-agent/internal/adaptive_sampler.go (+104 -0)
  26. vendor/github.com/newrelic/go-agent/internal/analytics_events.go (+136 -0)
  27. vendor/github.com/newrelic/go-agent/internal/apdex.go (+48 -0)
  28. vendor/github.com/newrelic/go-agent/internal/attributes.go (+527 -0)
  29. vendor/github.com/newrelic/go-agent/internal/cat/appdata.go (+111 -0)
  30. vendor/github.com/newrelic/go-agent/internal/cat/errors.go (+15 -0)
  31. vendor/github.com/newrelic/go-agent/internal/cat/headers.go (+13 -0)
  32. vendor/github.com/newrelic/go-agent/internal/cat/id.go (+41 -0)
  33. vendor/github.com/newrelic/go-agent/internal/cat/path_hash.go (+35 -0)
  34. vendor/github.com/newrelic/go-agent/internal/cat/synthetics.go (+82 -0)
  35. vendor/github.com/newrelic/go-agent/internal/cat/txndata.go (+96 -0)
  36. vendor/github.com/newrelic/go-agent/internal/collector.go (+339 -0)
  37. vendor/github.com/newrelic/go-agent/internal/compress.go (+19 -0)
  38. vendor/github.com/newrelic/go-agent/internal/connect_reply.go (+150 -0)
  39. vendor/github.com/newrelic/go-agent/internal/cross_process_http.go (+74 -0)
  40. vendor/github.com/newrelic/go-agent/internal/custom_event.go (+103 -0)
  41. vendor/github.com/newrelic/go-agent/internal/custom_events.go (+38 -0)
  42. vendor/github.com/newrelic/go-agent/internal/custom_metric.go (+12 -0)
  43. vendor/github.com/newrelic/go-agent/internal/distributed_tracing.go (+211 -0)
  44. vendor/github.com/newrelic/go-agent/internal/environment.go (+61 -0)
  45. vendor/github.com/newrelic/go-agent/internal/error_events.go (+67 -0)
  46. vendor/github.com/newrelic/go-agent/internal/errors.go (+174 -0)
  47. vendor/github.com/newrelic/go-agent/internal/expect.go (+664 -0)
  48. vendor/github.com/newrelic/go-agent/internal/harvest.go (+200 -0)
  49. vendor/github.com/newrelic/go-agent/internal/intrinsics.go (+39 -0)
  50. vendor/github.com/newrelic/go-agent/internal/json_object_writer.go (+61 -0)
  51. vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go (+174 -0)
  52. vendor/github.com/newrelic/go-agent/internal/labels.go (+23 -0)
  53. vendor/github.com/newrelic/go-agent/internal/limits.go (+61 -0)
  54. vendor/github.com/newrelic/go-agent/internal/logger/logger.go (+89 -0)
  55. vendor/github.com/newrelic/go-agent/internal/metric_names.go (+237 -0)
  56. vendor/github.com/newrelic/go-agent/internal/metric_rules.go (+164 -0)
  57. vendor/github.com/newrelic/go-agent/internal/metrics.go (+262 -0)
  58. vendor/github.com/newrelic/go-agent/internal/obfuscate.go (+37 -0)
  59. vendor/github.com/newrelic/go-agent/internal/priority.go (+27 -0)
  60. vendor/github.com/newrelic/go-agent/internal/queuing.go (+72 -0)
  61. vendor/github.com/newrelic/go-agent/internal/rand.go (+59 -0)
  62. vendor/github.com/newrelic/go-agent/internal/sampler.go (+145 -0)
  63. vendor/github.com/newrelic/go-agent/internal/security_policies.go (+101 -0)
  64. vendor/github.com/newrelic/go-agent/internal/segment_terms.go (+145 -0)
  65. vendor/github.com/newrelic/go-agent/internal/slow_queries.go (+254 -0)
  66. vendor/github.com/newrelic/go-agent/internal/span_events.go (+172 -0)
  67. vendor/github.com/newrelic/go-agent/internal/stacktrace.go (+81 -0)
  68. vendor/github.com/newrelic/go-agent/internal/sysinfo/bootid.go (+50 -0)
  69. vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go (+114 -0)
  70. vendor/github.com/newrelic/go-agent/internal/sysinfo/errors.go (+10 -0)
  71. vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go (+10 -0)
  72. vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go (+50 -0)
  73. vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go (+40 -0)
  74. vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go (+29 -0)
  75. vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go (+32 -0)
  76. vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go (+14 -0)
  77. vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go (+26 -0)
  78. vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go (+23 -0)
  79. vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go (+11 -0)
  80. vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go (+26 -0)
  81. vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go (+34 -0)
  82. vendor/github.com/newrelic/go-agent/internal/tracing.go (+597 -0)
  83. vendor/github.com/newrelic/go-agent/internal/txn_cross_process.go (+419 -0)
  84. vendor/github.com/newrelic/go-agent/internal/txn_events.go (+193 -0)
  85. vendor/github.com/newrelic/go-agent/internal/txn_trace.go (+410 -0)
  86. vendor/github.com/newrelic/go-agent/internal/url.go (+43 -0)
  87. vendor/github.com/newrelic/go-agent/internal/utilities.go (+107 -0)
  88. vendor/github.com/newrelic/go-agent/internal/utilization/aws.go (+89 -0)
  89. vendor/github.com/newrelic/go-agent/internal/utilization/azure.go (+102 -0)
  90. vendor/github.com/newrelic/go-agent/internal/utilization/gcp.go (+152 -0)
  91. vendor/github.com/newrelic/go-agent/internal/utilization/pcf.go (+80 -0)
  92. vendor/github.com/newrelic/go-agent/internal/utilization/provider.go (+59 -0)
  93. vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go (+206 -0)
  94. vendor/github.com/newrelic/go-agent/internal_app.go (+668 -0)
  95. vendor/github.com/newrelic/go-agent/internal_config.go (+158 -0)
  96. vendor/github.com/newrelic/go-agent/internal_response_writer.go (+121 -0)
  97. vendor/github.com/newrelic/go-agent/internal_txn.go (+854 -0)
  98. vendor/github.com/newrelic/go-agent/log.go (+30 -0)
  99. vendor/github.com/newrelic/go-agent/segments.go (+130 -0)
  100. vendor/github.com/newrelic/go-agent/transaction.go (+107 -0)

+ 17 - 0
Gopkg.lock

@@ -64,6 +64,22 @@
   pruneopts = "UT"
   revision = "3de1538a83bcb1749aa54482113e5cd06e4fb938"
 
+[[projects]]
+  digest = "1:01f60ec456e70c9be269902c08fa189a9d47056a419c4a91a02f19df5e36ab59"
+  name = "github.com/newrelic/go-agent"
+  packages = [
+    ".",
+    "internal",
+    "internal/cat",
+    "internal/jsonx",
+    "internal/logger",
+    "internal/sysinfo",
+    "internal/utilization",
+  ]
+  pruneopts = "UT"
+  revision = "46d73e6be8b4faeee70850d0df829e4fe00d6819"
+  version = "v2.1.0"
+
 [[projects]]
   digest = "1:72b36febaabad58e1864de2b43de05689ce27a2c9a582a61a25e71a31ba23d0b"
   name = "golang.org/x/image"
@@ -91,6 +107,7 @@
     "github.com/aws/aws-sdk-go/aws/session",
     "github.com/aws/aws-sdk-go/service/s3",
     "github.com/matoous/go-nanoid",
+    "github.com/newrelic/go-agent",
     "golang.org/x/image/webp",
     "golang.org/x/net/netutil",
   ]

+ 5 - 3
README.md

@@ -64,6 +64,7 @@ Massive processing of remote images is a potentially dangerous thing, security-w
    * [Presets](./docs/configuration.md#presets)
    * [Serving local files](./docs/configuration.md#serving-local-files)
    * [Serving files from Amazon S3](./docs/configuration.md#serving-files-from-amazon-s3)
+   * [New Relic metrics](./docs/configuration.md#new-relic-metrics)
    * [Miscellaneous](./docs/configuration.md#miscellaneous)
 4. [Generating the URL](./docs/generating_the_url_basic.md)
    * [Basic](./docs/generating_the_url_basic.md)
@@ -73,9 +74,10 @@ Massive processing of remote images is a potentially dangerous thing, security-w
 6. [Presets](./docs/presets.md)
 7. [Serving local files](./docs/serving_local_files.md)
 8. [Serving files from Amazon S3](./docs/serving_files_from_s3.md)
-9. [Source image formats support](./docs/source_image_formats_support.md)
-10. [About processing pipeline](./docs/about_processing_pipeline.md)
-11. [Health check](./docs/healthcheck.md)
+9. [New Relic](./docs/new_relic.md)
+10. [Source image formats support](./docs/source_image_formats_support.md)
+11. [About processing pipeline](./docs/about_processing_pipeline.md)
+12. [Health check](./docs/healthcheck.md)
 
 ## Author
 

+ 7 - 0
config.go

@@ -155,6 +155,9 @@ type config struct {
 	WatermarkPath    string
 	WatermarkURL     string
 	WatermarkOpacity float64
+
+	NewRelicAppName string
+	NewRelicKey     string
 }
 
 var conf = config{
@@ -240,6 +243,9 @@ func init() {
 	strEnvConfig(&conf.WatermarkURL, "IMGPROXY_WATERMARK_URL")
 	floatEnvConfig(&conf.WatermarkOpacity, "IMGPROXY_WATERMARK_OPACITY")
 
+	strEnvConfig(&conf.NewRelicAppName, "IMGPROXY_NEW_RELIC_APP_NAME")
+	strEnvConfig(&conf.NewRelicKey, "IMGPROXY_NEW_RELIC_KEY")
+
 	if len(conf.Key) == 0 {
 		warning("Key is not defined, so signature checking is disabled")
 		conf.AllowInsecure = true
@@ -324,5 +330,6 @@ func init() {
 	}
 
 	initDownloading()
+	initNewrelic()
 	initVips()
 }
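
The diff above registers the two variables through imgproxy's string-valued env helper. The helper itself is not part of this diff, so the following is only an assumption about its shape:

```go
// Hypothetical sketch of strEnvConfig (not in this commit): overwrite the
// target only when the environment variable is actually set.
func strEnvConfig(s *string, name string) {
	if env := os.Getenv(name); len(env) > 0 {
		*s = env
	}
}
```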

+ 9 - 0
docs/configuration.md

@@ -122,6 +122,15 @@ imgproxy can process files from Amazon S3 buckets, but this feature is disabled
 
 Check out the [Serving files from S3](./serving_files_from_s3.md) guide to learn more.
 
+### New Relic metrics
+
+imgproxy can send its metrics to New Relic. Specify your New Relic license key to activate this feature:
+
+* `IMGPROXY_NEW_RELIC_KEY` - New Relic license key;
+* `IMGPROXY_NEW_RELIC_APP_NAME` - application name. If not specified, `imgproxy` will be used as the application name.
+
+Check out the [New Relic](./new_relic.md) guide to learn more.
+
 ### Miscellaneous
 
 * `IMGPROXY_BASE_URL`: base URL prefix that will be added to every requested image URL. For example, if the base URL is `http://example.com/images` and `/path/to/image.png` is requested, imgproxy will download the source image from `http://example.com/images/path/to/image.png`. Default: blank.

+ 15 - 0
docs/new_relic.md

@@ -0,0 +1,15 @@
+# New Relic
+
+imgproxy can send its metrics to New Relic. To use this feature, do the following:
+
+1. Register at New Relic to get a license key;

+2. Set `IMGPROXY_NEW_RELIC_KEY` environment variable to the license key;
+3. _(optional)_ Set `IMGPROXY_NEW_RELIC_APP_NAME` environment variable to the desired application name.
+
+imgproxy will send the following info to New Relic:
+
+* CPU and memory usage;
+* Response time;
+* Image downloading time;
+* Image processing time;
+* Errors that occurred while downloading and processing images.

+ 5 - 0
download.go

@@ -136,6 +136,11 @@ func readAndCheckImage(ctx context.Context, res *http.Response) (context.Context
 func downloadImage(ctx context.Context) (context.Context, context.CancelFunc, error) {
 	url := fmt.Sprintf("%s%s", conf.BaseURL, getImageURL(ctx))
 
+	if newRelicEnabled {
+		newRelicCancel := startNewRelicSegment(ctx, "Downloading image")
+		defer newRelicCancel()
+	}
+
 	res, err := downloadClient.Get(url)
 	if err != nil {
 		return ctx, func() {}, err

+ 67 - 0
newrelic.go

@@ -0,0 +1,67 @@
+package main
+
+import (
+	"context"
+	"log"
+	"net/http"
+	"time"
+
+	newrelic "github.com/newrelic/go-agent"
+)
+
+var (
+	newRelicApp     newrelic.Application
+	newRelicEnabled bool
+
+	newRelicTransactionCtxKey = ctxKey("newRelicTransaction")
+)
+
+func initNewrelic() {
+	if len(conf.NewRelicKey) == 0 {
+		return
+	}
+
+	name := conf.NewRelicAppName
+	if len(name) == 0 {
+		name = "imgproxy"
+	}
+
+	var err error
+
+	config := newrelic.NewConfig(name, conf.NewRelicKey)
+	newRelicApp, err = newrelic.NewApplication(config)
+
+	if err != nil {
+		log.Fatalf("Can't init New Relic agent: %s", err)
+	}
+
+	newRelicEnabled = true
+}
+
+func startNewRelicTransaction(ctx context.Context, rw http.ResponseWriter, r *http.Request) (context.Context, context.CancelFunc) {
+	txn := newRelicApp.StartTransaction("request", rw, r)
+	cancel := func() { txn.End() }
+	return context.WithValue(ctx, newRelicTransactionCtxKey, txn), cancel
+}
+
+func startNewRelicSegment(ctx context.Context, name string) context.CancelFunc {
+	txn := ctx.Value(newRelicTransactionCtxKey).(newrelic.Transaction)
+	segment := newrelic.StartSegment(txn, name)
+	return func() { segment.End() }
+}
+
+func sendErrorToNewRelic(ctx context.Context, err error) {
+	txn := ctx.Value(newRelicTransactionCtxKey).(newrelic.Transaction)
+	txn.NoticeError(err)
+}
+
+func sendTimeoutToNewRelic(ctx context.Context, d time.Duration) {
+	txn := ctx.Value(newRelicTransactionCtxKey).(newrelic.Transaction)
+	txn.NoticeError(newrelic.Error{
+		Message: "Timeout",
+		Class:   "Timeout",
+		Attributes: map[string]interface{}{
+			"time": d.Seconds(),
+		},
+	})
+}
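
Note that `startNewRelicSegment` and the two error helpers type-assert the transaction out of the context without a comma-ok check, so they panic if the context never passed through `startNewRelicTransaction`; the `newRelicEnabled` guards at every call site are what keep this safe. A defensive variant, purely as a sketch and not part of the commit, could tolerate a missing transaction:

```go
// Hypothetical defensive variant: return a no-op cancel func instead of
// panicking when no transaction is stored on the context.
func startNewRelicSegmentSafe(ctx context.Context, name string) context.CancelFunc {
	txn, ok := ctx.Value(newRelicTransactionCtxKey).(newrelic.Transaction)
	if !ok {
		return func() {}
	}
	segment := newrelic.StartSegment(txn, name)
	return func() { segment.End() }
}
```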

+ 5 - 0
process.go

@@ -217,6 +217,11 @@ func calcCrop(width, height int, po *processingOptions) (left, top int) {
 }
 
 func processImage(ctx context.Context) ([]byte, error) {
+	if newRelicEnabled {
+		newRelicCancel := startNewRelicSegment(ctx, "Processing image")
+		defer newRelicCancel()
+	}
+
 	defer C.vips_cleanup()
 
 	data := getImageData(ctx).Bytes()

+ 15 - 1
server.go

@@ -194,6 +194,14 @@ func (h *httpHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 		panic(errInvalidSecret)
 	}
 
+	ctx := context.Background()
+
+	if newRelicEnabled {
+		var newRelicCancel context.CancelFunc
+		ctx, newRelicCancel = startNewRelicTransaction(ctx, rw, r)
+		defer newRelicCancel()
+	}
+
 	h.lock()
 	defer h.unlock()
 
@@ -203,7 +211,7 @@ func (h *httpHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	ctx, timeoutCancel := startTimer(time.Duration(conf.WriteTimeout) * time.Second)
+	ctx, timeoutCancel := startTimer(ctx, time.Duration(conf.WriteTimeout)*time.Second)
 	defer timeoutCancel()
 
 	ctx, err := parsePath(ctx, r)
@@ -214,6 +222,9 @@ func (h *httpHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 	ctx, downloadcancel, err := downloadImage(ctx)
 	defer downloadcancel()
 	if err != nil {
+		if newRelicEnabled {
+			sendErrorToNewRelic(ctx, err)
+		}
 		panic(newError(404, err.Error(), "Image is unreachable"))
 	}
 
@@ -232,6 +243,9 @@ func (h *httpHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 
 	imageData, err := processImage(ctx)
 	if err != nil {
+		if newRelicEnabled {
+			sendErrorToNewRelic(ctx, err)
+		}
 		panic(newError(500, err.Error(), "Error occurred while processing image"))
 	}
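
Taken together, the handler changes give each request this lifecycle (a condensed sketch of the flow above, not literal code from the diff; the fixed timeout stands in for `conf.WriteTimeout`):

```go
// Condensed request lifecycle after this change.
func serve(rw http.ResponseWriter, r *http.Request) {
	ctx := context.Background()
	if newRelicEnabled {
		var cancel context.CancelFunc
		ctx, cancel = startNewRelicTransaction(ctx, rw, r) // txn stored in ctx
		defer cancel()                                     // txn.End()
	}
	ctx, timeoutCancel := startTimer(ctx, 10*time.Second) // inherits the txn
	defer timeoutCancel()
	// downloadImage and processImage each run inside a segment; their errors
	// are reported via sendErrorToNewRelic before panicking to the recovery
	// handler, and timeouts are reported from checkTimeout.
}
```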
 

+ 9 - 3
timer.go

@@ -8,9 +8,9 @@ import (
 
 var timerSinceCtxKey = ctxKey("timerSince")
 
-func startTimer(d time.Duration) (context.Context, context.CancelFunc) {
+func startTimer(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
 	return context.WithTimeout(
-		context.WithValue(context.Background(), timerSinceCtxKey, time.Now()),
+		context.WithValue(ctx, timerSinceCtxKey, time.Now()),
 		d,
 	)
 }
@@ -22,7 +22,13 @@ func getTimerSince(ctx context.Context) time.Duration {
 func checkTimeout(ctx context.Context) {
 	select {
 	case <-ctx.Done():
-		panic(newError(503, fmt.Sprintf("Timeout after %v", getTimerSince(ctx)), "Timeout"))
+		d := getTimerSince(ctx)
+
+		if newRelicEnabled {
+			sendTimeoutToNewRelic(ctx, d)
+		}
+
+		panic(newError(503, fmt.Sprintf("Timeout after %v", d), "Timeout"))
 	default:
 		// Go ahead
 	}
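
The signature change matters because context values flow from parent to child: the New Relic transaction stored by `startNewRelicTransaction` survives `context.WithTimeout`, so `checkTimeout` and the segment helpers can still reach it after the deadline is attached. In miniature:

```go
// Values set on the parent context remain visible through WithTimeout.
func example() {
	parent := context.WithValue(context.Background(), timerSinceCtxKey, time.Now())
	ctx, cancel := context.WithTimeout(parent, 5*time.Second)
	defer cancel()
	_ = ctx.Value(timerSinceCtxKey) // still the time.Time stored on the parent
}
```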

+ 32 - 0
vendor/github.com/newrelic/go-agent/.travis.yml

@@ -0,0 +1,32 @@
+language: go
+go_import_path: github.com/newrelic/go-agent
+go:
+    - 1.3
+    - 1.4    
+    - 1.5    
+    - 1.6    
+    - 1.7
+    - 1.8    
+    - 1.9
+    - tip
+
+# Skip the install step. Don't `go get` dependencies. Only build with the
+# code in vendor/
+install: true
+
+matrix:
+  allow_failures:
+    - go: tip
+
+# golint has dependencies that only work on Go 1.7 or later. See also
+# https://github.com/golang/lint/issues/400.
+before_script:
+  - GO_FILES=$(find . -iname '*.go' -type f | grep -v /vendor/)
+  - DO_GOLINT=$(echo $TRAVIS_GO_VERSION | perl -wne 'chomp; if (/^[0-9]+\.[0-9]+/) { my ($major, $minor) = split /\./; print "new" if $minor >= 7 || $major > 1 } elsif (/^tip$/) { print "new" }' | grep new > /dev/null; echo $?)
+  - if [[ "$DO_GOLINT" -eq "0" ]]; then go get github.com/golang/lint/golint; fi
+
+script:
+  - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "1"; fi; # gofmt
+  - go test -v -race ./... # go test
+  - go vet ./... # go vet
+  - if [[ "$DO_GOLINT" -eq 0 ]]; then golint -set_exit_status $(go list ./...); fi # golint

+ 317 - 0
vendor/github.com/newrelic/go-agent/CHANGELOG.md

@@ -0,0 +1,317 @@
+## ChangeLog
+
+## 2.1.0
+
+* The Go Agent now supports distributed tracing.
+
+  Distributed tracing lets you see the path that a request takes as it travels through your distributed system. By
+  showing the distributed activity through a unified view, you can troubleshoot and understand a complex system better
+  than ever before.
+
+  Distributed tracing is available with an APM Pro or equivalent subscription. To see a complete distributed trace, you
+  need to enable the feature on a set of neighboring services. Enabling distributed tracing changes the behavior of
+  some New Relic features, so carefully consult the
+  [transition guide](https://docs.newrelic.com/docs/transition-guide-distributed-tracing) before you enable this
+  feature.
+
+  To enable distributed tracing, set the following fields in your config.  Note that distributed tracing and cross
+  application tracing cannot be used simultaneously.
+
+```
+  config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__")
+  config.CrossApplicationTracer.Enabled = false
+  config.DistributedTracer.Enabled = true
+```
+
+  Please refer to the
+  [distributed tracing section of the guide](GUIDE.md#distributed-tracing)
+  for more detail on how to ensure you get the most out of the Go agent's distributed tracing support.
+
+* Added functions [NewContext](https://godoc.org/github.com/newrelic/go-agent#NewContext)
+  and [FromContext](https://godoc.org/github.com/newrelic/go-agent#FromContext)
+  for adding and retrieving the Transaction from a Context.  Handlers
+  instrumented by
+  [WrapHandle](https://godoc.org/github.com/newrelic/go-agent#WrapHandle),
+  [WrapHandleFunc](https://godoc.org/github.com/newrelic/go-agent#WrapHandleFunc),
+  and [nrgorilla.InstrumentRoutes](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrgorilla/v1#InstrumentRoutes)
+  may use [FromContext](https://godoc.org/github.com/newrelic/go-agent#FromContext)
+  on the request's context to access the Transaction.
+  Thanks to @caarlos0 for the contribution!  Though [NewContext](https://godoc.org/github.com/newrelic/go-agent#NewContext)
+  and [FromContext](https://godoc.org/github.com/newrelic/go-agent#FromContext)
+  require Go 1.7+ (when [context](https://golang.org/pkg/context/) was added),
+  [RequestWithTransactionContext](https://godoc.org/github.com/newrelic/go-agent#RequestWithTransactionContext) is always exported so that it can be used in all framework and library
+  instrumentation.
+
+## 2.0.0
+
+* The `End()` functions defined on the `Segment`, `DatastoreSegment`, and
+  `ExternalSegment` types now receive the segment as a pointer, rather than as
+  a value. This prevents unexpected behaviour when a call to `End()` is
+  deferred before one or more fields are changed on the segment.
+
+  In practice, this is likely to only affect this pattern:
+
+    ```go
+    defer newrelic.DatastoreSegment{
+      // ...
+    }.End()
+    ```
+
+  Instead, you will now need to separate the literal from the deferred call:
+
+    ```go
+    ds := newrelic.DatastoreSegment{
+      // ...
+    }
+    defer ds.End()
+    ```
+
+  When creating custom and external segments, we recommend using
+  [`newrelic.StartSegment()`](https://godoc.org/github.com/newrelic/go-agent#StartSegment)
+  and
+  [`newrelic.StartExternalSegment()`](https://godoc.org/github.com/newrelic/go-agent#StartExternalSegment),
+  respectively.
+
+* Added GoDoc badge to README.  Thanks to @mrhwick for the contribution!
+
+* `Config.UseTLS` configuration setting has been removed to increase security.
+   TLS will now always be used in communication with New Relic Servers.
+
+## 1.11.0
+
+* We've closed the Issues tab on GitHub. Please visit our
+  [support site](https://support.newrelic.com) to get timely help with any
+  problems you're having, or to report issues.
+
+* Added support for Cross Application Tracing (CAT). Please refer to the
+  [CAT section of the guide](GUIDE.md#cross-application-tracing)
+  for more detail on how to ensure you get the most out of the Go agent's new
+  CAT support.
+
+* The agent now collects additional metadata when running within Amazon Web
+  Services, Google Cloud Platform, Microsoft Azure, and Pivotal Cloud Foundry.
+  This information is used to provide an enhanced experience when the agent is
+  deployed on those platforms.
+
+## 1.10.0
+
+* Added new `RecordCustomMetric` method to [Application](https://godoc.org/github.com/newrelic/go-agent#Application).
+  This functionality can be used to track averages or counters without using
+  custom events.
+  * [Custom Metric Documentation](https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-data/collect-custom-metrics)
+
+* Fixed import needed for logrus.  The import Sirupsen/logrus had been renamed to sirupsen/logrus.
+  Thanks to @alfred-landrum for spotting this.
+
+* Added [ErrorAttributer](https://godoc.org/github.com/newrelic/go-agent#ErrorAttributer),
+  an optional interface that can be implemented by errors provided to
+  `Transaction.NoticeError` to attach additional attributes.  These attributes are
+  subject to attribute configuration.
+
+* Added [Error](https://godoc.org/github.com/newrelic/go-agent#Error), a type
+  that allows direct control of error fields.  Example use:
+
+```go
+txn.NoticeError(newrelic.Error{
+	// Message is returned by the Error() method.
+	Message: "error message: something went very wrong",
+	Class:   "errors are aggregated by class",
+	Attributes: map[string]interface{}{
+		"important_number": 97232,
+		"relevant_string":  "zap",
+	},
+})
+```
+
+* Updated license to address scope of usage.
+
+## 1.9.0
+
+* Added support for [github.com/gin-gonic/gin](https://github.com/gin-gonic/gin)
+  in the new `nrgin` package.
+  * [Documentation](http://godoc.org/github.com/newrelic/go-agent/_integrations/nrgin/v1)
+  * [Example](examples/_gin/main.go)
+
+## 1.8.0
+
+* Fixed incorrect metric rule application when the metric rule is flagged to
+  terminate and matches but the name is unchanged.
+
+* `Segment.End()`, `DatastoreSegment.End()`, and `ExternalSegment.End()` methods now return an
+  error which may be helpful in diagnosing situations where segment data is unexpectedly missing.
+
+## 1.7.0
+
+* Added support for [gorilla/mux](http://github.com/gorilla/mux) in the new `nrgorilla`
+  package.
+  * [Documentation](http://godoc.org/github.com/newrelic/go-agent/_integrations/nrgorilla/v1)
+  * [Example](examples/_gorilla/main.go)
+
+## 1.6.0
+
+* Added support for custom error messages and stack traces.  Errors provided
+  to `Transaction.NoticeError` will now be checked to see if
+  they implement [ErrorClasser](https://godoc.org/github.com/newrelic/go-agent#ErrorClasser)
+  and/or [StackTracer](https://godoc.org/github.com/newrelic/go-agent#StackTracer).
+  Thanks to @fgrosse for this proposal.
+
+* Added support for [pkg/errors](https://github.com/pkg/errors).  Thanks to
+  @fgrosse for this work.
+  * [documentation](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrpkgerrors)
+  * [example](https://github.com/newrelic/go-agent/blob/master/_integrations/nrpkgerrors/nrpkgerrors.go)
+
+* Fixed tests for Go 1.8.
+
+## 1.5.0
+
+* Added support for Windows.  Thanks to @ianomad and @lvxv for the contributions.
+
+* The number of heap objects allocated is recorded in the
+  `Memory/Heap/AllocatedObjects` metric.  This will soon be displayed on the "Go
+  runtime" page.
+
+* If the [DatastoreSegment](https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment)
+  fields `Host` and `PortPathOrID` are not provided, they will no longer appear
+  as `"unknown"` in transaction traces and slow query traces.
+
+* Stack traces will now be nicely aligned in the APM UI.
+
+## 1.4.0
+
+* Added support for slow query traces.  Slow datastore segments will now
+ generate slow query traces viewable on the datastore tab.  These traces include
+ a stack trace and help you to debug slow datastore activity.
+ [Slow Query Documentation](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/viewing-slow-query-details)
+
+* Added new
+[DatastoreSegment](https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment)
+fields `ParameterizedQuery`, `QueryParameters`, `Host`, `PortPathOrID`, and
+`DatabaseName`.  These fields will be shown in transaction traces and in slow
+query traces.
+
+## 1.3.0
+
+* Breaking Change: Added a timeout parameter to the `Application.Shutdown` method.
+
+## 1.2.0
+
+* Added support for instrumenting short-lived processes:
+  * The new `Application.Shutdown` method allows applications to report
+    data to New Relic without waiting a full minute.
+  * The new `Application.WaitForConnection` method allows your process to
+    defer instrumentation until the application is connected and ready to
+    gather data.
+  * Full documentation here: [application.go](application.go)
+  * Example short-lived process: [examples/short-lived-process/main.go](examples/short-lived-process/main.go)
+
+* Error metrics are no longer created when `ErrorCollector.Enabled = false`.
+
+* Added support for [github.com/mgutz/logxi](github.com/mgutz/logxi).  See
+  [_integrations/nrlogxi/v1/nrlogxi.go](_integrations/nrlogxi/v1/nrlogxi.go).
+
+* Fixed bug where Transaction Trace thresholds based upon Apdex were not being
+  applied to background transactions.
+
+## 1.1.0
+
+* Added support for Transaction Traces.
+
+* Stack trace filenames have been shortened: Anything preceding the first
+  `/src/` is now removed.
+
+## 1.0.0
+
+* Removed `BetaToken` from the `Config` structure.
+
+* Breaking Datastore Change:  `datastore` package contents moved to top level
+  `newrelic` package.  `datastore.MySQL` has become `newrelic.DatastoreMySQL`.
+
+* Breaking Attributes Change:  `attributes` package contents moved to top
+  level `newrelic` package.  `attributes.ResponseCode` has become
+  `newrelic.AttributeResponseCode`.  Some attribute name constants have been
+  shortened.
+
+* Added "runtime.NumCPU" to the environment tab.  Thanks sergeylanzman for the
+  contribution.
+
+* Prefixed the environment tab values "Compiler", "GOARCH", "GOOS", and
+  "Version" with "runtime.".
+
+## 0.8.0
+
+* Breaking Segments API Changes:  The segments API has been rewritten with the
+  goal of being easier to use and to avoid nil Transaction checks.  See:
+
+  * [segments.go](segments.go)
+  * [examples/server/main.go](examples/server/main.go)
+  * [GUIDE.md#segments](GUIDE.md#segments)
+
+* Updated LICENSE.txt with contribution information.
+
+## 0.7.1
+
+* Fixed a bug causing the `Config` to fail to serialize into JSON when the
+  `Transport` field was populated.
+
+## 0.7.0
+
+* Eliminated `api`, `version`, and `log` packages.  `Version`, `Config`,
+  `Application`, and `Transaction` now live in the top level `newrelic` package.
+  If you imported the  `attributes` or `datastore` packages then you will need
+  to remove `api` from the import path.
+
+* Breaking Logging Changes
+
+Logging is no longer controlled through a single global.  Instead, logging is
+configured on a per-application basis with the new `Config.Logger` field.  The
+logger is an interface described in [log.go](log.go).  See
+[GUIDE.md#logging](GUIDE.md#logging).
+
+## 0.6.1
+
+* No longer create "GC/System/Pauses" metric if no GC pauses happened.
+
+## 0.6.0
+
+* Introduced beta token to support our beta program.
+
+* Rename `Config.Development` to `Config.Enabled` (and change boolean
+  direction).
+
+* Fixed a bug where exclusive time could be incorrect if segments were not
+  ended.
+
+* Fix unit tests broken in 1.6.
+
+* In `Config.Enabled = false` mode, the license must be the proper length or empty.
+
+* Added runtime statistics for CPU/memory usage, garbage collection, and number
+  of goroutines.
+
+## 0.5.0
+
+* Added segment timing methods to `Transaction`.  These methods must only be
+  used in a single goroutine.
+
+* The license length check will not be performed in `Development` mode.
+
+* Rename `SetLogFile` to `SetFile` to reduce redundancy.
+
+* Added `DebugEnabled` logging guard to reduce overhead.
+
+* `Transaction` now implements an `Ignore` method which will prevent
+  any of the transaction's data from being recorded.
+
+* `Transaction` now implements a subset of the interfaces
+  `http.CloseNotifier`, `http.Flusher`, `http.Hijacker`, and `io.ReaderFrom`
+  to match the behavior of its wrapped `http.ResponseWriter`.
+
+* Changed project name from `go-sdk` to `go-agent`.
+
+## 0.4.0
+
+* Queue time support added: if the inbound request contains an
+`"X-Request-Start"` or `"X-Queue-Start"` header with a unix timestamp, the
+agent will report queue time metrics.  Queue time will appear on the
+application overview chart.  The timestamp may be fractional seconds,
+milliseconds, or microseconds: the agent will deduce the correct units.
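
For the queue-time support described in the 0.4.0 entry, a fronting proxy would stamp the request before handing it to the instrumented app. A hedged sketch — the header name comes from the entry above, and microseconds are chosen arbitrarily since the agent deduces the units:

```go
// Stamp the inbound request with a queue-start timestamp (sketch only).
func stampQueueStart(req *http.Request) {
	us := time.Now().UnixNano() / int64(time.Microsecond)
	req.Header.Set("X-Queue-Start", strconv.FormatInt(us, 10))
}
```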

+ 9 - 0
vendor/github.com/newrelic/go-agent/CONTRIBUTING.md

@@ -0,0 +1,9 @@
+# Contributing
+
+You are welcome to send pull requests to us.  By doing so you agree that you are
+granting New Relic a non-exclusive, non-revokable, no-cost license to use the
+code, algorithms, patents, and ideas in that code in our products if we so
+choose.  You also agree the code is provided as-is and you provide no warranties
+as to its fitness or correctness for any purpose.
+
+* [LICENSE.txt](LICENSE.txt)

+ 614 - 0
vendor/github.com/newrelic/go-agent/GUIDE.md

@@ -0,0 +1,614 @@
+# New Relic Go Agent Guide
+
+* [Installation](#installation)
+* [Config and Application](#config-and-application)
+* [Logging](#logging)
+  * [logrus](#logrus)
+* [Transactions](#transactions)
+* [Segments](#segments)
+  * [Datastore Segments](#datastore-segments)
+  * [External Segments](#external-segments)
+* [Attributes](#attributes)
+* [Tracing](#tracing)
+  * [Distributed Tracing](#distributed-tracing)
+  * [Cross-Application Tracing](#cross-application-tracing)
+  * [Tracing instrumentation](#tracing-instrumentation)
+    * [Getting Tracing Instrumentation Out-of-the-Box](#getting-tracing-instrumentation-out-of-the-box)
+    * [Manually Implementing Distributed Tracing](#manually-implementing-distributed-tracing)
+* [Custom Metrics](#custom-metrics)
+* [Custom Events](#custom-events)
+* [Request Queuing](#request-queuing)
+* [Error Reporting](#error-reporting)
+  * [Advanced Error Reporting](#advanced-error-reporting)
+* [Naming Transactions and Metrics](#naming-transactions-and-metrics)
+* [For More Help](#for-more-help)
+
+## Installation
+
+Installing the Go Agent is the same as installing any other Go library.  The
+simplest way is to run:
+
+```
+go get github.com/newrelic/go-agent
+```
+
+Then import the `github.com/newrelic/go-agent` package in your application.
+
+## Config and Application
+
+* [config.go](config.go)
+* [application.go](application.go)
+
+In your `main` function or in an `init` block:
+
+```go
+config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__")
+app, err := newrelic.NewApplication(config)
+```
+
+Find your application in the New Relic UI.  Click on it to see the Go runtime
+page that shows information about goroutine counts, garbage collection, memory,
+and CPU usage.
+
+If you are working in a development environment or running unit tests, you may
+not want the Go Agent to spawn goroutines or report to New Relic.  You're in
+luck!  Set the config's `Enabled` field to false.  This makes the license key
+optional.
+
+```go
+config := newrelic.NewConfig("Your Application Name", "")
+config.Enabled = false
+app, err := newrelic.NewApplication(config)
+```
+
+## Logging
+
+* [log.go](log.go)
+
+The agent's logging system is designed to be easily extensible.  By default, no
+logging will occur.  To enable logging, assign the `Config.Logger` field to
+something implementing the `Logger` interface.  A basic logging
+implementation is included.
+
+To log at debug level to standard out, set:
+
+```go
+config.Logger = newrelic.NewDebugLogger(os.Stdout)
+```
+
+To log at info level to a file, set:
+
+```go
+w, err := os.OpenFile("my_log_file", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+if nil == err {
+  config.Logger = newrelic.NewLogger(w)
+}
+```
+
+### logrus
+
+* [_integrations/nrlogrus/nrlogrus.go](_integrations/nrlogrus/nrlogrus.go)
+
+If you are using `logrus` and would like to send the agent's log messages to its
+standard logger, import the
+`github.com/newrelic/go-agent/_integrations/nrlogrus` package, then set:
+
+```go
+config.Logger = nrlogrus.StandardLogger()
+```
+
+## Transactions
+
+* [transaction.go](transaction.go)
+* [Naming Transactions](#naming-transactions-and-metrics)
+* [More info on Transactions](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/transactions-page)
+
+Transactions time requests and background tasks.  Each transaction should only
+be used in a single goroutine.  Start a new transaction when you spawn a new
+goroutine.
+
+The simplest way to create transactions is to use
+`Application.StartTransaction` and `Transaction.End`.
+
+```go
+txn := app.StartTransaction("transactionName", responseWriter, request)
+defer txn.End()
+```
+
+If the response writer is provided when calling `StartTransaction`, you can
+then use `txn.WriteHeader` as a drop in replacement for the standard library's
+[`http.ResponseWriter.WriteHeader`](https://golang.org/pkg/net/http/#ResponseWriter)
+function. We strongly recommend doing so, as this both enables cross-application
+tracing support and ensures that attributes are added to the
+Transaction event capturing the response size and status code.
+
+The response writer and request parameters are optional.  Leave them `nil` to
+instrument a background task.
+
+```go
+txn := app.StartTransaction("backgroundTask", nil, nil)
+defer txn.End()
+```
+
+The transaction has helpful methods like `NoticeError` and `SetName`.
+See more in [transaction.go](transaction.go).
+
+If you are using [`http.ServeMux`](https://golang.org/pkg/net/http/#ServeMux),
+use `WrapHandle` and `WrapHandleFunc`.  These wrappers automatically start and
+end transactions with the request and response writer.  See
+[instrumentation.go](instrumentation.go).
+
+```go
+http.HandleFunc(newrelic.WrapHandleFunc(app, "/users", usersHandler))
+```
+
+To access the transaction in your handler, use type assertion on the response
+writer passed to the handler.
+
+```go
+func myHandler(w http.ResponseWriter, r *http.Request) {
+	if txn, ok := w.(newrelic.Transaction); ok {
+		txn.NoticeError(errors.New("my error message"))
+	}
+}
+```
+
+## Segments
+
+* [segments.go](segments.go)
+
+Find out where the time in your transactions is being spent!  Each transaction
+should only track segments in a single goroutine.
+
+`Segment` is used to instrument functions, methods, and blocks of code. A
+segment begins when its `StartTime` field is populated, and finishes when its
+`End` method is called.
+
+```go
+segment := newrelic.Segment{}
+segment.Name = "mySegmentName"
+segment.StartTime = newrelic.StartSegmentNow(txn)
+// ... code you want to time here ...
+segment.End()
+```
+
+`StartSegment` is a convenient helper.  It creates a segment and starts it:
+
+```go
+segment := newrelic.StartSegment(txn, "mySegmentName")
+// ... code you want to time here ...
+segment.End()
+```
+
+Timing a function is easy using `StartSegment` and `defer`.  Just add the
+following line to the beginning of that function:
+
+```go
+defer newrelic.StartSegment(txn, "mySegmentName").End()
+```
+
+Segments may be nested.  The segment being ended must be the most recently
+started segment.
+
+```go
+s1 := newrelic.StartSegment(txn, "outerSegment")
+s2 := newrelic.StartSegment(txn, "innerSegment")
+// s2 must be ended before s1
+s2.End()
+s1.End()
+```
+
+A zero value segment may safely be ended.  Therefore, the following code
+is safe even if the conditional fails:
+
+```go
+var s newrelic.Segment
+if txn, ok := w.(newrelic.Transaction); ok {
+	s.StartTime = newrelic.StartSegmentNow(txn)
+}
+// ... code you wish to time here ...
+s.End()
+```
+
+### Datastore Segments
+
+Datastore segments appear in the transaction "Breakdown table" and in the
+"Databases" page.
+
+* [datastore.go](datastore.go)
+* [More info on Databases page](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/databases-slow-queries-page)
+
+Datastore segments are instrumented using `DatastoreSegment`.  Just like basic
+segments, datastore segments begin when the `StartTime` field is populated and
+finish when the `End` method is called.  Here is an example:
+
+```go
+s := newrelic.DatastoreSegment{
+	// Product is the datastore type.  See the constants in datastore.go.
+	Product: newrelic.DatastoreMySQL,
+	// Collection is the table or group.
+	Collection: "my_table",
+	// Operation is the relevant action, e.g. "SELECT" or "GET".
+	Operation: "SELECT",
+}
+s.StartTime = newrelic.StartSegmentNow(txn)
+// ... make the datastore call
+s.End()
+```
+
+This may be combined into a single line when instrumenting a datastore call
+that spans an entire function call:
+
+```go
+s := newrelic.DatastoreSegment{
+	StartTime:  newrelic.StartSegmentNow(txn),
+	Product:    newrelic.DatastoreMySQL,
+	Collection: "my_table",
+	Operation:  "SELECT",
+}
+defer s.End()
+```
+
+### External Segments
+
+External segments appear in the transaction "Breakdown table" and in the
+"External services" page. Version 1.11.0 of the Go Agent adds support for
+cross-application tracing (CAT), which will result in external segments also
+appearing in the "Service maps" page and being linked in transaction traces when
+both sides of the request have traces. Version 2.1.0 of the Go Agent adds
+support for distributed tracing, which lets you see the path a request takes as
+it travels through distributed APM apps.
+
+* [More info on External Services page](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/external-services-page)
+* [More info on Cross-Application Tracing](https://docs.newrelic.com/docs/apm/transactions/cross-application-traces/introduction-cross-application-traces)
+* [More info on Distributed Tracing](https://docs.newrelic.com/docs/apm/distributed-tracing/getting-started/introduction-distributed-tracing) 
+
+External segments are instrumented using `ExternalSegment`. There are three
+ways to use this functionality:
+
+1. Using `StartExternalSegment` to create an `ExternalSegment` before the
+   request is sent, and then calling `ExternalSegment.End` when the external
+   request is complete.
+   
+   For CAT support to operate, an `http.Request` must be provided to
+   `StartExternalSegment`, and the `ExternalSegment.Response` field must be set
+   before `ExternalSegment.End` is called or deferred.
+
+   For example:
+
+    ```go
+    func external(txn newrelic.Transaction, req *http.Request) (*http.Response, error) {
+      s := newrelic.StartExternalSegment(txn, req)
+      response, err := http.DefaultClient.Do(req)
+      s.Response = response
+      s.End()
+      return response, err
+    }
+    ```
+
+2. Using `NewRoundTripper` to get a
+   [`http.RoundTripper`](https://golang.org/pkg/net/http/#RoundTripper) that
+   will automatically instrument all requests made via
+   [`http.Client`](https://golang.org/pkg/net/http/#Client) instances that use
+   that round tripper as their `Transport`. This option results in CAT support,
+   provided the Go Agent is version 1.11.0, and in distributed tracing support,
+   provided the Go Agent is version 2.1.0.
+
+   For example:
+
+    ```go
+    client := &http.Client{}
+    client.Transport = newrelic.NewRoundTripper(txn, nil)
+    resp, err := client.Get("http://example.com/")
+    ```
+
+   Note that, as with all segments, the round tripper returned **must** only be
+   used in the same goroutine as the transaction.
+
+3. Directly creating an `ExternalSegment` via a struct literal with an explicit
+   `URL` or `Request`, and then calling `ExternalSegment.End`. This option does
+   not support CAT, and may be removed or changed in a future major version of
+   the Go Agent. As a result, we suggest using one of the other options above
+   wherever possible.
+
+   For example:
+
+    ```go
+    func external(txn newrelic.Transaction, url string) (*http.Response, error) {
+      es := newrelic.ExternalSegment{
+        StartTime: newrelic.StartSegmentNow(txn),
+        URL:   url,
+      }
+      defer es.End()
+
+      return http.Get(url)
+    }
+    ```
+
+## Attributes
+
+Attributes add context to errors and allow you to filter performance data
+in Insights.
+
+You may add them using the `Transaction.AddAttribute` method.
+
+```go
+txn.AddAttribute("key", "value")
+txn.AddAttribute("product", "widget")
+txn.AddAttribute("price", 19.99)
+txn.AddAttribute("importantCustomer", true)
+```
+
+* [More info on Custom Attributes](https://docs.newrelic.com/docs/insights/new-relic-insights/decorating-events/insights-custom-attributes)
+
+Some attributes are recorded automatically.  These are called agent attributes.
+They are listed here:
+
+* [attributes.go](attributes.go)
+
+To disable one of these agent attributes, `AttributeResponseCode` for
+example, modify the config like this:
+
+```go
+config.Attributes.Exclude = append(config.Attributes.Exclude, newrelic.AttributeResponseCode)
+```
+
+* [More info on Agent Attributes](https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes)
+
+## Tracing
+
+New Relic's [distributed
+tracing](https://docs.newrelic.com/docs/apm/distributed-tracing/getting-started/introduction-distributed-tracing)  
+is the next generation of the previous cross-application tracing feature. Compared to 
+cross-application tracing, distributed tracing gives more detail about cross-service activity and provides more 
+complete end-to-end visibility.  This section discusses distributed tracing and cross-application tracing in turn.
+
+### Distributed Tracing
+
+New Relic's [distributed
+tracing](https://docs.newrelic.com/docs/apm/distributed-tracing/getting-started/introduction-distributed-tracing) 
+feature lets you see the path that a request takes as it travels through distributed APM
+apps, which is vital for applications implementing a service-oriented or
+microservices architecture. Support for distributed tracing was added in 
+version 2.1.0 of the Go Agent.
+
+The config's `DistributedTracer.Enabled` field has to be set. When true, the 
+agent will add distributed tracing headers in outbound requests, and scan 
+incoming requests for distributed tracing headers. Distributed tracing and 
+cross-application tracing cannot be used simultaneously:
+
+```go
+config.CrossApplicationTracer.Enabled = false
+config.DistributedTracer.Enabled = true
+```
+
+### Cross-Application Tracing
+
+New Relic's
+[cross-application tracing](https://docs.newrelic.com/docs/apm/transactions/cross-application-traces/introduction-cross-application-traces)
+feature, or CAT for short, links transactions between applications in APM to
+help identify performance problems within your service-oriented architecture.
+Support for CAT was added in version 1.11.0 of the Go Agent.
+
+As CAT uses HTTP headers to track requests across applications, the Go Agent
+needs to be able to access and modify request and response headers both for
+incoming and outgoing requests.
+
+### Tracing Instrumentation
+
+Both distributed tracing and cross-application tracing work by propagating 
+[header information](https://docs.newrelic.com/docs/apm/distributed-tracing/getting-started/how-new-relic-distributed-tracing-works#headers)
+from service to service in a request path. In many scenarios, the Go Agent offers tracing instrumentation 
+out-of-the-box, for both distributed tracing and cross-application tracing. For other scenarios customers may implement 
+distributed tracing based on the examples provided in this guide.
+
+#### Getting Tracing Instrumentation Out-of-the-Box
+
+The Go Agent automatically creates and propagates tracing header information 
+for each of the following scenarios:
+
+1. Using `WrapHandle` or `WrapHandleFunc` to instrument a server that
+   uses [`http.ServeMux`](https://golang.org/pkg/net/http/#ServeMux)
+   ([Example](examples/server/main.go)).
+
+2. Using either of the Go Agent's [Gin](_integrations/nrgin/v1) or
+   [Gorilla](_integrations/nrgorilla/v1) integrations
+   ([Gin Example](examples/_gin/main.go), [Gorilla Example](examples/_gorilla/main.go)).
+
+3. Using another framework or [`http.Server`](https://golang.org/pkg/net/http/#Server) while ensuring that:
+
+      1. All calls to `StartTransaction` include the response writer and
+         request, and
+      2. `Transaction.WriteHeader` is used instead of calling `WriteHeader`
+         directly on the response writer, as described in the
+         [transactions section of this guide](#transactions)
+         ([Example](examples/server-http/main.go)).
+
+4. Using `NewRoundTripper`, as described in the
+   [external segments section of this guide](#external-segments)
+   ([Example](examples/client-round-tripper/main.go)).
+
+5. Using the call `StartExternalSegment` and providing an `http.Request`, as 
+   described in the [external segments section of this guide](#external-segments)
+   ([Example](examples/client/main.go)).
+
+#### Manually Implementing Distributed Tracing
+
+Consider [manual instrumentation](https://docs.newrelic.com/docs/apm/distributed-tracing/enable-configure/enable-distributed-tracing#agent-apis) 
+for services not instrumented automatically by the Go Agent. In such scenarios, the
+calling service has to generate a distributed trace payload:
+
+```go
+p := callingTxn.CreateDistributedTracePayload()
+```
+
+This payload has to be added to the call to the destination service, which in turn
+invokes the call for accepting the payload:
+
+```go
+calledTxn.AcceptDistributedTracePayload(newrelic.TransportOther, p)
+```
+
+A complete example can be found
+[here](examples/custom-instrumentation/main.go).
+
+
+## Custom Metrics
+
+* [More info on Custom Metrics](https://docs.newrelic.com/docs/agents/go-agent/instrumentation/create-custom-metrics-go)
+
+You may [create custom metrics](https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-data/collect-custom-metrics)
+via the `RecordCustomMetric` method.
+
+```go
+app.RecordCustomMetric(
+	"CustomMetricName", // Name of your metric
+	132,                // Value
+)
+```
+
+**Note:** The Go Agent will automatically prepend the metric name you pass to
+`RecordCustomMetric` (`"CustomMetricName"` above) with the string `Custom/`.
+This means the above code would produce a metric named
+`Custom/CustomMetricName`.  You'll also want to read over the
+[Naming Transactions and Metrics](#naming-transactions-and-metrics) section below for
+advice on coming up with appropriate metric names.
+
+## Custom Events
+
+You may track arbitrary events using custom Insights events.
+
+```go
+app.RecordCustomEvent("MyEventType", map[string]interface{}{
+	"myString": "hello",
+	"myFloat":  0.603,
+	"myInt":    123,
+	"myBool":   true,
+})
+```
+
+## Request Queuing
+
+If you are running a load balancer or reverse web proxy then you may configure
+it to add a `X-Queue-Start` header with a Unix timestamp.  This will create a
+band on the application overview chart showing queue time.
+
+* [More info on Request Queuing](https://docs.newrelic.com/docs/apm/applications-menu/features/request-queuing-tracking-front-end-time)
+
+## Error Reporting
+
+You may track errors using the `Transaction.NoticeError` method.  The easiest
+way to get started with `NoticeError` is to use errors based on
+[Go's standard error interface](https://blog.golang.org/error-handling-and-go).
+
+```go
+txn.NoticeError(errors.New("my error message"))
+```
+
+`NoticeError` will work with *any* sort of object that implements Go's standard
+error type interface -- not just `errorStrings` created via `errors.New`.  
+
+If you're interested in sending more than an error *message* to New Relic, the
+Go Agent also offers a `newrelic.Error` struct.
+
+```go
+txn.NoticeError(newrelic.Error{
+	Message: "my error message",
+	Class:   "IdentifierForError",
+	Attributes: map[string]interface{}{
+		"important_number": 97232,
+		"relevant_string":  "zap",
+	},
+})
+```
+
+Using the `newrelic.Error` struct requires you to manually marshal your error
+data into the `Message`, `Class`, and `Attributes` fields.  However, there are
+two **advantages** to using the `newrelic.Error` struct.
+
+First, by setting an error `Class`, New Relic will be able to aggregate errors
+in the *Error Analytics* section of APM.  Second, the `Attributes` field allows
+you to send through key/value pairs with additional error debugging information
+(also exposed in the *Error Analytics* section of APM).
+
+### Advanced Error Reporting
+
+You're not limited to using Go's built-in error type or the provided
+`newrelic.Error` struct.  The Go Agent provides three error interfaces
+
+```go
+type StackTracer interface {
+	StackTrace() []uintptr
+}
+
+type ErrorClasser interface {
+	ErrorClass() string
+}
+
+type ErrorAttributer interface {
+	ErrorAttributes() map[string]interface{}
+}
+```
+
+If you implement any of these on your own error structs, the `txn.NoticeError`
+method will recognize these methods and use their return values to provide error
+information.
+
+For example, you could implement a custom error struct named `MyErrorWithClass`:
+
+```go
+type MyErrorWithClass struct{}
+```
+
+Then, you could implement both an `Error` method (per Go's standard `error`
+interface) and an `ErrorClass` method (per the Go Agent `ErrorClasser`
+interface) for this struct.
+
+```go
+func (e MyErrorWithClass) Error() string { return "A hard coded error message" }
+
+// ErrorClass implements the ErrorClasser interface.
+func (e MyErrorWithClass) ErrorClass() string { return "MyErrorClassForAggregation" }
+```
+
+Finally, you'd use your new error by creating a new instance of your struct and
+passing it to the `NoticeError` method:
+
+```go
+txn.NoticeError(MyErrorWithClass{})
+```
+
+While this is an oversimplified example, these interfaces give you a great deal
+of control over what error information is available for your application.
+
+## Naming Transactions and Metrics
+
+You'll want to think carefully about how you name your transactions and custom
+metrics.  If your program creates too many unique names, you may end up with a
+[Metric Grouping Issue (or MGI)](https://docs.newrelic.com/docs/agents/manage-apm-agents/troubleshooting/metric-grouping-issues).
+
+MGIs occur when the granularity of names is too fine, resulting in hundreds or
+thousands of uniquely identified metrics and transactions.  One common cause of
+MGIs is relying on the full URL name for metric naming in web transactions.  A
+few major code paths may generate many different full URL paths to unique
+documents, articles, pages, etc. If the unique element of the URL path is
+included in the metric name, each of these common paths will have its own unique
+metric name.
+
+
+## For More Help
+
+There's a variety of places online to learn more about the Go Agent.
+
+[The New Relic docs site](https://docs.newrelic.com/docs/agents/go-agent/get-started/introduction-new-relic-go)
+contains a number of useful code samples and more context about how to use the Go Agent.
+
+[New Relic's discussion forums](https://discuss.newrelic.com) have a dedicated
+public forum [for the Go Agent](https://discuss.newrelic.com/c/support-products-agents/go-agent).
+
+When in doubt, [the New Relic support site](https://support.newrelic.com/) is
+the best place to get started troubleshooting an agent issue.
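
As a companion to the guide's error interfaces, here is a hedged sketch of an error type implementing both `ErrorClasser` and `StackTracer`; `tracedError` is illustrative only and does not appear in the guide:

```go
// Hypothetical error implementing the guide's ErrorClasser and StackTracer
// interfaces (requires the runtime package).
type tracedError struct{ msg string }

func (e tracedError) Error() string      { return e.msg }
func (e tracedError) ErrorClass() string { return "TracedError" }

// StackTrace captures the caller's stack as program counters.
func (e tracedError) StackTrace() []uintptr {
	buf := make([]uintptr, 64)
	n := runtime.Callers(2, buf)
	return buf[:n]
}
```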

+ 49 - 0
vendor/github.com/newrelic/go-agent/LICENSE.txt

@@ -0,0 +1,49 @@
+This product includes source derived from 'go' by The Go Authors, distributed
+under the following BSD license:
+
+	https://github.com/golang/go/blob/master/LICENSE
+
+-------------------------------------------------------------------------------
+
+All components of this product are Copyright (c) 2016 New Relic, Inc.  All
+rights reserved.
+
+Certain inventions disclosed in this file may be claimed within patents owned or
+patent applications filed by New Relic, Inc. or third parties.
+
+Subject to the terms of this notice, New Relic grants you a nonexclusive,
+nontransferable license, without the right to sublicense, to (a) install and
+execute one copy of these files on any number of workstations owned or
+controlled by you and (b) distribute verbatim copies of these files to third
+parties.  These files and their contents shall not be used in conjunction with
+any other product or software that may compete with any New Relic product,
+feature, or software or be used for the purpose of research, reverse engineering
+or developing such competitive products, features, or software.  As a condition
+to the foregoing grant, you must provide this notice along with each copy you
+distribute and you must not remove, alter, or obscure this notice.  In the event
+you submit or provide any feedback, code, pull requests, or suggestions to New
+Relic you hereby grant New Relic a worldwide, non-exclusive, irrevocable,
+transferrable, fully paid-up license to use the code, algorithms, patents, and
+ideas therein in our products.
+
+All other use, reproduction, modification, distribution, or other exploitation
+of these files is strictly prohibited, except as may be set forth in a separate
+written license agreement between you and New Relic.  The terms of any such
+license agreement will control over this notice.  The license stated above will
+be automatically terminated and revoked if you exceed its scope or violate any
+of the terms of this notice.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of New Relic, except as required for reasonable
+and customary use in describing the origin of this file and reproducing the
+content of this notice.  You may not mark or brand this file with any trade
+name, trademarks, service marks, or product names other than the original brand
+(if any) provided by New Relic.
+
+Unless otherwise expressly agreed by New Relic in a separate written license
+agreement, these files are provided AS IS, WITHOUT WARRANTY OF ANY KIND,
+including without any implied warranties of MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE, TITLE, or NON-INFRINGEMENT.  As a condition to your use of
+these files, you are solely responsible for such use. New Relic will have no
+liability to you for direct, indirect, consequential, incidental, special, or
+punitive damages or for lost profits or data.

+ 162 - 0
vendor/github.com/newrelic/go-agent/README.md

@@ -0,0 +1,162 @@
+# New Relic Go Agent [![GoDoc](https://godoc.org/github.com/newrelic/go-agent?status.svg)](https://godoc.org/github.com/newrelic/go-agent)
+
+## Description
+
+The New Relic Go Agent allows you to monitor your Go applications with New
+Relic.  It helps you track transactions, outbound requests, database calls, and
+other parts of your Go application's behavior and provides a running overview of
+garbage collection, goroutine activity, and memory use.
+
+All pull requests will be reviewed by the New Relic product team. Any questions or issues should be directed to our [support
+site](http://support.newrelic.com/) or our [community
+forum](http://forum.newrelic.com).
+
+## Requirements
+
+Go 1.3+ is required, due to the use of http.Client's Timeout field.
+
+Linux, OS X, and Windows (Vista, Server 2008 and later) are supported.
+
+## Getting Started
+
+Here are the basic steps to instrumenting your application.  For more
+information, see [GUIDE.md](GUIDE.md).
+
+#### Step 0: Installation
+
+Installing the Go Agent is the same as installing any other Go library.  The
+simplest way is to run:
+
+```
+go get github.com/newrelic/go-agent
+```
+
+Then import the `github.com/newrelic/go-agent` package in your application.
+
+#### Step 1: Create a Config and an Application
+
+In your `main` function or an `init` block:
+
+```go
+config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__")
+app, err := newrelic.NewApplication(config)
+```
+
+[more info](GUIDE.md#config-and-application), [application.go](application.go),
+[config.go](config.go)
+
+#### Step 2: Add Transactions
+
+Transactions time requests and background tasks.  Use `WrapHandle` and
+`WrapHandleFunc` to create transactions for requests handled by the `http`
+standard library package.
+
+```go
+http.HandleFunc(newrelic.WrapHandleFunc(app, "/users", usersHandler))
+```
+
+Alternatively, create transactions directly using the application's
+`StartTransaction` method:
+
+```go
+txn := app.StartTransaction("myTxn", optionalResponseWriter, optionalRequest)
+defer txn.End()
+```
+
+[more info](GUIDE.md#transactions), [transaction.go](transaction.go)
+
+#### Step 3: Instrument Segments
+
+Segments show you where time in your transactions is being spent.  At the
+beginning of important functions, add:
+
+```go
+defer newrelic.StartSegment(txn, "mySegmentName").End()
+```
+
+[more info](GUIDE.md#segments), [segments.go](segments.go)
+
+## Runnable Example
+
+[examples/server/main.go](./examples/server/main.go) is an example that will
+appear as "Example App" in your New Relic applications list.  To run it:
+
+```
+env NEW_RELIC_LICENSE_KEY=__YOUR_NEW_RELIC_LICENSE_KEY__ \
+    go run examples/server/main.go
+```
+
+Some endpoints exposed are [http://localhost:8000/](http://localhost:8000/)
+and [http://localhost:8000/notice_error](http://localhost:8000/notice_error).
+
+
+## Basic Example
+
+Before Instrumentation
+
+```go
+package main
+
+import (
+	"io"
+	"net/http"
+)
+
+func helloHandler(w http.ResponseWriter, r *http.Request) {
+	io.WriteString(w, "hello, world")
+}
+
+func main() {
+	http.HandleFunc("/", helloHandler)
+	http.ListenAndServe(":8000", nil)
+}
+```
+
+After Instrumentation
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+
+	"github.com/newrelic/go-agent"
+)
+
+func helloHandler(w http.ResponseWriter, r *http.Request) {
+	io.WriteString(w, "hello, world")
+}
+
+func main() {
+	// Create a config.  You need to provide the desired application name
+	// and your New Relic license key.
+	cfg := newrelic.NewConfig("Example App", "__YOUR_NEW_RELIC_LICENSE_KEY__")
+
+	// Create an application.  This represents an application in the New
+	// Relic UI.
+	app, err := newrelic.NewApplication(cfg)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	// Wrap helloHandler.  The performance of this handler will be recorded.
+	http.HandleFunc(newrelic.WrapHandleFunc(app, "/", helloHandler))
+	http.ListenAndServe(":8000", nil)
+}
+```
+
+## Support
+
+You can find more detailed documentation [in the guide](GUIDE.md) and on
+[the New Relic Documentation site](https://docs.newrelic.com/docs/agents/go-agent).
+
+If you can't find what you're looking for there, reach out to us on our [support
+site](http://support.newrelic.com/) or our [community
+forum](http://forum.newrelic.com) and we'll be happy to help you.
+
+Find a bug?  Contact us via [support.newrelic.com](http://support.newrelic.com/),
+or email support@newrelic.com.

+ 64 - 0
vendor/github.com/newrelic/go-agent/application.go

@@ -0,0 +1,64 @@
+package newrelic
+
+import (
+	"net/http"
+	"time"
+)
+
+// Application represents your application.
+type Application interface {
+	// StartTransaction begins a Transaction.
+	// * The Transaction should only be used in a single goroutine.
+	// * This method never returns nil.
+	// * If an http.Request is provided then the Transaction is considered
+	//   a web transaction.
+	// * If an http.ResponseWriter is provided then the Transaction can be
+	//   used in its place.  This allows instrumentation of the response
+	//   code and response headers.
+	StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction
+
+	// RecordCustomEvent adds a custom event to the application.  This
+	// feature is incompatible with high security mode.
+	//
+	// eventType must consist of alphanumeric characters, underscores, and
+	// colons, and must contain fewer than 255 bytes.
+	//
+	// Each value in the params map must be a number, string, or boolean.
+	// Keys must be less than 255 bytes.  The params map may not contain
+	// more than 64 attributes.  For more information, and a set of
+	// restricted keywords, see:
+	//
+	// https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents
+	RecordCustomEvent(eventType string, params map[string]interface{}) error
+
+	// RecordCustomMetric records a custom metric.  NOTE! The name you give
+	// will be prefixed by "Custom/".
+	//
+	// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-data/collect-custom-metrics
+	RecordCustomMetric(name string, value float64) error
+
+	// WaitForConnection blocks until the application is connected, is
+	// incapable of being connected, or the timeout has been reached.  This
+	// method is useful for short-lived processes since the application will
+	// not gather data until it is connected.  nil is returned if the
+	// application is connected successfully.
+	WaitForConnection(timeout time.Duration) error
+
+	// Shutdown flushes data to New Relic's servers and stops all
+	// agent-related goroutines managing this application.  After Shutdown
+	// is called, the application is disabled and no more data will be
+	// collected.  This method will block until all final data is sent to
+	// New Relic or the timeout has elapsed.
+	Shutdown(timeout time.Duration)
+}
+
+// NewApplication creates an Application and spawns goroutines to manage the
+// aggregation and harvesting of data.  On success, a non-nil Application and a
+// nil error are returned. On failure, a nil Application and a non-nil error
+// are returned.
+//
+// Applications do not share global state (other than the shared log.Logger).
+// Therefore, it is safe to create multiple applications.
+func NewApplication(c Config) (Application, error) {
+	return newApp(c)
+}
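
An end-to-end sketch of the Application lifecycle described above (hedged: the license key is a placeholder, and the event type and metric name are illustrative):

```go
package main

import (
	"log"
	"time"

	"github.com/newrelic/go-agent"
)

func main() {
	cfg := newrelic.NewConfig("Example App", "__YOUR_NEW_RELIC_LICENSE_KEY__")
	app, err := newrelic.NewApplication(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Useful for short-lived processes: no data is gathered until the
	// application is connected.
	if err := app.WaitForConnection(5 * time.Second); err != nil {
		log.Println("not connected:", err)
	}

	// Param values must be numbers, strings, or booleans.
	app.RecordCustomEvent("MyEventType", map[string]interface{}{
		"color": "purple",
		"count": 3,
	})

	// Recorded as "Custom/Cache/Size".
	app.RecordCustomMetric("Cache/Size", 42)

	// Flush remaining data before the process exits.
	app.Shutdown(10 * time.Second)
}
```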

+ 42 - 0
vendor/github.com/newrelic/go-agent/attributes.go

@@ -0,0 +1,42 @@
+package newrelic
+
+// This file contains the names of the automatically captured attributes.
+// Attributes are key value pairs attached to transaction events, error events,
+// and traced errors.  You may add your own attributes using the
+// Transaction.AddAttribute method (see transaction.go).
+//
+// These attribute names are exposed here to facilitate configuration.
+//
+// For more information, see:
+// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes
+
+// Attributes destined for Transaction Events and Errors:
+const (
+	// AttributeResponseCode is the response status code for a web request.
+	AttributeResponseCode = "httpResponseCode"
+	// AttributeRequestMethod is the request's method.
+	AttributeRequestMethod = "request.method"
+	// AttributeRequestAccept is the request's "Accept" header.
+	AttributeRequestAccept = "request.headers.accept"
+	// AttributeRequestContentType is the request's "Content-Type" header.
+	AttributeRequestContentType = "request.headers.contentType"
+	// AttributeRequestContentLength is the request's "Content-Length" header.
+	AttributeRequestContentLength = "request.headers.contentLength"
+	// AttributeRequestHost is the request's "Host" header.
+	AttributeRequestHost = "request.headers.host"
+	// AttributeResponseContentType is the response "Content-Type" header.
+	AttributeResponseContentType = "response.headers.contentType"
+	// AttributeResponseContentLength is the response "Content-Length" header.
+	AttributeResponseContentLength = "response.headers.contentLength"
+	// AttributeHostDisplayName contains the value of Config.HostDisplayName.
+	AttributeHostDisplayName = "host.displayName"
+)
+
+// Attributes destined for Errors:
+const (
+	// AttributeRequestUserAgent is the request's "User-Agent" header.
+	AttributeRequestUserAgent = "request.headers.User-Agent"
+	// AttributeRequestReferer is the request's "Referer" header.  Query
+	// string parameters are removed.
+	AttributeRequestReferer = "request.headers.referer"
+)
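
These names are exported so they can be used in the Include/Exclude lists of AttributeDestinationConfig (defined in config.go below). A minimal sketch, reusing the README's placeholder key, that keeps error collection on but drops the User-Agent header from errors:

```go
package main

import (
	"fmt"

	"github.com/newrelic/go-agent"
)

func main() {
	cfg := newrelic.NewConfig("Example App", "__YOUR_NEW_RELIC_LICENSE_KEY__")

	// Exclude the request's User-Agent header from traced errors and
	// error events; other error attributes are still captured.
	cfg.ErrorCollector.Attributes.Exclude = append(
		cfg.ErrorCollector.Attributes.Exclude,
		newrelic.AttributeRequestUserAgent,
	)
	fmt.Println(cfg.ErrorCollector.Attributes.Exclude)
}
```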

+ 297 - 0
vendor/github.com/newrelic/go-agent/config.go

@@ -0,0 +1,297 @@
+package newrelic
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Config contains Application and Transaction behavior settings.
+// Use NewConfig to create a Config with proper defaults.
+type Config struct {
+	// AppName is used by New Relic to link data across servers.
+	//
+	// https://docs.newrelic.com/docs/apm/new-relic-apm/installation-configuration/naming-your-application
+	AppName string
+
+	// License is your New Relic license key.
+	//
+	// https://docs.newrelic.com/docs/accounts-partnerships/accounts/account-setup/license-key
+	License string
+
+	// Logger controls go-agent logging.  See log.go.
+	Logger Logger
+
+	// Enabled determines whether the agent will communicate with the New
+	// Relic servers and spawn goroutines.  Setting this to be false can be
+	// useful in testing and staging situations.
+	Enabled bool
+
+	// Labels are key value pairs used to roll up applications into specific
+	// categories.
+	//
+	// https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/labels-categories-organizing-your-apps-servers
+	Labels map[string]string
+
+	// HighSecurity guarantees that certain agent settings can not be made
+	// more permissive.  This setting must match the corresponding account
+	// setting in the New Relic UI.
+	//
+	// https://docs.newrelic.com/docs/accounts-partnerships/accounts/security/high-security
+	HighSecurity bool
+
+	// SecurityPoliciesToken enables security policies if set to a non-empty
+	// string.  Only set this if security policies have been enabled on your
+	// account.  This cannot be used in conjunction with HighSecurity.
+	SecurityPoliciesToken string
+
+	// CustomInsightsEvents controls the behavior of
+	// Application.RecordCustomEvent.
+	//
+	// https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents
+	CustomInsightsEvents struct {
+		// Enabled controls whether RecordCustomEvent will collect
+		// custom analytics events.  High security mode overrides this
+		// setting.
+		Enabled bool
+	}
+
+	// TransactionEvents controls the behavior of transaction analytics
+	// events.
+	TransactionEvents struct {
+		// Enabled controls whether transaction events are captured.
+		Enabled bool
+		// Attributes controls the attributes included with transaction
+		// events.
+		Attributes AttributeDestinationConfig
+	}
+
+	// ErrorCollector controls the capture of errors.
+	ErrorCollector struct {
+		// Enabled controls whether errors are captured.  This setting
+		// affects both traced errors and error analytics events.
+		Enabled bool
+		// CaptureEvents controls whether error analytics events are
+		// captured.
+		CaptureEvents bool
+		// IgnoreStatusCodes controls which http response codes are
+		// automatically turned into errors.  By default, response codes
+		// greater than or equal to 400, with the exception of 404, are
+		// turned into errors.
+		IgnoreStatusCodes []int
+		// Attributes controls the attributes included with errors.
+		Attributes AttributeDestinationConfig
+	}
+
+	// TransactionTracer controls the capture of transaction traces.
+	TransactionTracer struct {
+		// Enabled controls whether transaction traces are captured.
+		Enabled bool
+		// Threshold controls whether a transaction trace will be
+		// considered for capture.  Of the traces exceeding the
+		// threshold, the slowest trace every minute is captured.
+		Threshold struct {
+			// If IsApdexFailing is true then the trace threshold is
+			// four times the apdex threshold.
+			IsApdexFailing bool
+			// If IsApdexFailing is false then this field is the
+			// threshold, otherwise it is ignored.
+			Duration time.Duration
+		}
+		// SegmentThreshold is the threshold at which segments will be
+		// added to the trace.  Lowering this setting may increase
+		// overhead.
+		SegmentThreshold time.Duration
+		// StackTraceThreshold is the threshold at which segments will
+		// be given a stack trace in the transaction trace.  Lowering
+		// this setting will drastically increase overhead.
+		StackTraceThreshold time.Duration
+		// Attributes controls the attributes included with transaction
+		// traces.
+		Attributes AttributeDestinationConfig
+	}
+
+	// HostDisplayName gives this server a recognizable name in the New
+	// Relic UI.  This is an optional setting.
+	HostDisplayName string
+
+	// Transport customizes http.Client communication with New Relic
+	// servers.  This may be used to configure a proxy.
+	Transport http.RoundTripper
+
+	// Utilization controls the detection and gathering of system
+	// information.
+	Utilization struct {
+		// DetectAWS controls whether the Application attempts to detect
+		// AWS.
+		DetectAWS bool
+		// DetectAzure controls whether the Application attempts to detect
+		// Azure.
+		DetectAzure bool
+		// DetectPCF controls whether the Application attempts to detect
+		// PCF.
+		DetectPCF bool
+		// DetectGCP controls whether the Application attempts to detect
+		// GCP.
+		DetectGCP bool
+		// DetectDocker controls whether the Application attempts to
+		// detect Docker.
+		DetectDocker bool
+
+		// These settings provide system information when custom values
+		// are required.
+		LogicalProcessors int
+		TotalRAMMIB       int
+		BillingHostname   string
+	}
+
+	// CrossApplicationTracer controls behaviour relating to cross application
+	// tracing (CAT), available since Go Agent v0.11.  The CrossApplicationTracer
+	// and the DistributedTracer cannot be simultaneously enabled.
+	CrossApplicationTracer struct {
+		Enabled bool
+	}
+
+	// DistributedTracer controls behaviour relating to Distributed Tracing,
+	// available since Go Agent v2.1. The DistributedTracer and the
+	// CrossApplicationTracer cannot be simultaneously enabled.
+	DistributedTracer struct {
+		Enabled bool
+	}
+
+	// SpanEvents controls behavior relating to Span Events.  Span Events
+	// require that distributed tracing is enabled.
+	SpanEvents struct {
+		Enabled bool
+	}
+
+	// DatastoreTracer controls behavior relating to datastore segments.
+	DatastoreTracer struct {
+		InstanceReporting struct {
+			Enabled bool
+		}
+		DatabaseNameReporting struct {
+			Enabled bool
+		}
+		QueryParameters struct {
+			Enabled bool
+		}
+		// SlowQuery controls the capture of slow query traces.  Slow
+		// query traces show you instances of your slowest datastore
+		// segments.
+		SlowQuery struct {
+			Enabled   bool
+			Threshold time.Duration
+		}
+	}
+
+	// Attributes controls the attributes included with errors and
+	// transaction events.
+	Attributes AttributeDestinationConfig
+
+	// RuntimeSampler controls the collection of runtime statistics like
+	// CPU/Memory usage, goroutine count, and GC pauses.
+	RuntimeSampler struct {
+		// Enabled controls whether runtime statistics are captured.
+		Enabled bool
+	}
+}
+
+// AttributeDestinationConfig controls the attributes included with errors and
+// transaction events.
+type AttributeDestinationConfig struct {
+	Enabled bool
+	Include []string
+	Exclude []string
+}
+
+// NewConfig creates a Config populated with the given appname, license,
+// and expected default values.
+func NewConfig(appname, license string) Config {
+	c := Config{}
+
+	c.AppName = appname
+	c.License = license
+	c.Enabled = true
+	c.Labels = make(map[string]string)
+	c.CustomInsightsEvents.Enabled = true
+	c.TransactionEvents.Enabled = true
+	c.TransactionEvents.Attributes.Enabled = true
+	c.HighSecurity = false
+	c.ErrorCollector.Enabled = true
+	c.ErrorCollector.CaptureEvents = true
+	c.ErrorCollector.IgnoreStatusCodes = []int{
+		http.StatusNotFound, // 404
+	}
+	c.ErrorCollector.Attributes.Enabled = true
+	c.Utilization.DetectAWS = true
+	c.Utilization.DetectAzure = true
+	c.Utilization.DetectPCF = true
+	c.Utilization.DetectGCP = true
+	c.Utilization.DetectDocker = true
+	c.Attributes.Enabled = true
+	c.RuntimeSampler.Enabled = true
+
+	c.TransactionTracer.Enabled = true
+	c.TransactionTracer.Threshold.IsApdexFailing = true
+	c.TransactionTracer.Threshold.Duration = 500 * time.Millisecond
+	c.TransactionTracer.SegmentThreshold = 2 * time.Millisecond
+	c.TransactionTracer.StackTraceThreshold = 500 * time.Millisecond
+	c.TransactionTracer.Attributes.Enabled = true
+
+	c.CrossApplicationTracer.Enabled = true
+	c.DistributedTracer.Enabled = false
+	c.SpanEvents.Enabled = true
+
+	c.DatastoreTracer.InstanceReporting.Enabled = true
+	c.DatastoreTracer.DatabaseNameReporting.Enabled = true
+	c.DatastoreTracer.QueryParameters.Enabled = true
+	c.DatastoreTracer.SlowQuery.Enabled = true
+	c.DatastoreTracer.SlowQuery.Threshold = 10 * time.Millisecond
+
+	return c
+}
+
+const (
+	licenseLength = 40
+	appNameLimit  = 3
+)
+
+// The following errors will be returned if your Config fails to validate.
+var (
+	errLicenseLen                       = fmt.Errorf("license length is not %d", licenseLength)
+	errAppNameMissing                   = errors.New("string AppName required")
+	errAppNameLimit                     = fmt.Errorf("max of %d rollup application names", appNameLimit)
+	errHighSecurityWithSecurityPolicies = errors.New("SecurityPoliciesToken and HighSecurity are incompatible; please ensure HighSecurity is set to false if SecurityPoliciesToken is a non-empty string and a security policy has been set for your account")
+	errMixedTracers                     = errors.New("CrossApplicationTracer and DistributedTracer cannot be enabled simultaneously; please choose CrossApplicationTracer (available since v1.11) or DistributedTracer (available since v2.1)")
+)
+
+// Validate checks the config for improper fields.  If the config is invalid,
+// newrelic.NewApplication returns an error.
+func (c Config) Validate() error {
+	if c.Enabled {
+		if len(c.License) != licenseLength {
+			return errLicenseLen
+		}
+	} else {
+		// The License may be empty when the agent is not enabled.
+		if len(c.License) != licenseLength && len(c.License) != 0 {
+			return errLicenseLen
+		}
+	}
+	if "" == c.AppName && c.Enabled {
+		return errAppNameMissing
+	}
+	if c.HighSecurity && "" != c.SecurityPoliciesToken {
+		return errHighSecurityWithSecurityPolicies
+	}
+	if c.CrossApplicationTracer.Enabled && c.DistributedTracer.Enabled {
+		return errMixedTracers
+	}
+	if strings.Count(c.AppName, ";") >= appNameLimit {
+		return errAppNameLimit
+	}
+	return nil
+}
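
To see Validate in action, here is a hedged sketch of the mixed-tracer check; the 40-character dummy key exists only to satisfy the length validation and is not a real license:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/newrelic/go-agent"
)

func main() {
	// Validate checks that license keys are exactly 40 characters.
	cfg := newrelic.NewConfig("Example App", strings.Repeat("0", 40))

	// CrossApplicationTracer is enabled by default, so also enabling
	// DistributedTracer makes the config invalid...
	cfg.DistributedTracer.Enabled = true
	fmt.Println(cfg.Validate()) // errMixedTracers

	// ...until one of the two tracers is switched off.
	cfg.CrossApplicationTracer.Enabled = false
	fmt.Println(cfg.Validate()) // <nil>
}
```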

+ 31 - 0
vendor/github.com/newrelic/go-agent/context.go

@@ -0,0 +1,31 @@
+// +build go1.7
+
+package newrelic
+
+import (
+	"context"
+	"net/http"
+)
+
+type contextKeyType struct{}
+
+var contextKey = contextKeyType(struct{}{})
+
+// NewContext returns a new Context that carries the provided transaction.
+func NewContext(ctx context.Context, txn Transaction) context.Context {
+	return context.WithValue(ctx, contextKey, txn)
+}
+
+// FromContext returns the Transaction from the context if present, and nil
+// otherwise.
+func FromContext(ctx context.Context) Transaction {
+	h, _ := ctx.Value(contextKey).(Transaction)
+	return h
+}
+
+// RequestWithTransactionContext adds the transaction to the request's context.
+func RequestWithTransactionContext(req *http.Request, txn Transaction) *http.Request {
+	ctx := req.Context()
+	ctx = NewContext(ctx, txn)
+	return req.WithContext(ctx)
+}
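
On Go 1.7+, a handler wrapped by WrapHandle or WrapHandleFunc (see instrumentation.go below) can recover its transaction from the request context. A sketch, again with the placeholder license key:

```go
package main

import (
	"io"
	"net/http"

	"github.com/newrelic/go-agent"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// WrapHandleFunc calls RequestWithTransactionContext, so the
	// transaction is retrievable here via FromContext.
	if txn := newrelic.FromContext(r.Context()); txn != nil {
		defer newrelic.StartSegment(txn, "write-response").End()
	}
	io.WriteString(w, "hello")
}

func main() {
	app, _ := newrelic.NewApplication(
		newrelic.NewConfig("Example App", "__YOUR_NEW_RELIC_LICENSE_KEY__"))
	http.HandleFunc(newrelic.WrapHandleFunc(app, "/", handler))
	http.ListenAndServe(":8000", nil)
}
```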

+ 10 - 0
vendor/github.com/newrelic/go-agent/context_stub.go

@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package newrelic
+
+import "net/http"
+
+// RequestWithTransactionContext adds the transaction to the request's context.
+func RequestWithTransactionContext(req *http.Request, txn Transaction) *http.Request {
+	return req
+}

+ 27 - 0
vendor/github.com/newrelic/go-agent/datastore.go

@@ -0,0 +1,27 @@
+package newrelic
+
+// DatastoreProduct encourages consistent metrics across New Relic agents.  You
+// may create your own if your datastore is not listed below.
+type DatastoreProduct string
+
+// Datastore names used across New Relic agents:
+const (
+	DatastoreCassandra     DatastoreProduct = "Cassandra"
+	DatastoreDerby                          = "Derby"
+	DatastoreElasticsearch                  = "Elasticsearch"
+	DatastoreFirebird                       = "Firebird"
+	DatastoreIBMDB2                         = "IBMDB2"
+	DatastoreInformix                       = "Informix"
+	DatastoreMemcached                      = "Memcached"
+	DatastoreMongoDB                        = "MongoDB"
+	DatastoreMySQL                          = "MySQL"
+	DatastoreMSSQL                          = "MSSQL"
+	DatastoreOracle                         = "Oracle"
+	DatastorePostgres                       = "Postgres"
+	DatastoreRedis                          = "Redis"
+	DatastoreSolr                           = "Solr"
+	DatastoreSQLite                         = "SQLite"
+	DatastoreCouchDB                        = "CouchDB"
+	DatastoreRiak                           = "Riak"
+	DatastoreVoltDB                         = "VoltDB"
+)
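
These constants are intended for the Product field of newrelic.DatastoreSegment, which is defined in segments.go (not shown in this excerpt). A hedged sketch, assuming txn is an in-progress Transaction and the newrelic import from the README examples:

```go
func queryUsers(txn newrelic.Transaction) {
	s := newrelic.DatastoreSegment{
		StartTime:  newrelic.StartSegmentNow(txn),
		Product:    newrelic.DatastorePostgres,
		Collection: "users",
		Operation:  "SELECT",
	}
	defer s.End()
	// ... execute the query here ...
}
```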

+ 51 - 0
vendor/github.com/newrelic/go-agent/errors.go

@@ -0,0 +1,51 @@
+package newrelic
+
+// StackTracer can be implemented by errors to provide a stack trace when using
+// Transaction.NoticeError.
+type StackTracer interface {
+	StackTrace() []uintptr
+}
+
+// ErrorClasser can be implemented by errors to provide a custom class when
+// using Transaction.NoticeError.
+type ErrorClasser interface {
+	ErrorClass() string
+}
+
+// ErrorAttributer can be implemented by errors to provide extra context when
+// using Transaction.NoticeError.
+type ErrorAttributer interface {
+	ErrorAttributes() map[string]interface{}
+}
+
+// Error is an error that implements ErrorClasser and ErrorAttributer.  It can
+// be used with Transaction.NoticeError to control exactly how errors are
+// recorded.  Example use:
+//
+// 	txn.NoticeError(newrelic.Error{
+// 		Message: "error message: something went very wrong",
+// 		Class:   "errors are aggregated by class",
+// 		Attributes: map[string]interface{}{
+// 			"important_number": 97232,
+// 			"relevant_string":  "zap",
+// 		},
+// 	})
+type Error struct {
+	// Message is the error message which will be returned by the Error()
+	// method.
+	Message string
+	// Class indicates how the error may be aggregated.
+	Class string
+	// Attributes are attached to traced errors and error events for
+	// additional context.  These attributes are validated just like those
+	// added to `Transaction.AddAttribute`.
+	Attributes map[string]interface{}
+}
+
+func (e Error) Error() string { return e.Message }
+
+// ErrorClass implements the ErrorClasser interface.
+func (e Error) ErrorClass() string { return e.Class }
+
+// ErrorAttributes implements the ErrorAttributer interface.
+func (e Error) ErrorAttributes() map[string]interface{} { return e.Attributes }
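
Beyond the Error struct, any error type can opt in by implementing these interfaces. A sketch of a hypothetical error that controls its own aggregation class:

```go
type timeoutError struct{ op string }

func (e timeoutError) Error() string      { return e.op + ": timed out" }
func (e timeoutError) ErrorClass() string { return "TimeoutError" }

// Within a transaction, such errors are then aggregated under
// "TimeoutError" rather than under the concrete Go type name:
//
//	txn.NoticeError(timeoutError{op: "db query"})
```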

+ 70 - 0
vendor/github.com/newrelic/go-agent/instrumentation.go

@@ -0,0 +1,70 @@
+package newrelic
+
+import "net/http"
+
+// instrumentation.go contains helpers built on the lower level api.
+
+// WrapHandle facilitates instrumentation of handlers registered with an
+// http.ServeMux.  For example, to instrument this code:
+//
+//    http.Handle("/foo", fooHandler)
+//
+// Perform this replacement:
+//
+//    http.Handle(newrelic.WrapHandle(app, "/foo", fooHandler))
+//
+// The Transaction is passed to the handler in place of the original
+// http.ResponseWriter, so it can be accessed using type assertion.
+// For example, to rename the transaction:
+//
+//	// 'w' is the variable name of the http.ResponseWriter.
+//	if txn, ok := w.(newrelic.Transaction); ok {
+//		txn.SetName("other-name")
+//	}
+//
+func WrapHandle(app Application, pattern string, handler http.Handler) (string, http.Handler) {
+	return pattern, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		txn := app.StartTransaction(pattern, w, r)
+		defer txn.End()
+
+		r = RequestWithTransactionContext(r, txn)
+
+		handler.ServeHTTP(txn, r)
+	})
+}
+
+// WrapHandleFunc serves the same purpose as WrapHandle for functions registered
+// with ServeMux.HandleFunc.
+func WrapHandleFunc(app Application, pattern string, handler func(http.ResponseWriter, *http.Request)) (string, func(http.ResponseWriter, *http.Request)) {
+	p, h := WrapHandle(app, pattern, http.HandlerFunc(handler))
+	return p, func(w http.ResponseWriter, r *http.Request) { h.ServeHTTP(w, r) }
+}
+
+// NewRoundTripper creates an http.RoundTripper to instrument external requests.
+// This RoundTripper must be used in the same goroutine as the other uses of the
+// Transaction's SegmentTracer methods.  http.DefaultTransport is used if an
+// http.RoundTripper is not provided.
+//
+//   client := &http.Client{}
+//   client.Transport = newrelic.NewRoundTripper(txn, nil)
+//   resp, err := client.Get("http://example.com/")
+//
+func NewRoundTripper(txn Transaction, original http.RoundTripper) http.RoundTripper {
+	return roundTripperFunc(func(request *http.Request) (*http.Response, error) {
+		segment := StartExternalSegment(txn, request)
+
+		if nil == original {
+			original = http.DefaultTransport
+		}
+		response, err := original.RoundTrip(request)
+
+		segment.Response = response
+		segment.End()
+
+		return response, err
+	})
+}
+
+type roundTripperFunc func(*http.Request) (*http.Response, error)
+
+func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) }

+ 104 - 0
vendor/github.com/newrelic/go-agent/internal/adaptive_sampler.go

@@ -0,0 +1,104 @@
+package internal
+
+import (
+	"math"
+	"sync"
+	"time"
+)
+
+// adaptiveSamplerInput holds input fields for the NewAdaptiveSampler function
+type adaptiveSamplerInput struct {
+	Period time.Duration
+	Target uint64
+}
+
+// AdaptiveSampler calculates which transactions should be sampled.  An interface
+// is used in the connect reply to facilitate testing.
+type AdaptiveSampler interface {
+	ComputeSampled(priority float32, now time.Time) bool
+}
+
+// SampleEverything is used for testing.
+type SampleEverything struct{}
+
+// SampleNothing is used when the application is not yet connected.
+type SampleNothing struct{}
+
+// ComputeSampled implements AdaptiveSampler.
+func (s SampleEverything) ComputeSampled(priority float32, now time.Time) bool { return true }
+
+// ComputeSampled implements AdaptiveSampler.
+func (s SampleNothing) ComputeSampled(priority float32, now time.Time) bool { return false }
+
+type adaptiveSampler struct {
+	sync.Mutex
+	adaptiveSamplerInput
+
+	// Transactions with priority higher than this are sampled.
+	// This is 1 - sampleRatio.
+	priorityMin float32
+
+	currentPeriod struct {
+		numSampled uint64
+		numSeen    uint64
+		end        time.Time
+	}
+}
+
+func newAdaptiveSampler(input adaptiveSamplerInput, now time.Time) *adaptiveSampler {
+	as := &adaptiveSampler{}
+	as.adaptiveSamplerInput = input
+	as.currentPeriod.end = now.Add(input.Period)
+
+	// Sample the first transactions in the first period.
+	as.priorityMin = 0.0
+	return as
+}
+
+// ComputeSampled calculates if the transaction should be sampled.
+func (as *adaptiveSampler) ComputeSampled(priority float32, now time.Time) bool {
+	as.Lock()
+	defer as.Unlock()
+
+	// Loop while the current time is after the end of the "currentPeriod".
+	// A `for` loop is used (rather than a single `if`) to handle harvests
+	// where no sampling happened, i.e. situations where a single call to
+	//    as.currentPeriod.end = as.currentPeriod.end.Add(as.Period)
+	// might not catch us up to the current period.
+	for now.After(as.currentPeriod.end) {
+		as.priorityMin = 0.0
+		if as.currentPeriod.numSeen > 0 {
+			sampledRatio := float32(as.Target) / float32(as.currentPeriod.numSeen)
+			as.priorityMin = 1.0 - sampledRatio
+		}
+		as.currentPeriod.numSampled = 0
+		as.currentPeriod.numSeen = 0
+		as.currentPeriod.end = as.currentPeriod.end.Add(as.Period)
+	}
+
+	as.currentPeriod.numSeen++
+
+	// exponential backoff -- if the number of sampled items is greater than our
+	// target, we need to apply the exponential backoff
+	if as.currentPeriod.numSampled > as.Target {
+		if as.computeSampledBackoff(as.Target, as.currentPeriod.numSeen, as.currentPeriod.numSampled) {
+			as.currentPeriod.numSampled++
+			return true
+		}
+		return false
+	}
+
+	if priority >= as.priorityMin {
+		as.currentPeriod.numSampled++
+		return true
+	}
+
+	return false
+}
+
+func (as *adaptiveSampler) computeSampledBackoff(target uint64, decidedCount uint64, sampledTrueCount uint64) bool {
+	return float64(RandUint64N(decidedCount)) <
+		math.Pow(float64(target), (float64(target)/float64(sampledTrueCount)))-math.Pow(float64(target), 0.5)
+}
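
To make the boundary arithmetic concrete: if Target is 10 and 100 transactions were seen in the period just ended, priorityMin becomes 1 - 10/100 = 0.9, so in the next period only transactions with priority at or above 0.9 (roughly the top 10%) are sampled outright. Once more than Target transactions have been sampled within a period, computeSampledBackoff admits additional ones with sharply decreasing probability.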

+ 136 - 0
vendor/github.com/newrelic/go-agent/internal/analytics_events.go

@@ -0,0 +1,136 @@
+package internal
+
+import (
+	"bytes"
+	"container/heap"
+
+	"github.com/newrelic/go-agent/internal/jsonx"
+)
+
+type analyticsEvent struct {
+	priority Priority
+	jsonWriter
+}
+
+type analyticsEventHeap []analyticsEvent
+
+type analyticsEvents struct {
+	numSeen        int
+	events         analyticsEventHeap
+	failedHarvests int
+}
+
+func (events *analyticsEvents) NumSeen() float64  { return float64(events.numSeen) }
+func (events *analyticsEvents) NumSaved() float64 { return float64(len(events.events)) }
+
+func (h analyticsEventHeap) Len() int           { return len(h) }
+func (h analyticsEventHeap) Less(i, j int) bool { return h[i].priority.isLowerPriority(h[j].priority) }
+func (h analyticsEventHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
+
+// Push and Pop are unused: only heap.Init and heap.Fix are used.
+func (h analyticsEventHeap) Push(x interface{}) {}
+func (h analyticsEventHeap) Pop() interface{}   { return nil }
+
+func newAnalyticsEvents(max int) *analyticsEvents {
+	return &analyticsEvents{
+		numSeen:        0,
+		events:         make(analyticsEventHeap, 0, max),
+		failedHarvests: 0,
+	}
+}
+
+func (events *analyticsEvents) addEvent(e analyticsEvent) {
+	events.numSeen++
+
+	if len(events.events) < cap(events.events) {
+		events.events = append(events.events, e)
+		if len(events.events) == cap(events.events) {
+			// Delay heap initialization so that we can have
+			// deterministic ordering for integration tests (the max
+			// is not being reached).
+			heap.Init(events.events)
+		}
+		return
+	}
+
+	if e.priority.isLowerPriority((events.events)[0].priority) {
+		return
+	}
+
+	events.events[0] = e
+	heap.Fix(events.events, 0)
+}
+
+func (events *analyticsEvents) mergeFailed(other *analyticsEvents) {
+	fails := other.failedHarvests + 1
+	if fails >= failedEventsAttemptsLimit {
+		return
+	}
+	events.failedHarvests = fails
+	events.Merge(other)
+}
+
+func (events *analyticsEvents) Merge(other *analyticsEvents) {
+	allSeen := events.numSeen + other.numSeen
+
+	for _, e := range other.events {
+		events.addEvent(e)
+	}
+	events.numSeen = allSeen
+}
+
+func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) {
+	if 0 == events.numSeen {
+		return nil, nil
+	}
+
+	estimate := 256 * len(events.events)
+	buf := bytes.NewBuffer(make([]byte, 0, estimate))
+
+	buf.WriteByte('[')
+	jsonx.AppendString(buf, agentRunID)
+	buf.WriteByte(',')
+	buf.WriteByte('{')
+	buf.WriteString(`"reservoir_size":`)
+	jsonx.AppendUint(buf, uint64(cap(events.events)))
+	buf.WriteByte(',')
+	buf.WriteString(`"events_seen":`)
+	jsonx.AppendUint(buf, uint64(events.numSeen))
+	buf.WriteByte('}')
+	buf.WriteByte(',')
+	buf.WriteByte('[')
+	for i, e := range events.events {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		e.WriteJSON(buf)
+	}
+	buf.WriteByte(']')
+	buf.WriteByte(']')
+
+	return buf.Bytes(), nil
+}
+
+// split splits the events into two.  NOTE! The two event pools are not valid
+// priority queues, and should only be used to create JSON, not for adding any
+// events.
+func (events *analyticsEvents) split() (*analyticsEvents, *analyticsEvents) {
+	// numSeen is conserved: e1.numSeen + e2.numSeen == events.numSeen.
+	e1 := &analyticsEvents{
+		numSeen:        len(events.events) / 2,
+		events:         make([]analyticsEvent, len(events.events)/2),
+		failedHarvests: events.failedHarvests,
+	}
+	e2 := &analyticsEvents{
+		numSeen:        events.numSeen - e1.numSeen,
+		events:         make([]analyticsEvent, len(events.events)-len(e1.events)),
+		failedHarvests: events.failedHarvests,
+	}
+	// Note that slicing is not used to ensure that length == capacity for
+	// e1.events and e2.events.
+	copy(e1.events, events.events)
+	copy(e2.events, events.events[len(events.events)/2:])
+
+	return e1, e2
+}
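
A concrete walk-through of the reservoir above, with a capacity of 2: events with priorities 0.1 and 0.5 fill the pool (triggering heap.Init on the second add); a third event with priority 0.9 evicts the minimum, 0.1; a fourth with priority 0.2 is dropped because it is below the new minimum, 0.5. At that point numSeen is 4 and NumSaved is 2.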

+ 48 - 0
vendor/github.com/newrelic/go-agent/internal/apdex.go

@@ -0,0 +1,48 @@
+package internal
+
+import "time"
+
+// ApdexZone is a transaction classification.
+type ApdexZone int
+
+// https://en.wikipedia.org/wiki/Apdex
+const (
+	ApdexNone ApdexZone = iota
+	ApdexSatisfying
+	ApdexTolerating
+	ApdexFailing
+)
+
+// ApdexFailingThreshold calculates the threshold at which the transaction is
+// considered a failure.
+func ApdexFailingThreshold(threshold time.Duration) time.Duration {
+	return 4 * threshold
+}
+
+// CalculateApdexZone calculates the apdex based on the transaction duration and
+// threshold.
+//
+// Note that this does not take into account whether or not the transaction
+// had an error.  That is expected to be done by the caller.
+func CalculateApdexZone(threshold, duration time.Duration) ApdexZone {
+	if duration <= threshold {
+		return ApdexSatisfying
+	}
+	if duration <= ApdexFailingThreshold(threshold) {
+		return ApdexTolerating
+	}
+	return ApdexFailing
+}
+
+func (zone ApdexZone) label() string {
+	switch zone {
+	case ApdexSatisfying:
+		return "S"
+	case ApdexTolerating:
+		return "T"
+	case ApdexFailing:
+		return "F"
+	default:
+		return ""
+	}
+}
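
A worked example of the zone boundaries (illustrative only; this package lives under internal/ and cannot be imported by user code):

```go
// With a 500ms apdex threshold, the failing threshold is 4x = 2s:
//
//	CalculateApdexZone(500*time.Millisecond, 400*time.Millisecond)  // ApdexSatisfying (<= threshold)
//	CalculateApdexZone(500*time.Millisecond, 1500*time.Millisecond) // ApdexTolerating (<= 4x threshold)
//	CalculateApdexZone(500*time.Millisecond, 2500*time.Millisecond) // ApdexFailing    (> 4x threshold)
```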

+ 527 - 0
vendor/github.com/newrelic/go-agent/internal/attributes.go

@@ -0,0 +1,527 @@
+package internal
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// New agent attributes must be added in the following places:
+// * Constants here.
+// * Top level attributes.go file.
+// * agentAttributes
+// * agentAttributeDests
+// * calculateAgentAttributeDests
+// * writeAgentAttributes
+const (
+	responseCode          = "httpResponseCode"
+	requestMethod         = "request.method"
+	requestAccept         = "request.headers.accept"
+	requestContentType    = "request.headers.contentType"
+	requestContentLength  = "request.headers.contentLength"
+	requestHost           = "request.headers.host"
+	responseContentType   = "response.headers.contentType"
+	responseContentLength = "response.headers.contentLength"
+	hostDisplayName       = "host.displayName"
+	requestUserAgent      = "request.headers.User-Agent"
+	requestReferer        = "request.headers.referer"
+)
+
+// https://source.datanerd.us/agents/agent-specs/blob/master/Agent-Attributes-PORTED.md
+
+// AttributeDestinationConfig matches newrelic.AttributeDestinationConfig to
+// avoid circular dependency issues.
+type AttributeDestinationConfig struct {
+	Enabled bool
+	Include []string
+	Exclude []string
+}
+
+type destinationSet int
+
+const (
+	destTxnEvent destinationSet = 1 << iota
+	destError
+	destTxnTrace
+	destBrowser
+)
+
+const (
+	destNone destinationSet = 0
+	// DestAll contains all destinations.
+	DestAll destinationSet = destTxnEvent | destTxnTrace | destError | destBrowser
+)
+
+const (
+	attributeWildcardSuffix = '*'
+)
+
+type attributeModifier struct {
+	match string // This will not contain a trailing '*'.
+	includeExclude
+}
+
+type byMatch []*attributeModifier
+
+func (m byMatch) Len() int           { return len(m) }
+func (m byMatch) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
+func (m byMatch) Less(i, j int) bool { return m[i].match < m[j].match }
+
+// AttributeConfig is created at connect and shared between all transactions.
+type AttributeConfig struct {
+	disabledDestinations destinationSet
+	exactMatchModifiers  map[string]*attributeModifier
+	// Once attributeConfig is constructed, wildcardModifiers is sorted in
+	// lexicographical order.  Modifiers appearing later have precedence
+	// over modifiers appearing earlier.
+	wildcardModifiers []*attributeModifier
+	agentDests        agentAttributeDests
+}
+
+type includeExclude struct {
+	include destinationSet
+	exclude destinationSet
+}
+
+func modifierApply(m *attributeModifier, d destinationSet) destinationSet {
+	// Include before exclude, since exclude has priority.
+	d |= m.include
+	d &^= m.exclude
+	return d
+}
+
+func applyAttributeConfig(c *AttributeConfig, key string, d destinationSet) destinationSet {
+	// Important: The wildcard modifiers must be applied before the exact
+	// match modifiers, and the slice must be iterated in a forward
+	// direction.
+	for _, m := range c.wildcardModifiers {
+		if strings.HasPrefix(key, m.match) {
+			d = modifierApply(m, d)
+		}
+	}
+
+	if m, ok := c.exactMatchModifiers[key]; ok {
+		d = modifierApply(m, d)
+	}
+
+	d &^= c.disabledDestinations
+
+	return d
+}
+
+func addModifier(c *AttributeConfig, match string, d includeExclude) {
+	if "" == match {
+		return
+	}
+	exactMatch := true
+	if attributeWildcardSuffix == match[len(match)-1] {
+		exactMatch = false
+		match = match[0 : len(match)-1]
+	}
+	mod := &attributeModifier{
+		match:          match,
+		includeExclude: d,
+	}
+
+	if exactMatch {
+		if m, ok := c.exactMatchModifiers[mod.match]; ok {
+			m.include |= mod.include
+			m.exclude |= mod.exclude
+		} else {
+			c.exactMatchModifiers[mod.match] = mod
+		}
+	} else {
+		for _, m := range c.wildcardModifiers {
+			// Important: Duplicate entries for the same match
+			// string would not work because exclude needs
+			// precedence over include.
+			if m.match == mod.match {
+				m.include |= mod.include
+				m.exclude |= mod.exclude
+				return
+			}
+		}
+		c.wildcardModifiers = append(c.wildcardModifiers, mod)
+	}
+}
+
+func processDest(c *AttributeConfig, includeEnabled bool, dc *AttributeDestinationConfig, d destinationSet) {
+	if !dc.Enabled {
+		c.disabledDestinations |= d
+	}
+	if includeEnabled {
+		for _, match := range dc.Include {
+			addModifier(c, match, includeExclude{include: d})
+		}
+	}
+	for _, match := range dc.Exclude {
+		addModifier(c, match, includeExclude{exclude: d})
+	}
+}
+
+// AttributeConfigInput is used as the input to CreateAttributeConfig:  it
+// transforms newrelic.Config settings into an AttributeConfig.
+type AttributeConfigInput struct {
+	Attributes        AttributeDestinationConfig
+	ErrorCollector    AttributeDestinationConfig
+	TransactionEvents AttributeDestinationConfig
+	browserMonitoring AttributeDestinationConfig
+	TransactionTracer AttributeDestinationConfig
+}
+
+var (
+	sampleAttributeConfigInput = AttributeConfigInput{
+		Attributes:        AttributeDestinationConfig{Enabled: true},
+		ErrorCollector:    AttributeDestinationConfig{Enabled: true},
+		TransactionEvents: AttributeDestinationConfig{Enabled: true},
+		TransactionTracer: AttributeDestinationConfig{Enabled: true},
+	}
+)
+
+// CreateAttributeConfig creates a new AttributeConfig.
+func CreateAttributeConfig(input AttributeConfigInput, includeEnabled bool) *AttributeConfig {
+	c := &AttributeConfig{
+		exactMatchModifiers: make(map[string]*attributeModifier),
+		wildcardModifiers:   make([]*attributeModifier, 0, 64),
+	}
+
+	processDest(c, includeEnabled, &input.Attributes, DestAll)
+	processDest(c, includeEnabled, &input.ErrorCollector, destError)
+	processDest(c, includeEnabled, &input.TransactionEvents, destTxnEvent)
+	processDest(c, includeEnabled, &input.TransactionTracer, destTxnTrace)
+	processDest(c, includeEnabled, &input.browserMonitoring, destBrowser)
+
+	sort.Sort(byMatch(c.wildcardModifiers))
+
+	c.agentDests = calculateAgentAttributeDests(c)
+
+	return c
+}
+
+type userAttribute struct {
+	value interface{}
+	dests destinationSet
+}
+
+// Attributes are key value pairs attached to the various collected data types.
+type Attributes struct {
+	config *AttributeConfig
+	user   map[string]userAttribute
+	Agent  agentAttributes
+}
+
+type agentAttributes struct {
+	HostDisplayName              string
+	RequestMethod                string
+	RequestAcceptHeader          string
+	RequestContentType           string
+	RequestContentLength         int
+	RequestHeadersHost           string
+	RequestHeadersUserAgent      string
+	RequestHeadersReferer        string
+	ResponseHeadersContentType   string
+	ResponseHeadersContentLength int
+	ResponseCode                 string
+}
+
+type agentAttributeDests struct {
+	HostDisplayName              destinationSet
+	RequestMethod                destinationSet
+	RequestAcceptHeader          destinationSet
+	RequestContentType           destinationSet
+	RequestContentLength         destinationSet
+	RequestHeadersHost           destinationSet
+	RequestHeadersUserAgent      destinationSet
+	RequestHeadersReferer        destinationSet
+	ResponseHeadersContentType   destinationSet
+	ResponseHeadersContentLength destinationSet
+	ResponseCode                 destinationSet
+}
+
+func calculateAgentAttributeDests(c *AttributeConfig) agentAttributeDests {
+	usual := DestAll &^ destBrowser
+	traces := destTxnTrace | destError
+	return agentAttributeDests{
+		HostDisplayName:              applyAttributeConfig(c, hostDisplayName, usual),
+		RequestMethod:                applyAttributeConfig(c, requestMethod, usual),
+		RequestAcceptHeader:          applyAttributeConfig(c, requestAccept, usual),
+		RequestContentType:           applyAttributeConfig(c, requestContentType, usual),
+		RequestContentLength:         applyAttributeConfig(c, requestContentLength, usual),
+		RequestHeadersHost:           applyAttributeConfig(c, requestHost, usual),
+		RequestHeadersUserAgent:      applyAttributeConfig(c, requestUserAgent, traces),
+		RequestHeadersReferer:        applyAttributeConfig(c, requestReferer, traces),
+		ResponseHeadersContentType:   applyAttributeConfig(c, responseContentType, usual),
+		ResponseHeadersContentLength: applyAttributeConfig(c, responseContentLength, usual),
+		ResponseCode:                 applyAttributeConfig(c, responseCode, usual),
+	}
+}
+
+type agentAttributeWriter struct {
+	jsonFieldsWriter
+	d destinationSet
+}
+
+func (w *agentAttributeWriter) writeString(name string, val string, d destinationSet) {
+	if "" != val && 0 != w.d&d {
+		w.stringField(name, truncateStringValueIfLong(val))
+	}
+}
+
+func (w *agentAttributeWriter) writeInt(name string, val int, d destinationSet) {
+	if val >= 0 && 0 != w.d&d {
+		w.intField(name, int64(val))
+	}
+}
+
+func writeAgentAttributes(buf *bytes.Buffer, d destinationSet, values agentAttributes, dests agentAttributeDests) {
+	w := &agentAttributeWriter{
+		jsonFieldsWriter: jsonFieldsWriter{buf: buf},
+		d:                d,
+	}
+	buf.WriteByte('{')
+	w.writeString(hostDisplayName, values.HostDisplayName, dests.HostDisplayName)
+	w.writeString(requestMethod, values.RequestMethod, dests.RequestMethod)
+	w.writeString(requestAccept, values.RequestAcceptHeader, dests.RequestAcceptHeader)
+	w.writeString(requestContentType, values.RequestContentType, dests.RequestContentType)
+	w.writeInt(requestContentLength, values.RequestContentLength, dests.RequestContentLength)
+	w.writeString(requestHost, values.RequestHeadersHost, dests.RequestHeadersHost)
+	w.writeString(requestUserAgent, values.RequestHeadersUserAgent, dests.RequestHeadersUserAgent)
+	w.writeString(requestReferer, values.RequestHeadersReferer, dests.RequestHeadersReferer)
+	w.writeString(responseContentType, values.ResponseHeadersContentType, dests.ResponseHeadersContentType)
+	w.writeInt(responseContentLength, values.ResponseHeadersContentLength, dests.ResponseHeadersContentLength)
+	w.writeString(responseCode, values.ResponseCode, dests.ResponseCode)
+	buf.WriteByte('}')
+}
+
+// NewAttributes creates a new Attributes.
+func NewAttributes(config *AttributeConfig) *Attributes {
+	return &Attributes{
+		config: config,
+		Agent: agentAttributes{
+			RequestContentLength:         -1,
+			ResponseHeadersContentLength: -1,
+		},
+	}
+}
+
+// ErrInvalidAttributeType is returned when the value is not valid.
+type ErrInvalidAttributeType struct {
+	key string
+	val interface{}
+}
+
+func (e ErrInvalidAttributeType) Error() string {
+	return fmt.Sprintf("attribute '%s' value of type %T is invalid", e.key, e.val)
+}
+
+type invalidAttributeKeyErr struct{ key string }
+
+func (e invalidAttributeKeyErr) Error() string {
+	return fmt.Sprintf("attribute key '%.32s...' exceeds length limit %d",
+		e.key, attributeKeyLengthLimit)
+}
+
+type userAttributeLimitErr struct{ key string }
+
+func (e userAttributeLimitErr) Error() string {
+	return fmt.Sprintf("attribute '%s' discarded: limit of %d reached", e.key,
+		attributeUserLimit)
+}
+
+func truncateStringValueIfLong(val string) string {
+	if len(val) > attributeValueLengthLimit {
+		return StringLengthByteLimit(val, attributeValueLengthLimit)
+	}
+	return val
+}
+
+// ValidateUserAttribute validates a user attribute.
+func ValidateUserAttribute(key string, val interface{}) (interface{}, error) {
+	if str, ok := val.(string); ok {
+		val = interface{}(truncateStringValueIfLong(str))
+	}
+
+	switch val.(type) {
+	case string, bool, nil,
+		uint8, uint16, uint32, uint64, int8, int16, int32, int64,
+		float32, float64, uint, int, uintptr:
+	default:
+		return nil, ErrInvalidAttributeType{
+			key: key,
+			val: val,
+		}
+	}
+
+	// Attributes whose keys are excessively long are dropped rather than
+	// truncated to avoid worrying about the application of configuration to
+	// truncated values or performing the truncation after configuration.
+	if len(key) > attributeKeyLengthLimit {
+		return nil, invalidAttributeKeyErr{key: key}
+	}
+	return val, nil
+}
+
+// AddUserAttribute adds a user attribute.
+func AddUserAttribute(a *Attributes, key string, val interface{}, d destinationSet) error {
+	val, err := ValidateUserAttribute(key, val)
+	if nil != err {
+		return err
+	}
+	dests := applyAttributeConfig(a.config, key, d)
+	if destNone == dests {
+		return nil
+	}
+	if nil == a.user {
+		a.user = make(map[string]userAttribute)
+	}
+
+	if _, exists := a.user[key]; !exists && len(a.user) >= attributeUserLimit {
+		return userAttributeLimitErr{key}
+	}
+
+	// Note: Duplicates are overridden: last attribute in wins.
+	a.user[key] = userAttribute{
+		value: val,
+		dests: dests,
+	}
+	return nil
+}
+
+func writeAttributeValueJSON(w *jsonFieldsWriter, key string, val interface{}) {
+	switch v := val.(type) {
+	case nil:
+		w.rawField(key, `null`)
+	case string:
+		w.stringField(key, v)
+	case bool:
+		if v {
+			w.rawField(key, `true`)
+		} else {
+			w.rawField(key, `false`)
+		}
+	case uint8:
+		w.intField(key, int64(v))
+	case uint16:
+		w.intField(key, int64(v))
+	case uint32:
+		w.intField(key, int64(v))
+	case uint64:
+		w.intField(key, int64(v))
+	case uint:
+		w.intField(key, int64(v))
+	case uintptr:
+		w.intField(key, int64(v))
+	case int8:
+		w.intField(key, int64(v))
+	case int16:
+		w.intField(key, int64(v))
+	case int32:
+		w.intField(key, int64(v))
+	case int64:
+		w.intField(key, v)
+	case int:
+		w.intField(key, int64(v))
+	case float32:
+		w.floatField(key, float64(v))
+	case float64:
+		w.floatField(key, v)
+	default:
+		w.stringField(key, fmt.Sprintf("%T", v))
+	}
+}
+
+func agentAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet) {
+	if nil == a {
+		buf.WriteString("{}")
+		return
+	}
+	writeAgentAttributes(buf, d, a.Agent, a.config.agentDests)
+}
+
+func userAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet, extraAttributes map[string]interface{}) {
+	buf.WriteByte('{')
+	if nil != a {
+		w := jsonFieldsWriter{buf: buf}
+		for key, val := range extraAttributes {
+			outputDest := applyAttributeConfig(a.config, key, d)
+			if 0 != outputDest&d {
+				writeAttributeValueJSON(&w, key, val)
+			}
+		}
+		for name, atr := range a.user {
+			if 0 != atr.dests&d {
+				if _, found := extraAttributes[name]; found {
+					continue
+				}
+				writeAttributeValueJSON(&w, name, atr.value)
+			}
+		}
+	}
+	buf.WriteByte('}')
+}
+
+// userAttributesStringJSON is only used for testing.
+func userAttributesStringJSON(a *Attributes, d destinationSet, extraAttributes map[string]interface{}) string {
+	estimate := len(a.user) * 128
+	buf := bytes.NewBuffer(make([]byte, 0, estimate))
+	userAttributesJSON(a, buf, d, extraAttributes)
+	return buf.String()
+}
+
+// RequestAgentAttributes gathers agent attributes out of the request.
+func RequestAgentAttributes(a *Attributes, r *http.Request) {
+	a.Agent.RequestMethod = r.Method
+
+	h := r.Header
+	if nil == h {
+		return
+	}
+	a.Agent.RequestAcceptHeader = h.Get("Accept")
+	a.Agent.RequestContentType = h.Get("Content-Type")
+	a.Agent.RequestHeadersHost = h.Get("Host")
+	a.Agent.RequestHeadersUserAgent = h.Get("User-Agent")
+	a.Agent.RequestHeadersReferer = SafeURLFromString(h.Get("Referer"))
+
+	// Per NewAttributes(), the default for this field is -1 (which is also what
+	// GetContentLengthFromHeader() returns if no content length is found), so we
+	// can just use the return value unconditionally.
+	a.Agent.RequestContentLength = int(GetContentLengthFromHeader(h))
+}
+
+// ResponseHeaderAttributes gather agent attributes from the response headers.
+func ResponseHeaderAttributes(a *Attributes, h http.Header) {
+	if nil == h {
+		return
+	}
+	a.Agent.ResponseHeadersContentType = h.Get("Content-Type")
+
+	// Per NewAttributes(), the default for this field is -1 (which is also what
+	// GetContentLengthFromHeader() returns if no content length is found), so we
+	// can just use the return value unconditionally.
+	a.Agent.ResponseHeadersContentLength = int(GetContentLengthFromHeader(h))
+}
+
+var (
+	// statusCodeLookup avoids a strconv.Itoa call.
+	statusCodeLookup = map[int]string{
+		100: "100", 101: "101",
+		200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206",
+		300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307",
+		400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406",
+		407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413",
+		414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429",
+		431: "431", 451: "451",
+		500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511",
+	}
+)
+
+// ResponseCodeAttribute sets the response code agent attribute.
+func ResponseCodeAttribute(a *Attributes, code int) {
+	a.Agent.ResponseCode = statusCodeLookup[code]
+	if a.Agent.ResponseCode == "" {
+		a.Agent.ResponseCode = strconv.Itoa(code)
+	}
+}
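
A concrete consequence of the ordering in applyAttributeConfig: configuring Exclude: []string{"request.headers.*"} together with Include: []string{"request.headers.contentType"} first strips every request-header attribute via the wildcard modifier, then the exact-match modifier, applied afterwards, restores request.headers.contentType. Within a single modifier, exclude always wins over include.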

+ 111 - 0
vendor/github.com/newrelic/go-agent/internal/cat/appdata.go

@@ -0,0 +1,111 @@
+package cat
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+
+	"github.com/newrelic/go-agent/internal/jsonx"
+)
+
+// AppDataHeader represents a decoded AppData header.
+type AppDataHeader struct {
+	CrossProcessID        string
+	TransactionName       string
+	QueueTimeInSeconds    float64
+	ResponseTimeInSeconds float64
+	ContentLength         int64
+	TransactionGUID       string
+}
+
+var (
+	errInvalidAppDataJSON                  = errors.New("invalid transaction data JSON")
+	errInvalidAppDataCrossProcessID        = errors.New("cross process ID is not a string")
+	errInvalidAppDataTransactionName       = errors.New("transaction name is not a string")
+	errInvalidAppDataQueueTimeInSeconds    = errors.New("queue time is not a float64")
+	errInvalidAppDataResponseTimeInSeconds = errors.New("response time is not a float64")
+	errInvalidAppDataContentLength         = errors.New("content length is not a float64")
+	errInvalidAppDataTransactionGUID       = errors.New("transaction GUID is not a string")
+)
+
+// MarshalJSON marshals an AppDataHeader as raw JSON.
+func (appData *AppDataHeader) MarshalJSON() ([]byte, error) {
+	buf := bytes.NewBufferString("[")
+
+	jsonx.AppendString(buf, appData.CrossProcessID)
+
+	buf.WriteString(",")
+	jsonx.AppendString(buf, appData.TransactionName)
+
+	buf.WriteString(",")
+	jsonx.AppendFloat(buf, appData.QueueTimeInSeconds)
+
+	buf.WriteString(",")
+	jsonx.AppendFloat(buf, appData.ResponseTimeInSeconds)
+
+	buf.WriteString(",")
+	jsonx.AppendInt(buf, appData.ContentLength)
+
+	buf.WriteString(",")
+	jsonx.AppendString(buf, appData.TransactionGUID)
+
+	// The mysterious unused field. We don't need to round trip this, so we'll
+	// just hardcode it to false.
+	buf.WriteString(",false]")
+	return buf.Bytes(), nil
+}
+
+// UnmarshalJSON unmarshals an AppDataHeader from raw JSON.
+func (appData *AppDataHeader) UnmarshalJSON(data []byte) error {
+	var ok bool
+	var v interface{}
+
+	if err := json.Unmarshal(data, &v); err != nil {
+		return err
+	}
+
+	arr, ok := v.([]interface{})
+	if !ok {
+		return errInvalidAppDataJSON
+	}
+	if len(arr) < 7 {
+		return errUnexpectedArraySize{
+			label:    "unexpected number of application data elements",
+			expected: 7,
+			actual:   len(arr),
+		}
+	}
+
+	if appData.CrossProcessID, ok = arr[0].(string); !ok {
+		return errInvalidAppDataCrossProcessID
+	}
+
+	if appData.TransactionName, ok = arr[1].(string); !ok {
+		return errInvalidAppDataTransactionName
+	}
+
+	if appData.QueueTimeInSeconds, ok = arr[2].(float64); !ok {
+		return errInvalidAppDataQueueTimeInSeconds
+	}
+
+	if appData.ResponseTimeInSeconds, ok = arr[3].(float64); !ok {
+		return errInvalidAppDataResponseTimeInSeconds
+	}
+
+	cl, ok := arr[4].(float64)
+	if !ok {
+		return errInvalidAppDataContentLength
+	}
+	// Content length is specced as int32, but not all agents are consistent on
+	// this in practice. Let's handle it as int64 to maximise compatibility.
+	appData.ContentLength = int64(cl)
+
+	if appData.TransactionGUID, ok = arr[5].(string); !ok {
+		return errInvalidAppDataTransactionGUID
+	}
+
+	// As above, we don't bother decoding the unused field here. It just has to
+	// be present (which was checked earlier with the length check).
+
+	return nil
+}

+ 15 - 0
vendor/github.com/newrelic/go-agent/internal/cat/errors.go

@@ -0,0 +1,15 @@
+package cat
+
+import (
+	"fmt"
+)
+
+type errUnexpectedArraySize struct {
+	label    string
+	expected int
+	actual   int
+}
+
+func (e errUnexpectedArraySize) Error() string {
+	return fmt.Sprintf("%s: expected %d; got %d", e.label, e.expected, e.actual)
+}

+ 13 - 0
vendor/github.com/newrelic/go-agent/internal/cat/headers.go

@@ -0,0 +1,13 @@
+// Package cat provides functionality related to the wire format of CAT
+// headers.
+package cat
+
+// These header names don't match the spec in terms of their casing, but they
+// do match what Go will give us from http.CanonicalHeaderKey(). Besides, HTTP
+// headers are case insensitive anyway. Rejoice!
+const (
+	NewRelicIDName         = "X-Newrelic-Id"
+	NewRelicTxnName        = "X-Newrelic-Transaction"
+	NewRelicAppDataName    = "X-Newrelic-App-Data"
+	NewRelicSyntheticsName = "X-Newrelic-Synthetics"
+)

+ 41 - 0
vendor/github.com/newrelic/go-agent/internal/cat/id.go

@@ -0,0 +1,41 @@
+package cat
+
+import (
+	"errors"
+	"strconv"
+	"strings"
+)
+
+// IDHeader represents a decoded cross process ID header (generally encoded as
+// a string in the form ACCOUNT#BLOB).
+type IDHeader struct {
+	AccountID int
+	Blob      string
+}
+
+var (
+	errInvalidAccountID = errors.New("invalid account ID")
+)
+
+// NewIDHeader parses the given decoded ID header and creates an IDHeader
+// representing it.
+func NewIDHeader(in []byte) (*IDHeader, error) {
+	parts := strings.Split(string(in), "#")
+	if len(parts) != 2 {
+		return nil, errUnexpectedArraySize{
+			label:    "unexpected number of ID elements",
+			expected: 2,
+			actual:   len(parts),
+		}
+	}
+
+	account, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return nil, errInvalidAccountID
+	}
+
+	return &IDHeader{
+		AccountID: account,
+		Blob:      parts[1],
+	}, nil
+}
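A short illustration of the parsing rules above, using made-up header values: a well-formed ACCOUNT#BLOB string splits into its two parts, and a non-numeric account ID is rejected.

```go
package main

import (
	"fmt"

	"github.com/newrelic/go-agent/internal/cat"
)

func main() {
	id, err := cat.NewIDHeader([]byte("12345#abcde"))
	if err != nil {
		panic(err)
	}
	fmt.Println(id.AccountID, id.Blob) // 12345 abcde

	// The account portion must be numeric.
	_, err = cat.NewIDHeader([]byte("account#abcde"))
	fmt.Println(err) // invalid account ID
}
```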

+ 35 - 0
vendor/github.com/newrelic/go-agent/internal/cat/path_hash.go

@@ -0,0 +1,35 @@
+package cat
+
+import (
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"regexp"
+)
+
+var pathHashValidator = regexp.MustCompile("^[0-9a-f]{8}$")
+
+// GeneratePathHash generates a path hash given a referring path hash,
+// transaction name, and application name. referringPathHash can be an empty
+// string if there was no referring path hash.
+func GeneratePathHash(referringPathHash, txnName, appName string) (string, error) {
+	var rph uint32
+	if referringPathHash != "" {
+		if !pathHashValidator.MatchString(referringPathHash) {
+			// Per the spec, invalid referring path hashes should be treated as "0".
+			referringPathHash = "0"
+		}
+
+		if _, err := fmt.Sscanf(referringPathHash, "%x", &rph); err != nil {
+			return "", err
+		}
+		rph = (rph << 1) | (rph >> 31)
+	}
+
+	hashInput := fmt.Sprintf("%s;%s", appName, txnName)
+	hash := md5.Sum([]byte(hashInput))
+	low32 := binary.BigEndian.Uint32(hash[12:])
+
+	return fmt.Sprintf("%08x", rph^low32), nil
+}
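In summary: the referring hash, when present, is rotated left one bit and XORed with the low 32 bits of `md5("appName;txnName")`, and the result is formatted as eight lowercase hex digits. A usage sketch with fabricated names:

```go
package main

import (
	"fmt"

	"github.com/newrelic/go-agent/internal/cat"
)

func main() {
	// First hop: no referring path hash, so the result is just the low 32
	// bits of the MD5 of "My App;WebTransaction/Go/index".
	first, err := cat.GeneratePathHash("", "WebTransaction/Go/index", "My App")
	if err != nil {
		panic(err)
	}
	fmt.Println(first) // eight lowercase hex digits

	// Next hop: the previous hash feeds into the new one.
	second, err := cat.GeneratePathHash(first, "WebTransaction/Go/show", "My App")
	if err != nil {
		panic(err)
	}
	fmt.Println(second)
}
```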

+ 82 - 0
vendor/github.com/newrelic/go-agent/internal/cat/synthetics.go

@@ -0,0 +1,82 @@
+package cat
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+)
+
+// SyntheticsHeader represents a decoded Synthetics header.
+type SyntheticsHeader struct {
+	Version    int
+	AccountID  int
+	ResourceID string
+	JobID      string
+	MonitorID  string
+}
+
+var (
+	errInvalidSyntheticsJSON       = errors.New("invalid synthetics JSON")
+	errInvalidSyntheticsVersion    = errors.New("version is not a float64")
+	errInvalidSyntheticsAccountID  = errors.New("account ID is not a float64")
+	errInvalidSyntheticsResourceID = errors.New("synthetics resource ID is not a string")
+	errInvalidSyntheticsJobID      = errors.New("synthetics job ID is not a string")
+	errInvalidSyntheticsMonitorID  = errors.New("synthetics monitor ID is not a string")
+)
+
+type errUnexpectedSyntheticsVersion int
+
+func (e errUnexpectedSyntheticsVersion) Error() string {
+	return fmt.Sprintf("unexpected synthetics header version: %d", e)
+}
+
+// UnmarshalJSON unmarshals a SyntheticsHeader from raw JSON.
+func (s *SyntheticsHeader) UnmarshalJSON(data []byte) error {
+	var ok bool
+	var v interface{}
+
+	if err := json.Unmarshal(data, &v); err != nil {
+		return err
+	}
+
+	arr, ok := v.([]interface{})
+	if !ok {
+		return errInvalidSyntheticsJSON
+	}
+	if len(arr) != 5 {
+		return errUnexpectedArraySize{
+			label:    "unexpected number of application data elements",
+			expected: 5,
+			actual:   len(arr),
+		}
+	}
+
+	version, ok := arr[0].(float64)
+	if !ok {
+		return errInvalidSyntheticsVersion
+	}
+	s.Version = int(version)
+	if s.Version != 1 {
+		return errUnexpectedSyntheticsVersion(s.Version)
+	}
+
+	accountID, ok := arr[1].(float64)
+	if !ok {
+		return errInvalidSyntheticsAccountID
+	}
+	s.AccountID = int(accountID)
+
+	if s.ResourceID, ok = arr[2].(string); !ok {
+		return errInvalidSyntheticsResourceID
+	}
+
+	if s.JobID, ok = arr[3].(string); !ok {
+		return errInvalidSyntheticsJobID
+	}
+
+	if s.MonitorID, ok = arr[4].(string); !ok {
+		return errInvalidSyntheticsMonitorID
+	}
+
+	return nil
+}
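A sketch of these decoding rules with hypothetical values; the header must be a five-element array, and only version 1 is accepted:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/newrelic/go-agent/internal/cat"
)

func main() {
	raw := `[1, 12345, "resource-id", "job-id", "monitor-id"]`

	var s cat.SyntheticsHeader
	if err := json.Unmarshal([]byte(raw), &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s)

	// Any other version is rejected.
	err := json.Unmarshal([]byte(`[2, 12345, "r", "j", "m"]`), &s)
	fmt.Println(err) // unexpected synthetics header version: 2
}
```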

+ 96 - 0
vendor/github.com/newrelic/go-agent/internal/cat/txndata.go

@@ -0,0 +1,96 @@
+package cat
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+
+	"github.com/newrelic/go-agent/internal/jsonx"
+)
+
+// TxnDataHeader represents a decoded TxnData header.
+type TxnDataHeader struct {
+	GUID     string
+	TripID   string
+	PathHash string
+}
+
+var (
+	errInvalidTxnDataJSON     = errors.New("invalid transaction data JSON")
+	errInvalidTxnDataGUID     = errors.New("GUID is not a string")
+	errInvalidTxnDataTripID   = errors.New("trip ID is not a string or null")
+	errInvalidTxnDataPathHash = errors.New("path hash is not a string or null")
+)
+
+// MarshalJSON marshals a TxnDataHeader as raw JSON.
+func (txnData *TxnDataHeader) MarshalJSON() ([]byte, error) {
+	// Note that, although there are two and four element versions of this header
+	// in the wild, we will only ever generate the four element version.
+
+	buf := bytes.NewBufferString("[")
+
+	jsonx.AppendString(buf, txnData.GUID)
+
+	// Write the unused second field.
+	buf.WriteString(",false,")
+	jsonx.AppendString(buf, txnData.TripID)
+
+	buf.WriteString(",")
+	jsonx.AppendString(buf, txnData.PathHash)
+
+	buf.WriteString("]")
+
+	return buf.Bytes(), nil
+}
+
+// UnmarshalJSON unmarshals a TxnDataHeader from raw JSON.
+func (txnData *TxnDataHeader) UnmarshalJSON(data []byte) error {
+	var ok bool
+	var v interface{}
+
+	if err := json.Unmarshal(data, &v); err != nil {
+		return err
+	}
+
+	arr, ok := v.([]interface{})
+	if !ok {
+		return errInvalidTxnDataJSON
+	}
+	if len(arr) < 2 {
+		return errUnexpectedArraySize{
+			label:    "unexpected number of transaction data elements",
+			expected: 2,
+			actual:   len(arr),
+		}
+	}
+
+	if txnData.GUID, ok = arr[0].(string); !ok {
+		return errInvalidTxnDataGUID
+	}
+
+	// Ignore the unused second field.
+
+	// Set up defaults for the optional values.
+	txnData.TripID = ""
+	txnData.PathHash = ""
+
+	if len(arr) >= 3 {
+		// Per the cross agent tests, an explicit null is valid here.
+		if nil != arr[2] {
+			if txnData.TripID, ok = arr[2].(string); !ok {
+				return errInvalidTxnDataTripID
+			}
+		}
+
+		if len(arr) >= 4 {
+			// Per the cross agent tests, an explicit null is also valid here.
+			if nil != arr[3] {
+				if txnData.PathHash, ok = arr[3].(string); !ok {
+					return errInvalidTxnDataPathHash
+				}
+			}
+		}
+	}
+
+	return nil
+}
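The asymmetry noted in the comments — shorter forms and explicit nulls accepted on input, the four-element form always produced on output — can be seen in a short round trip with hypothetical values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/newrelic/go-agent/internal/cat"
)

func main() {
	var txnData cat.TxnDataHeader

	// The short two-element form and explicit nulls are both accepted.
	for _, raw := range []string{`["guid", false]`, `["guid", false, null, null]`} {
		if err := json.Unmarshal([]byte(raw), &txnData); err != nil {
			panic(err)
		}
	}

	// Marshalling always emits the full four-element form.
	txnData.TripID = "trip-id"
	txnData.PathHash = "01234567"
	js, err := json.Marshal(&txnData)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // ["guid",false,"trip-id","01234567"]
}
```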

+ 339 - 0
vendor/github.com/newrelic/go-agent/internal/collector.go

@@ -0,0 +1,339 @@
+package internal
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"regexp"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/logger"
+)
+
+const (
+	protocolVersion = "16"
+	userAgentPrefix = "NewRelic-Go-Agent/"
+
+	// Methods used in collector communication.
+	cmdPreconnect   = "preconnect"
+	cmdConnect      = "connect"
+	cmdMetrics      = "metric_data"
+	cmdCustomEvents = "custom_event_data"
+	cmdTxnEvents    = "analytic_event_data"
+	cmdErrorEvents  = "error_event_data"
+	cmdErrorData    = "error_data"
+	cmdTxnTraces    = "transaction_sample_data"
+	cmdSlowSQLs     = "sql_trace_data"
+	cmdSpanEvents   = "span_event_data"
+)
+
+var (
+	// ErrPayloadTooLarge is created in response to receiving a 413 response
+	// code.
+	ErrPayloadTooLarge = errors.New("payload too large")
+	// ErrUnauthorized is created in response to receiving a 401 response code.
+	ErrUnauthorized = errors.New("unauthorized")
+	// ErrUnsupportedMedia is created in response to receiving a 415
+	// response code.
+	ErrUnsupportedMedia = errors.New("unsupported media")
+)
+
+// RpmCmd contains fields specific to an individual call made to RPM.
+type RpmCmd struct {
+	Name      string
+	Collector string
+	RunID     string
+	Data      []byte
+}
+
+// RpmControls contains fields which will be the same for all calls made
+// by the same application.
+type RpmControls struct {
+	License      string
+	Client       *http.Client
+	Logger       logger.Logger
+	AgentVersion string
+}
+
+func rpmURL(cmd RpmCmd, cs RpmControls) string {
+	var u url.URL
+
+	u.Host = cmd.Collector
+	u.Path = "agent_listener/invoke_raw_method"
+	u.Scheme = "https"
+
+	query := url.Values{}
+	query.Set("marshal_format", "json")
+	query.Set("protocol_version", procotolVersion)
+	query.Set("method", cmd.Name)
+	query.Set("license_key", cs.License)
+
+	if len(cmd.RunID) > 0 {
+		query.Set("run_id", cmd.RunID)
+	}
+
+	u.RawQuery = query.Encode()
+	return u.String()
+}
+
+type unexpectedStatusCodeErr struct {
+	code int
+}
+
+func (e unexpectedStatusCodeErr) Error() string {
+	return fmt.Sprintf("unexpected HTTP status code: %d", e.code)
+}
+
+func collectorRequestInternal(url string, data []byte, cs RpmControls) ([]byte, error) {
+	deflated, err := compress(data)
+	if nil != err {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("POST", url, deflated)
+	if nil != err {
+		return nil, err
+	}
+
+	req.Header.Add("Accept-Encoding", "identity, deflate")
+	req.Header.Add("Content-Type", "application/octet-stream")
+	req.Header.Add("User-Agent", userAgentPrefix+cs.AgentVersion)
+	req.Header.Add("Content-Encoding", "deflate")
+
+	resp, err := cs.Client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case 200:
+		// Nothing to do.
+	case 401:
+		return nil, ErrUnauthorized
+	case 413:
+		return nil, ErrPayloadTooLarge
+	case 415:
+		return nil, ErrUnsupportedMedia
+	default:
+		// If the response code is not 200, then the collector may not return
+		// valid JSON.
+		return nil, unexpectedStatusCodeErr{code: resp.StatusCode}
+	}
+
+	// Read the entire response, rather than using resp.Body as input to json.NewDecoder to
+	// avoid the issue described here:
+	// https://github.com/google/go-github/pull/317
+	// https://ahmetalpbalkan.com/blog/golang-json-decoder-pitfalls/
+	// Also, collector JSON responses are expected to be quite small.
+	b, err := ioutil.ReadAll(resp.Body)
+	if nil != err {
+		return nil, err
+	}
+	return parseResponse(b)
+}
+
+// CollectorRequest makes a request to New Relic.
+func CollectorRequest(cmd RpmCmd, cs RpmControls) ([]byte, error) {
+	url := rpmURL(cmd, cs)
+
+	if cs.Logger.DebugEnabled() {
+		cs.Logger.Debug("rpm request", map[string]interface{}{
+			"command": cmd.Name,
+			"url":     url,
+			"payload": JSONString(cmd.Data),
+		})
+	}
+
+	resp, err := collectorRequestInternal(url, cmd.Data, cs)
+	if err != nil {
+		cs.Logger.Debug("rpm failure", map[string]interface{}{
+			"command": cmd.Name,
+			"url":     url,
+			"error":   err.Error(),
+		})
+	}
+
+	if cs.Logger.DebugEnabled() {
+		cs.Logger.Debug("rpm response", map[string]interface{}{
+			"command":  cmd.Name,
+			"url":      url,
+			"response": JSONString(resp),
+		})
+	}
+
+	return resp, err
+}
+
+type rpmException struct {
+	Message   string `json:"message"`
+	ErrorType string `json:"error_type"`
+}
+
+func (e *rpmException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorType, e.Message)
+}
+
+func hasType(e error, expected string) bool {
+	rpmErr, ok := e.(*rpmException)
+	if !ok {
+		return false
+	}
+	return rpmErr.ErrorType == expected
+}
+
+const (
+	forceRestartType   = "NewRelic::Agent::ForceRestartException"
+	disconnectType     = "NewRelic::Agent::ForceDisconnectException"
+	licenseInvalidType = "NewRelic::Agent::LicenseException"
+	runtimeType        = "RuntimeError"
+)
+
+// IsRestartException indicates if the error was a restart exception.
+func IsRestartException(e error) bool { return hasType(e, forceRestartType) }
+
+// IsLicenseException indicates if the error was an invalid license exception.
+func IsLicenseException(e error) bool { return hasType(e, licenseInvalidType) }
+
+// IsRuntime indicates if the error was a runtime exception.
+func IsRuntime(e error) bool { return hasType(e, runtimeType) }
+
+// IsDisconnect indicates if the error was a disconnect exception.
+func IsDisconnect(e error) bool {
+	// Unrecognized or missing security policies should be treated as
+	// disconnects.
+	if _, ok := e.(errUnknownRequiredPolicy); ok {
+		return true
+	}
+	if _, ok := e.(errUnsetPolicy); ok {
+		return true
+	}
+	return hasType(e, disconnectType)
+}
+
+func parseResponse(b []byte) ([]byte, error) {
+	var r struct {
+		ReturnValue json.RawMessage `json:"return_value"`
+		Exception   *rpmException   `json:"exception"`
+	}
+
+	err := json.Unmarshal(b, &r)
+	if nil != err {
+		return nil, err
+	}
+
+	if nil != r.Exception {
+		return nil, r.Exception
+	}
+
+	return r.ReturnValue, nil
+}
+
+const (
+	// NEW_RELIC_HOST can be used to override the New Relic endpoint.  This
+	// is useful for testing.
+	envHost = "NEW_RELIC_HOST"
+)
+
+var (
+	preconnectHostOverride       = os.Getenv(envHost)
+	preconnectHostDefault        = "collector.newrelic.com"
+	preconnectRegionLicenseRegex = regexp.MustCompile(`(^.+?)x`)
+)
+
+func calculatePreconnectHost(license, overrideHost string) string {
+	if "" != overrideHost {
+		return overrideHost
+	}
+	m := preconnectRegionLicenseRegex.FindStringSubmatch(license)
+	if len(m) > 1 {
+		return "collector." + m[1] + ".nr-data.net"
+	}
+	return preconnectHostDefault
+}
+
+// ConnectJSONCreator allows the creation of the connect payload JSON to be
+// deferred until the SecurityPolicies are acquired and vetted.
+type ConnectJSONCreator interface {
+	CreateConnectJSON(*SecurityPolicies) ([]byte, error)
+}
+
+type preconnectRequest struct {
+	SecurityPoliciesToken string `json:"security_policies_token,omitempty"`
+}
+
+// ConnectAttempt tries to connect an application.
+func ConnectAttempt(config ConnectJSONCreator, securityPoliciesToken string, cs RpmControls) (*ConnectReply, error) {
+	preconnectData, err := json.Marshal([]preconnectRequest{
+		preconnectRequest{SecurityPoliciesToken: securityPoliciesToken},
+	})
+	if nil != err {
+		return nil, fmt.Errorf("unable to marshal preconnect data: %v", err)
+	}
+
+	call := RpmCmd{
+		Name:      cmdPreconnect,
+		Collector: calculatePreconnectHost(cs.License, preconnectHostOverride),
+		Data:      preconnectData,
+	}
+
+	out, err := CollectorRequest(call, cs)
+	if nil != err {
+		// err is intentionally unmodified:  We do not want to change
+		// the type of these collector errors.
+		return nil, err
+	}
+
+	var preconnect PreconnectReply
+	err = json.Unmarshal(out, &preconnect)
+	if nil != err {
+		// Unknown policies detected during unmarshal should produce a
+		// disconnect.
+		if IsDisconnect(err) {
+			return nil, err
+		}
+		return nil, fmt.Errorf("unable to parse preconnect reply: %v", err)
+	}
+
+	js, err := config.CreateConnectJSON(preconnect.SecurityPolicies.PointerIfPopulated())
+	if nil != err {
+		return nil, fmt.Errorf("unable to create connect data: %v", err)
+	}
+
+	call.Collector = preconnect.Collector
+	call.Data = js
+	call.Name = cmdConnect
+
+	rawReply, err := CollectorRequest(call, cs)
+	if nil != err {
+		// err is intentionally unmodified:  We do not want to change
+		// the type of these collector errors.
+		return nil, err
+	}
+
+	reply := ConnectReplyDefaults()
+	err = json.Unmarshal(rawReply, reply)
+	if nil != err {
+		return nil, fmt.Errorf("unable to parse connect reply: %v", err)
+	}
+	// Note:  This should never happen.  It would mean the collector
+	// response is malformed.  This exists merely as extra defensiveness.
+	if "" == reply.RunID {
+		return nil, errors.New("connect reply missing agent run id")
+	}
+
+	reply.PreconnectReply = preconnect
+
+	reply.AdaptiveSampler = newAdaptiveSampler(adaptiveSamplerInput{
+		Period: time.Duration(reply.SamplingTargetPeriodInSeconds) * time.Second,
+		Target: reply.SamplingTarget,
+	}, time.Now())
+
+	return reply, nil
+}
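`calculatePreconnectHost` is unexported, but its region-derivation rule is simple enough to mirror in a standalone sketch: an explicit override wins, then a region prefix extracted from the license key (everything before the first "x"), then the default US collector. The license keys below are fabricated:

```go
package main

import (
	"fmt"
	"regexp"
)

// regionLicenseRegex mirrors preconnectRegionLicenseRegex above.
var regionLicenseRegex = regexp.MustCompile(`(^.+?)x`)

func preconnectHost(license, override string) string {
	if override != "" {
		return override
	}
	if m := regionLicenseRegex.FindStringSubmatch(license); len(m) > 1 {
		return "collector." + m[1] + ".nr-data.net"
	}
	return "collector.newrelic.com"
}

func main() {
	// Region-prefixed license keys route to a regional collector.
	fmt.Println(preconnectHost("eu01xx0123456789012345678901234567890123", ""))
	// collector.eu01.nr-data.net

	// Keys without an "x" fall back to the default host.
	fmt.Println(preconnectHost("0123456789012345678901234567890123456789", ""))
	// collector.newrelic.com

	// NEW_RELIC_HOST (or any override) takes precedence.
	fmt.Println(preconnectHost("anything", "staging-collector.example.com"))
	// staging-collector.example.com
}
```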

+ 19 - 0
vendor/github.com/newrelic/go-agent/internal/compress.go

@@ -0,0 +1,19 @@
+package internal
+
+import (
+	"bytes"
+	"compress/zlib"
+)
+
+func compress(b []byte) (*bytes.Buffer, error) {
+	var buf bytes.Buffer
+	w := zlib.NewWriter(&buf)
+	_, err := w.Write(b)
+	w.Close()
+
+	if nil != err {
+		return nil, err
+	}
+
+	return &buf, nil
+}

+ 150 - 0
vendor/github.com/newrelic/go-agent/internal/connect_reply.go

@@ -0,0 +1,150 @@
+package internal
+
+import (
+	"encoding/json"
+	"strings"
+	"time"
+)
+
+// AgentRunID identifies the current connection with the collector.
+type AgentRunID string
+
+func (id AgentRunID) String() string {
+	return string(id)
+}
+
+// PreconnectReply contains settings from the preconnect endpoint.
+type PreconnectReply struct {
+	Collector        string           `json:"redirect_host"`
+	SecurityPolicies SecurityPolicies `json:"security_policies"`
+}
+
+// ConnectReply contains all of the settings and state sent down from the
+// collector.  It should not be modified after creation.
+type ConnectReply struct {
+	RunID AgentRunID `json:"agent_run_id"`
+
+	// Transaction Name Modifiers
+	SegmentTerms segmentRules `json:"transaction_segment_terms"`
+	TxnNameRules metricRules  `json:"transaction_name_rules"`
+	URLRules     metricRules  `json:"url_rules"`
+	MetricRules  metricRules  `json:"metric_name_rules"`
+
+	// Cross Process
+	EncodingKey     string            `json:"encoding_key"`
+	CrossProcessID  string            `json:"cross_process_id"`
+	TrustedAccounts trustedAccountSet `json:"trusted_account_ids"`
+
+	// Settings
+	KeyTxnApdex            map[string]float64 `json:"web_transactions_apdex"`
+	ApdexThresholdSeconds  float64            `json:"apdex_t"`
+	CollectAnalyticsEvents bool               `json:"collect_analytics_events"`
+	CollectCustomEvents    bool               `json:"collect_custom_events"`
+	CollectTraces          bool               `json:"collect_traces"`
+	CollectErrors          bool               `json:"collect_errors"`
+	CollectErrorEvents     bool               `json:"collect_error_events"`
+
+	// RUM
+	AgentLoader string `json:"js_agent_loader"`
+	Beacon      string `json:"beacon"`
+	BrowserKey  string `json:"browser_key"`
+	AppID       string `json:"application_id"`
+	ErrorBeacon string `json:"error_beacon"`
+	JSAgentFile string `json:"js_agent_file"`
+
+	// PreconnectReply fields are not in the connect reply; this embedding
+	// is done to simplify code.
+	PreconnectReply `json:"-"`
+
+	Messages []struct {
+		Message string `json:"message"`
+		Level   string `json:"level"`
+	} `json:"messages"`
+
+	AdaptiveSampler AdaptiveSampler
+
+	// BetterCAT/Distributed Tracing
+	AccountID                     string `json:"account_id"`
+	TrustedAccountKey             string `json:"trusted_account_key"`
+	PrimaryAppID                  string `json:"primary_application_id"`
+	SamplingTarget                uint64 `json:"sampling_target"`
+	SamplingTargetPeriodInSeconds int    `json:"sampling_target_period_in_seconds"`
+}
+
+type trustedAccountSet map[int]struct{}
+
+func (t *trustedAccountSet) IsTrusted(account int) bool {
+	_, exists := (*t)[account]
+	return exists
+}
+
+func (t *trustedAccountSet) UnmarshalJSON(data []byte) error {
+	accounts := make([]int, 0)
+	if err := json.Unmarshal(data, &accounts); err != nil {
+		return err
+	}
+
+	*t = make(trustedAccountSet)
+	for _, account := range accounts {
+		(*t)[account] = struct{}{}
+	}
+
+	return nil
+}
+
+// ConnectReplyDefaults returns a newly allocated ConnectReply with the proper
+// default settings.  A pointer to a global is deliberately not used, so that
+// consumers cannot change the default settings.
+func ConnectReplyDefaults() *ConnectReply {
+	return &ConnectReply{
+		ApdexThresholdSeconds:  0.5,
+		CollectAnalyticsEvents: true,
+		CollectCustomEvents:    true,
+		CollectTraces:          true,
+		CollectErrors:          true,
+		CollectErrorEvents:     true,
+		// No transactions should be sampled before the application is
+		// connected.
+		AdaptiveSampler: SampleNothing{},
+	}
+}
+
+// CalculateApdexThreshold calculates the apdex threshold.
+func CalculateApdexThreshold(c *ConnectReply, txnName string) time.Duration {
+	if t, ok := c.KeyTxnApdex[txnName]; ok {
+		return floatSecondsToDuration(t)
+	}
+	return floatSecondsToDuration(c.ApdexThresholdSeconds)
+}
+
+// CreateFullTxnName uses collector rules and the appropriate metric prefix to
+// construct the full transaction metric name from the name given by the
+// consumer.
+func CreateFullTxnName(input string, reply *ConnectReply, isWeb bool) string {
+	var afterURLRules string
+	if "" != input {
+		afterURLRules = reply.URLRules.Apply(input)
+		if "" == afterURLRules {
+			return ""
+		}
+	}
+
+	prefix := backgroundMetricPrefix
+	if isWeb {
+		prefix = webMetricPrefix
+	}
+
+	var beforeNameRules string
+	if strings.HasPrefix(afterURLRules, "/") {
+		beforeNameRules = prefix + afterURLRules
+	} else {
+		beforeNameRules = prefix + "/" + afterURLRules
+	}
+
+	afterNameRules := reply.TxnNameRules.Apply(beforeNameRules)
+	if "" == afterNameRules {
+		return ""
+	}
+
+	return reply.SegmentTerms.apply(afterNameRules)
+}
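Two of the exported helpers above can be exercised directly. A sketch (again only compilable from within the agent module) showing the default apdex threshold and a key-transaction override:

```go
package main

import (
	"fmt"

	"github.com/newrelic/go-agent/internal"
)

func main() {
	reply := internal.ConnectReplyDefaults()

	// With no key-transaction entry, the default apdex_t of 0.5s applies.
	fmt.Println(internal.CalculateApdexThreshold(reply, "WebTransaction/Go/index"))
	// 500ms

	// A per-transaction threshold from the collector takes precedence.
	reply.KeyTxnApdex = map[string]float64{"WebTransaction/Go/slow": 2.0}
	fmt.Println(internal.CalculateApdexThreshold(reply, "WebTransaction/Go/slow"))
	// 2s
}
```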

+ 74 - 0
vendor/github.com/newrelic/go-agent/internal/cross_process_http.go

@@ -0,0 +1,74 @@
+package internal
+
+import (
+	"net/http"
+
+	"github.com/newrelic/go-agent/internal/cat"
+)
+
+// InitFromHTTPRequest initialises the TxnCrossProcess from the given request.
+// This is a convenience method to keep newTxn() as clean as possible, and to
+// support unit tests.
+func (txp *TxnCrossProcess) InitFromHTTPRequest(enabled bool, dt bool, reply *ConnectReply, req *http.Request) error {
+	metadata := CrossProcessMetadata{}
+	if req != nil {
+		metadata = HTTPHeaderToMetadata(req.Header)
+	}
+
+	return txp.Init(enabled, dt, reply, metadata)
+}
+
+// AppDataToHTTPHeader encapsulates the given appData value in the correct HTTP
+// header.
+func AppDataToHTTPHeader(appData string) http.Header {
+	header := http.Header{}
+
+	if appData != "" {
+		header.Add(cat.NewRelicAppDataName, appData)
+	}
+
+	return header
+}
+
+// HTTPHeaderToAppData gets the appData value from the correct HTTP header.
+func HTTPHeaderToAppData(header http.Header) string {
+	if header == nil {
+		return ""
+	}
+
+	return header.Get(cat.NewRelicAppDataName)
+}
+
+// HTTPHeaderToMetadata gets the cross process metadata from the relevant HTTP
+// headers.
+func HTTPHeaderToMetadata(header http.Header) CrossProcessMetadata {
+	if header == nil {
+		return CrossProcessMetadata{}
+	}
+
+	return CrossProcessMetadata{
+		ID:         header.Get(cat.NewRelicIDName),
+		TxnData:    header.Get(cat.NewRelicTxnName),
+		Synthetics: header.Get(cat.NewRelicSyntheticsName),
+	}
+}
+
+// MetadataToHTTPHeader creates a set of HTTP headers to represent the given
+// cross process metadata.
+func MetadataToHTTPHeader(metadata CrossProcessMetadata) http.Header {
+	header := http.Header{}
+
+	if metadata.ID != "" {
+		header.Add(cat.NewRelicIDName, metadata.ID)
+	}
+
+	if metadata.TxnData != "" {
+		header.Add(cat.NewRelicTxnName, metadata.TxnData)
+	}
+
+	if metadata.Synthetics != "" {
+		header.Add(cat.NewRelicSyntheticsName, metadata.Synthetics)
+	}
+
+	return header
+}
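These helpers are symmetric: only non-empty metadata fields produce headers, and reading the headers back reconstructs the original metadata. A hypothetical round trip:

```go
package main

import (
	"fmt"

	"github.com/newrelic/go-agent/internal"
)

func main() {
	md := internal.CrossProcessMetadata{
		ID:      "encoded-id",
		TxnData: "encoded-txn-data",
		// Synthetics left empty on purpose.
	}

	header := internal.MetadataToHTTPHeader(md)
	fmt.Println(len(header)) // 2: no X-Newrelic-Synthetics header was added

	back := internal.HTTPHeaderToMetadata(header)
	fmt.Printf("%+v\n", back) // matches md
}
```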

+ 103 - 0
vendor/github.com/newrelic/go-agent/internal/custom_event.go

@@ -0,0 +1,103 @@
+package internal
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents
+
+var (
+	eventTypeRegexRaw = `^[a-zA-Z0-9:_ ]+$`
+	eventTypeRegex    = regexp.MustCompile(eventTypeRegexRaw)
+
+	errEventTypeLength = fmt.Errorf("event type exceeds length limit of %d",
+		attributeKeyLengthLimit)
+	// ErrEventTypeRegex will be returned to caller of app.RecordCustomEvent
+	// if the event type is not valid.
+	ErrEventTypeRegex = fmt.Errorf("event type must match %s", eventTypeRegexRaw)
+	errNumAttributes  = fmt.Errorf("maximum of %d attributes exceeded",
+		customEventAttributeLimit)
+)
+
+// CustomEvent is a custom event.
+type CustomEvent struct {
+	eventType       string
+	timestamp       time.Time
+	truncatedParams map[string]interface{}
+}
+
+// WriteJSON prepares JSON in the format expected by the collector.
+func (e *CustomEvent) WriteJSON(buf *bytes.Buffer) {
+	w := jsonFieldsWriter{buf: buf}
+	buf.WriteByte('[')
+	buf.WriteByte('{')
+	w.stringField("type", e.eventType)
+	w.floatField("timestamp", timeToFloatSeconds(e.timestamp))
+	buf.WriteByte('}')
+
+	buf.WriteByte(',')
+	buf.WriteByte('{')
+	w = jsonFieldsWriter{buf: buf}
+	for key, val := range e.truncatedParams {
+		writeAttributeValueJSON(&w, key, val)
+	}
+	buf.WriteByte('}')
+
+	buf.WriteByte(',')
+	buf.WriteByte('{')
+	buf.WriteByte('}')
+	buf.WriteByte(']')
+}
+
+// MarshalJSON is used for testing.
+func (e *CustomEvent) MarshalJSON() ([]byte, error) {
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+
+	e.WriteJSON(buf)
+
+	return buf.Bytes(), nil
+}
+
+func eventTypeValidate(eventType string) error {
+	if len(eventType) > attributeKeyLengthLimit {
+		return errEventTypeLength
+	}
+	if !eventTypeRegex.MatchString(eventType) {
+		return ErrEventTypeRegex
+	}
+	return nil
+}
+
+// CreateCustomEvent creates a custom event.
+func CreateCustomEvent(eventType string, params map[string]interface{}, now time.Time) (*CustomEvent, error) {
+	if err := eventTypeValidate(eventType); nil != err {
+		return nil, err
+	}
+
+	if len(params) > customEventAttributeLimit {
+		return nil, errNumAttributes
+	}
+
+	truncatedParams := make(map[string]interface{})
+	for key, val := range params {
+		val, err := ValidateUserAttribute(key, val)
+		if nil != err {
+			return nil, err
+		}
+		truncatedParams[key] = val
+	}
+
+	return &CustomEvent{
+		eventType:       eventType,
+		timestamp:       now,
+		truncatedParams: truncatedParams,
+	}, nil
+}
+
+// MergeIntoHarvest implements Harvestable.
+func (e *CustomEvent) MergeIntoHarvest(h *Harvest) {
+	h.CustomEvents.Add(e)
+}
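A hedged sketch of the validation performed by `CreateCustomEvent`: the event type must match the regex above, and each attribute is validated (and possibly truncated) individually. The event name and attributes here are invented:

```go
package main

import (
	"fmt"
	"time"

	"github.com/newrelic/go-agent/internal"
)

func main() {
	evt, err := internal.CreateCustomEvent("OrderPlaced",
		map[string]interface{}{"total": 42.5, "currency": "USD"}, time.Now())
	if err != nil {
		panic(err)
	}
	js, _ := evt.MarshalJSON()
	fmt.Println(string(js))

	// A "." falls outside ^[a-zA-Z0-9:_ ]+$, so this event type is rejected.
	_, err = internal.CreateCustomEvent("order.placed", nil, time.Now())
	fmt.Println(err) // event type must match ^[a-zA-Z0-9:_ ]+$
}
```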

+ 38 - 0
vendor/github.com/newrelic/go-agent/internal/custom_events.go

@@ -0,0 +1,38 @@
+package internal
+
+import (
+	"time"
+)
+
+type customEvents struct {
+	events *analyticsEvents
+}
+
+func newCustomEvents(max int) *customEvents {
+	return &customEvents{
+		events: newAnalyticsEvents(max),
+	}
+}
+
+func (cs *customEvents) Add(e *CustomEvent) {
+	// For the Go Agent, customEvents are added to the application, not the transaction.
+	// As a result, customEvents do not inherit their priority from the transaction, though
+	// they are still sampled according to priority sampling.
+	priority := NewPriority()
+	cs.events.addEvent(analyticsEvent{priority, e})
+}
+
+func (cs *customEvents) MergeIntoHarvest(h *Harvest) {
+	h.CustomEvents.events.mergeFailed(cs.events)
+}
+
+func (cs *customEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
+	return cs.events.CollectorJSON(agentRunID)
+}
+
+func (cs *customEvents) numSeen() float64  { return cs.events.NumSeen() }
+func (cs *customEvents) numSaved() float64 { return cs.events.NumSaved() }
+
+func (cs *customEvents) EndpointMethod() string {
+	return cmdCustomEvents
+}

+ 12 - 0
vendor/github.com/newrelic/go-agent/internal/custom_metric.go

@@ -0,0 +1,12 @@
+package internal
+
+// CustomMetric is a custom metric.
+type CustomMetric struct {
+	RawInputName string
+	Value        float64
+}
+
+// MergeIntoHarvest implements Harvestable.
+func (m CustomMetric) MergeIntoHarvest(h *Harvest) {
+	h.Metrics.addValue(customMetric(m.RawInputName), "", m.Value, unforced)
+}

+ 211 - 0
vendor/github.com/newrelic/go-agent/internal/distributed_tracing.go

@@ -0,0 +1,211 @@
+package internal
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+type distTraceVersion [2]int
+
+func (v distTraceVersion) major() int { return v[0] }
+func (v distTraceVersion) minor() int { return v[1] }
+
+const (
+	// CallerType is the Type field's value for outbound payloads.
+	CallerType = "App"
+)
+
+var (
+	currentDistTraceVersion = distTraceVersion([2]int{0 /* Major */, 1 /* Minor */})
+	callerUnknown           = payloadCaller{Type: "Unknown", App: "Unknown", Account: "Unknown", TransportType: "Unknown"}
+)
+
+// timestampMillis allows raw payloads to use exact times, and marshalled
+// payloads to use times in millis.
+type timestampMillis time.Time
+
+func (tm *timestampMillis) UnmarshalJSON(data []byte) error {
+	var millis uint64
+	if err := json.Unmarshal(data, &millis); nil != err {
+		return err
+	}
+	*tm = timestampMillis(timeFromUnixMilliseconds(millis))
+	return nil
+}
+
+func (tm timestampMillis) MarshalJSON() ([]byte, error) {
+	return json.Marshal(TimeToUnixMilliseconds(tm.Time()))
+}
+
+func (tm timestampMillis) Time() time.Time  { return time.Time(tm) }
+func (tm *timestampMillis) Set(t time.Time) { *tm = timestampMillis(t) }
+
+// Payload is the distributed tracing payload.
+type Payload struct {
+	payloadCaller
+	TransactionID     string          `json:"tx,omitempty"`
+	ID                string          `json:"id,omitempty"`
+	TracedID          string          `json:"tr"`
+	Priority          Priority        `json:"pr"`
+	Sampled           *bool           `json:"sa"`
+	Timestamp         timestampMillis `json:"ti"`
+	TransportDuration time.Duration   `json:"-"`
+}
+
+type payloadCaller struct {
+	TransportType     string `json:"-"`
+	Type              string `json:"ty"`
+	App               string `json:"ap"`
+	Account           string `json:"ac"`
+	TrustedAccountKey string `json:"tk,omitempty"`
+}
+
+// IsValid validates the payload data by looking for missing fields.
+// It returns an error if a required field is missing, and nil otherwise.
+func (p Payload) IsValid() error {
+
+	// If a payload missing both `guid` and `transactionId` is received,
+	// a ParseException supportability metric should be generated.
+	if "" == p.TransactionID && "" == p.ID {
+		return ErrPayloadMissingField{message: "missing both guid/id and TransactionId/tx"}
+	}
+
+	if "" == p.Type {
+		return ErrPayloadMissingField{message: "missing Type/ty"}
+	}
+
+	if "" == p.Account {
+		return ErrPayloadMissingField{message: "missing Account/ac"}
+	}
+
+	if "" == p.App {
+		return ErrPayloadMissingField{message: "missing App/ap"}
+	}
+
+	if "" == p.TracedID {
+		return ErrPayloadMissingField{message: "missing TracedID/tr"}
+	}
+
+	if p.Timestamp.Time().IsZero() || 0 == p.Timestamp.Time().Unix() {
+		return ErrPayloadMissingField{message: "missing Timestamp/ti"}
+	}
+
+	return nil
+}
+
+func (p Payload) text(v distTraceVersion) []byte {
+	js, _ := json.Marshal(struct {
+		Version distTraceVersion `json:"v"`
+		Data    Payload          `json:"d"`
+	}{
+		Version: v,
+		Data:    p,
+	})
+	return js
+}
+
+// Text implements newrelic.DistributedTracePayload.
+func (p Payload) Text() string {
+	t := p.text(currentDistTraceVersion)
+	return string(t)
+}
+
+// HTTPSafe implements newrelic.DistributedTracePayload.
+func (p Payload) HTTPSafe() string {
+	t := p.text(currentDistTraceVersion)
+	return base64.StdEncoding.EncodeToString(t)
+}
+
+// SetSampled lets us set a value for our *bool,
+// which we can't do directly since a pointer
+// needs something to point at.
+func (p *Payload) SetSampled(sampled bool) {
+	p.Sampled = &sampled
+}
+
+// ErrPayloadParse indicates that the payload was malformed.
+type ErrPayloadParse struct{ err error }
+
+func (e ErrPayloadParse) Error() string {
+	return fmt.Sprintf("unable to parse inbound payload: %s", e.err.Error())
+}
+
+// ErrPayloadMissingField indicates there's a required field that's missing
+type ErrPayloadMissingField struct{ message string }
+
+func (e ErrPayloadMissingField) Error() string {
+	return fmt.Sprintf("payload is missing required fields: %s", e.message)
+}
+
+// ErrTrustedAccountKey indicates we don't trust the account, per the
+// new trusted_account_key routine.
+type ErrTrustedAccountKey struct{ Message string }
+
+func (e ErrTrustedAccountKey) Error() string {
+	return fmt.Sprintf("trusted account key error: %s", e.Message)
+}
+
+// ErrUnsupportedPayloadVersion indicates that the major version number is
+// unknown.
+type ErrUnsupportedPayloadVersion struct{ version int }
+
+func (e ErrUnsupportedPayloadVersion) Error() string {
+	return fmt.Sprintf("unsupported major version number %d", e.version)
+}
+
+// AcceptPayload parses the inbound distributed tracing payload.
+func AcceptPayload(p interface{}) (*Payload, error) {
+	var payload Payload
+	if byteSlice, ok := p.([]byte); ok {
+		p = string(byteSlice)
+	}
+	switch v := p.(type) {
+	case string:
+		if "" == v {
+			return nil, nil
+		}
+		var decoded []byte
+		if '{' == v[0] {
+			decoded = []byte(v)
+		} else {
+			var err error
+			decoded, err = base64.StdEncoding.DecodeString(v)
+			if nil != err {
+				return nil, ErrPayloadParse{err: err}
+			}
+		}
+		envelope := struct {
+			Version distTraceVersion `json:"v"`
+			Data    json.RawMessage  `json:"d"`
+		}{}
+		if err := json.Unmarshal(decoded, &envelope); nil != err {
+			return nil, ErrPayloadParse{err: err}
+		}
+
+		if 0 == envelope.Version.major() && 0 == envelope.Version.minor() {
+			return nil, ErrPayloadMissingField{message: "missing v"}
+		}
+
+		if envelope.Version.major() > currentDistTraceVersion.major() {
+			return nil, ErrUnsupportedPayloadVersion{
+				version: envelope.Version.major(),
+			}
+		}
+		if err := json.Unmarshal(envelope.Data, &payload); nil != err {
+			return nil, ErrPayloadParse{err: err}
+		}
+	case Payload:
+		payload = v
+	default:
+		// Could be a shim payload (if the app is not yet connected).
+		return nil, nil
+	}
+	// Ensure that we don't have a reference to the input payload: we don't
+	// want to change it, as it could be used multiple times.
+	alloc := new(Payload)
+	*alloc = payload
+
+	return alloc, nil
+}
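`AcceptPayload` accepts raw JSON (anything starting with `{`) as well as base64-encoded text, and deliberately returns `(nil, nil)` for empty or unrecognised inputs so that shim payloads are ignored rather than treated as errors. All field values in this sketch are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/newrelic/go-agent/internal"
)

func main() {
	raw := `{"v":[0,1],"d":{"ty":"App","ac":"123","ap":"456",` +
		`"id":"span-guid","tr":"trace-id","ti":1529445826000}}`

	p, err := internal.AcceptPayload(raw)
	if err != nil {
		panic(err)
	}
	// Parsing and validation are separate steps.
	if err := p.IsValid(); err != nil {
		panic(err)
	}
	fmt.Println(p.TracedID, p.Timestamp.Time().UTC())

	// An empty string yields neither a payload nor an error.
	p, err = internal.AcceptPayload("")
	fmt.Println(p, err) // <nil> <nil>
}
```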

+ 61 - 0
vendor/github.com/newrelic/go-agent/internal/environment.go

@@ -0,0 +1,61 @@
+package internal
+
+import (
+	"encoding/json"
+	"reflect"
+	"runtime"
+)
+
+// Environment describes the application's environment.
+type Environment struct {
+	Compiler string `env:"runtime.Compiler"`
+	GOARCH   string `env:"runtime.GOARCH"`
+	GOOS     string `env:"runtime.GOOS"`
+	Version  string `env:"runtime.Version"`
+	NumCPU   int    `env:"runtime.NumCPU"`
+}
+
+var (
+	// SampleEnvironment is useful for testing.
+	SampleEnvironment = Environment{
+		Compiler: "comp",
+		GOARCH:   "arch",
+		GOOS:     "goos",
+		Version:  "vers",
+		NumCPU:   8,
+	}
+)
+
+// NewEnvironment returns a new Environment.
+func NewEnvironment() Environment {
+	return Environment{
+		Compiler: runtime.Compiler,
+		GOARCH:   runtime.GOARCH,
+		GOOS:     runtime.GOOS,
+		Version:  runtime.Version(),
+		NumCPU:   runtime.NumCPU(),
+	}
+}
+
+// MarshalJSON prepares Environment JSON in the format expected by the collector
+// during the connect command.
+func (e Environment) MarshalJSON() ([]byte, error) {
+	var arr [][]interface{}
+
+	val := reflect.ValueOf(e)
+	numFields := val.NumField()
+
+	arr = make([][]interface{}, numFields)
+
+	for i := 0; i < numFields; i++ {
+		v := val.Field(i)
+		t := val.Type().Field(i).Tag.Get("env")
+
+		arr[i] = []interface{}{
+			t,
+			v.Interface(),
+		}
+	}
+
+	return json.Marshal(arr)
+}
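The connect command expects the environment as an array of `[key, value]` pairs rather than a JSON object, which is what the reflection loop above produces from the `env` struct tags. For illustration (the output values vary by platform):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/newrelic/go-agent/internal"
)

func main() {
	js, err := json.Marshal(internal.NewEnvironment())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js))
	// e.g. [["runtime.Compiler","gc"],["runtime.GOARCH","amd64"],
	//       ["runtime.GOOS","linux"],["runtime.Version","go1.11"],
	//       ["runtime.NumCPU",8]]
}
```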

+ 67 - 0
vendor/github.com/newrelic/go-agent/internal/error_events.go

@@ -0,0 +1,67 @@
+package internal
+
+import (
+	"bytes"
+	"time"
+)
+
+// MarshalJSON is used for testing.
+func (e *ErrorEvent) MarshalJSON() ([]byte, error) {
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+
+	e.WriteJSON(buf)
+
+	return buf.Bytes(), nil
+}
+
+// WriteJSON prepares JSON in the format expected by the collector.
+// https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md
+func (e *ErrorEvent) WriteJSON(buf *bytes.Buffer) {
+	w := jsonFieldsWriter{buf: buf}
+	buf.WriteByte('[')
+	buf.WriteByte('{')
+	w.stringField("type", "TransactionError")
+	w.stringField("error.class", e.Klass)
+	w.stringField("error.message", e.Msg)
+	w.floatField("timestamp", timeToFloatSeconds(e.When))
+	w.stringField("transactionName", e.FinalName)
+
+	sharedTransactionIntrinsics(&e.TxnEvent, &w)
+	sharedBetterCATIntrinsics(&e.TxnEvent, &w)
+
+	buf.WriteByte('}')
+	buf.WriteByte(',')
+	userAttributesJSON(e.Attrs, buf, destError, e.ErrorData.ExtraAttributes)
+	buf.WriteByte(',')
+	agentAttributesJSON(e.Attrs, buf, destError)
+	buf.WriteByte(']')
+}
+
+type errorEvents struct {
+	events *analyticsEvents
+}
+
+func newErrorEvents(max int) *errorEvents {
+	return &errorEvents{
+		events: newAnalyticsEvents(max),
+	}
+}
+
+func (events *errorEvents) Add(e *ErrorEvent, priority Priority) {
+	events.events.addEvent(analyticsEvent{priority, e})
+}
+
+func (events *errorEvents) MergeIntoHarvest(h *Harvest) {
+	h.ErrorEvents.events.mergeFailed(events.events)
+}
+
+func (events *errorEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
+	return events.events.CollectorJSON(agentRunID)
+}
+
+func (events *errorEvents) numSeen() float64  { return events.events.NumSeen() }
+func (events *errorEvents) numSaved() float64 { return events.events.NumSaved() }
+
+func (events *errorEvents) EndpointMethod() string {
+	return cmdErrorEvents
+}

+ 174 - 0
vendor/github.com/newrelic/go-agent/internal/errors.go

@@ -0,0 +1,174 @@
+package internal
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/jsonx"
+)
+
+const (
+	// PanicErrorKlass is the error klass used for errors generated by
+	// recovering panics in txn.End.
+	PanicErrorKlass = "panic"
+)
+
+func panicValueMsg(v interface{}) string {
+	switch val := v.(type) {
+	case error:
+		return val.Error()
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+// TxnErrorFromPanic creates a new TxnError from a panic.
+func TxnErrorFromPanic(now time.Time, v interface{}) ErrorData {
+	return ErrorData{
+		When:  now,
+		Msg:   panicValueMsg(v),
+		Klass: PanicErrorKlass,
+	}
+}
+
+// TxnErrorFromResponseCode creates a new TxnError from an http response code.
+func TxnErrorFromResponseCode(now time.Time, code int) ErrorData {
+	return ErrorData{
+		When:  now,
+		Msg:   http.StatusText(code),
+		Klass: strconv.Itoa(code),
+	}
+}
+
+// ErrorData contains the information about a recorded error.
+type ErrorData struct {
+	When            time.Time
+	Stack           StackTrace
+	ExtraAttributes map[string]interface{}
+	Msg             string
+	Klass           string
+}
+
+// TxnError combines error data with information about a transaction.  TxnError is used for
+// both error events and traced errors.
+type TxnError struct {
+	ErrorData
+	TxnEvent
+}
+
+// ErrorEvent and tracedError are separate types so that error events and traced errors can have
+// different WriteJSON methods.
+type ErrorEvent TxnError
+
+type tracedError TxnError
+
+// TxnErrors is a set of errors captured in a Transaction.
+type TxnErrors []*ErrorData
+
+// NewTxnErrors returns a new empty TxnErrors.
+func NewTxnErrors(max int) TxnErrors {
+	return make([]*ErrorData, 0, max)
+}
+
+// Add adds a TxnError.
+func (errors *TxnErrors) Add(e ErrorData) {
+	if len(*errors) < cap(*errors) {
+		*errors = append(*errors, &e)
+	}
+}
+
+func (h *tracedError) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('[')
+	jsonx.AppendFloat(buf, timeToFloatMilliseconds(h.When))
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, h.FinalName)
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, h.Msg)
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, h.Klass)
+	buf.WriteByte(',')
+
+	buf.WriteByte('{')
+	buf.WriteString(`"agentAttributes"`)
+	buf.WriteByte(':')
+	agentAttributesJSON(h.Attrs, buf, destError)
+	buf.WriteByte(',')
+	buf.WriteString(`"userAttributes"`)
+	buf.WriteByte(':')
+	userAttributesJSON(h.Attrs, buf, destError, h.ErrorData.ExtraAttributes)
+	buf.WriteByte(',')
+	buf.WriteString(`"intrinsics"`)
+	buf.WriteByte(':')
+	intrinsicsJSON(&h.TxnEvent, buf)
+	if nil != h.Stack {
+		buf.WriteByte(',')
+		buf.WriteString(`"stack_trace"`)
+		buf.WriteByte(':')
+		h.Stack.WriteJSON(buf)
+	}
+	if h.CleanURL != "" {
+		buf.WriteByte(',')
+		buf.WriteString(`"request_uri"`)
+		buf.WriteByte(':')
+		jsonx.AppendString(buf, h.CleanURL)
+	}
+	buf.WriteByte('}')
+
+	buf.WriteByte(']')
+}
+
+// MarshalJSON is used for testing.
+func (h *tracedError) MarshalJSON() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	h.WriteJSON(buf)
+	return buf.Bytes(), nil
+}
+
+type harvestErrors []*tracedError
+
+func newHarvestErrors(max int) harvestErrors {
+	return make([]*tracedError, 0, max)
+}
+
+// MergeTxnErrors merges a transaction's errors into the harvest's errors.
+func MergeTxnErrors(errors *harvestErrors, errs TxnErrors, txnEvent TxnEvent) {
+	for _, e := range errs {
+		if len(*errors) == cap(*errors) {
+			return
+		}
+		*errors = append(*errors, &tracedError{
+			TxnEvent:  txnEvent,
+			ErrorData: *e,
+		})
+	}
+}
+
+func (errors harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
+	if 0 == len(errors) {
+		return nil, nil
+	}
+	estimate := 1024 * len(errors)
+	buf := bytes.NewBuffer(make([]byte, 0, estimate))
+	buf.WriteByte('[')
+	jsonx.AppendString(buf, agentRunID)
+	buf.WriteByte(',')
+	buf.WriteByte('[')
+	for i, e := range errors {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		e.WriteJSON(buf)
+	}
+	buf.WriteByte(']')
+	buf.WriteByte(']')
+	return buf.Bytes(), nil
+}
+
+func (errors harvestErrors) MergeIntoHarvest(h *Harvest) {}
+
+func (errors harvestErrors) EndpointMethod() string {
+	return cmdErrorData
+}
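`NewTxnErrors` pre-allocates a fixed capacity, and `Add` silently drops errors once that capacity is reached, so a transaction cannot accumulate unbounded error data. A small sketch:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/newrelic/go-agent/internal"
)

func main() {
	errs := internal.NewTxnErrors(2)
	errs.Add(internal.TxnErrorFromResponseCode(time.Now(), 500))
	errs.Add(internal.TxnErrorFromPanic(time.Now(), errors.New("boom")))
	errs.Add(internal.TxnErrorFromResponseCode(time.Now(), 503)) // dropped

	fmt.Println(len(errs)) // 2
	for _, e := range errs {
		fmt.Println(e.Klass, e.Msg)
	}
	// 500 Internal Server Error
	// panic boom
}
```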

+ 664 - 0
vendor/github.com/newrelic/go-agent/internal/expect.go

@@ -0,0 +1,664 @@
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"runtime"
+)
+
+var (
+	// Unfortunately, the resolution of time.Now() on Windows is coarse: Two
+	// sequential calls to time.Now() may return the same value, and tests
+	// which expect non-zero durations may fail.  To avoid adding sleep
+	// statements or mocking time.Now(), those tests are skipped on Windows.
+	doDurationTests = runtime.GOOS != `windows`
+)
+
+// Validator is used for testing.
+type Validator interface {
+	Error(...interface{})
+}
+
+func validateStringField(v Validator, fieldName, v1, v2 string) {
+	if v1 != v2 {
+		v.Error(fieldName, v1, v2)
+	}
+}
+
+type addValidatorField struct {
+	field    interface{}
+	original Validator
+}
+
+func (a addValidatorField) Error(fields ...interface{}) {
+	fields = append([]interface{}{a.field}, fields...)
+	a.original.Error(fields...)
+}
+
+// ExtendValidator is used to add more context to a validator.
+func ExtendValidator(v Validator, field interface{}) Validator {
+	return addValidatorField{
+		field:    field,
+		original: v,
+	}
+}
+
+// WantMetric is a metric expectation.  If Data is nil, then any data values are
+// acceptable.
+type WantMetric struct {
+	Name   string
+	Scope  string
+	Forced interface{} // true, false, or nil
+	Data   []float64
+}
+
+// WantError is a traced error expectation.
+type WantError struct {
+	TxnName         string
+	Msg             string
+	Klass           string
+	Caller          string
+	URL             string
+	UserAttributes  map[string]interface{}
+	AgentAttributes map[string]interface{}
+}
+
+func uniquePointer() *struct{} {
+	s := struct{}{}
+	return &s
+}
+
+var (
+	// MatchAnything is for use when matching attributes.
+	MatchAnything = uniquePointer()
+)
+
+// WantEvent is a transaction or error event expectation.
+type WantEvent struct {
+	Intrinsics      map[string]interface{}
+	UserAttributes  map[string]interface{}
+	AgentAttributes map[string]interface{}
+}
+
+// WantTxnTrace is a transaction trace expectation.
+type WantTxnTrace struct {
+	MetricName      string
+	CleanURL        string
+	NumSegments     int
+	UserAttributes  map[string]interface{}
+	AgentAttributes map[string]interface{}
+}
+
+// WantSlowQuery is a slowQuery expectation.
+type WantSlowQuery struct {
+	Count        int32
+	MetricName   string
+	Query        string
+	TxnName      string
+	TxnURL       string
+	DatabaseName string
+	Host         string
+	PortPathOrID string
+	Params       map[string]interface{}
+}
+
+// Expect exposes methods that allow for testing whether the correct data was
+// captured.
+type Expect interface {
+	ExpectCustomEvents(t Validator, want []WantEvent)
+	ExpectErrors(t Validator, want []WantError)
+	ExpectErrorEvents(t Validator, want []WantEvent)
+	ExpectErrorEventsPresent(t Validator, want []WantEvent)
+	ExpectErrorEventsAbsent(t Validator, names []string)
+
+	ExpectTxnEvents(t Validator, want []WantEvent)
+	ExpectTxnEventsPresent(t Validator, want []WantEvent)
+	ExpectTxnEventsAbsent(t Validator, names []string)
+
+	ExpectMetrics(t Validator, want []WantMetric)
+	ExpectMetricsPresent(t Validator, want []WantMetric)
+
+	ExpectTxnTraces(t Validator, want []WantTxnTrace)
+	ExpectSlowQueries(t Validator, want []WantSlowQuery)
+
+	ExpectSpanEvents(t Validator, want []WantEvent)
+	ExpectSpanEventsPresent(t Validator, want []WantEvent)
+	ExpectSpanEventsAbsent(t Validator, names []string)
+	ExpectSpanEventsCount(t Validator, c int)
+}
+
+func expectMetricField(t Validator, id metricID, v1, v2 float64, fieldName string) {
+	if v1 != v2 {
+		t.Error("metric fields do not match", id, v1, v2, fieldName)
+	}
+}
+
+// ExpectMetricsPresent allows testing of metrics without requiring an exact match
+func ExpectMetricsPresent(t Validator, mt *metricTable, expect []WantMetric) {
+	expectedIds := make(map[metricID]struct{})
+	for _, e := range expect {
+		id := metricID{Name: e.Name, Scope: e.Scope}
+		expectedIds[id] = struct{}{}
+		m := mt.metrics[id]
+		if nil == m {
+			t.Error("unable to find metric", id)
+			continue
+		}
+
+		if b, ok := e.Forced.(bool); ok {
+			if b != (forced == m.forced) {
+				t.Error("metric forced incorrect", b, m.forced, id)
+			}
+		}
+
+		if nil != e.Data {
+			expectMetricField(t, id, e.Data[0], m.data.countSatisfied, "countSatisfied")
+			expectMetricField(t, id, e.Data[1], m.data.totalTolerated, "totalTolerated")
+			expectMetricField(t, id, e.Data[2], m.data.exclusiveFailed, "exclusiveFailed")
+			expectMetricField(t, id, e.Data[3], m.data.min, "min")
+			expectMetricField(t, id, e.Data[4], m.data.max, "max")
+			expectMetricField(t, id, e.Data[5], m.data.sumSquares, "sumSquares")
+		}
+	}
+}
+
+// ExpectMetrics allows testing of metrics.  It passes if mt exactly matches expect.
+func ExpectMetrics(t Validator, mt *metricTable, expect []WantMetric) {
+	if len(mt.metrics) != len(expect) {
+		t.Error("metric counts do not match expectations", len(mt.metrics), len(expect))
+	}
+	expectedIds := make(map[metricID]struct{})
+	for _, e := range expect {
+		id := metricID{Name: e.Name, Scope: e.Scope}
+		expectedIds[id] = struct{}{}
+		m := mt.metrics[id]
+		if nil == m {
+			t.Error("unable to find metric", id)
+			continue
+		}
+
+		if b, ok := e.Forced.(bool); ok {
+			if b != (forced == m.forced) {
+				t.Error("metric forced incorrect", b, m.forced, id)
+			}
+		}
+
+		if nil != e.Data {
+			expectMetricField(t, id, e.Data[0], m.data.countSatisfied, "countSatisfied")
+			expectMetricField(t, id, e.Data[1], m.data.totalTolerated, "totalTolerated")
+			expectMetricField(t, id, e.Data[2], m.data.exclusiveFailed, "exclusiveFailed")
+			expectMetricField(t, id, e.Data[3], m.data.min, "min")
+			expectMetricField(t, id, e.Data[4], m.data.max, "max")
+			expectMetricField(t, id, e.Data[5], m.data.sumSquares, "sumSquares")
+		}
+	}
+	for id := range mt.metrics {
+		if _, ok := expectedIds[id]; !ok {
+			t.Error("expected metrics does not contain", id.Name, id.Scope)
+		}
+	}
+}
+
+func expectAttributesPresent(v Validator, exists map[string]interface{}, expect map[string]interface{}) {
+	for key, val := range expect {
+		found, ok := exists[key]
+		if !ok {
+			v.Error("expected attribute not found: ", key)
+			continue
+		}
+		if val == MatchAnything {
+			continue
+		}
+		v1 := fmt.Sprint(found)
+		v2 := fmt.Sprint(val)
+		if v1 != v2 {
+			v.Error("value difference", fmt.Sprintf("key=%s", key), v1, v2)
+		}
+	}
+}
+
+func expectAttributes(v Validator, exists map[string]interface{}, expect map[string]interface{}) {
+	// TODO: This params comparison can be made smarter: Alert differences
+	// based on sub/super set behavior.
+	if len(exists) != len(expect) {
+		v.Error("attributes length difference", len(exists), len(expect))
+	}
+	for key, val := range expect {
+		found, ok := exists[key]
+		if !ok {
+			v.Error("expected attribute not found: ", key)
+			continue
+		}
+		if val == MatchAnything {
+			continue
+		}
+		v1 := fmt.Sprint(found)
+		v2 := fmt.Sprint(val)
+		if v1 != v2 {
+			v.Error("value difference", fmt.Sprintf("key=%s", key), v1, v2)
+		}
+	}
+	for key, val := range exists {
+		_, ok := expect[key]
+		if !ok {
+			v.Error("unexpected attribute present: ", key, val)
+			continue
+		}
+	}
+}
+
+// ExpectCustomEvents allows testing of custom events.  It passes if cs exactly matches expect.
+func ExpectCustomEvents(v Validator, cs *customEvents, expect []WantEvent) {
+	if len(cs.events.events) != len(expect) {
+		v.Error("number of custom events does not match", len(cs.events.events),
+			len(expect))
+		return
+	}
+	for i, e := range expect {
+		event, ok := cs.events.events[i].jsonWriter.(*CustomEvent)
+		if !ok {
+			v.Error("wrong custom event")
+		} else {
+			expectEvent(v, event, e)
+		}
+	}
+}
+
+func expectEventAbsent(v Validator, e json.Marshaler, names []string) {
+	js, err := e.MarshalJSON()
+	if nil != err {
+		v.Error("unable to marshal event", err)
+		return
+	}
+
+	var event []map[string]interface{}
+	err = json.Unmarshal(js, &event)
+	if nil != err {
+		v.Error("unable to parse event json", err)
+		return
+	}
+
+	intrinsics := event[0]
+	userAttributes := event[1]
+	agentAttributes := event[2]
+
+	for _, name := range names {
+		if _, ok := intrinsics[name]; ok {
+			v.Error("unexpected key found", name)
+		}
+
+		if _, ok := userAttributes[name]; ok {
+			v.Error("unexpected key found", name)
+		}
+
+		if _, ok := agentAttributes[name]; ok {
+			v.Error("unexpected key found", name)
+		}
+	}
+}
+
+func expectEventPresent(v Validator, e json.Marshaler, expect WantEvent) {
+	js, err := e.MarshalJSON()
+	if nil != err {
+		v.Error("unable to marshal event", err)
+		return
+	}
+	var event []map[string]interface{}
+	err = json.Unmarshal(js, &event)
+	if nil != err {
+		v.Error("unable to parse event json", err)
+		return
+	}
+	intrinsics := event[0]
+	userAttributes := event[1]
+	agentAttributes := event[2]
+
+	if nil != expect.Intrinsics {
+		expectAttributesPresent(v, intrinsics, expect.Intrinsics)
+	}
+	if nil != expect.UserAttributes {
+		expectAttributesPresent(v, userAttributes, expect.UserAttributes)
+	}
+	if nil != expect.AgentAttributes {
+		expectAttributesPresent(v, agentAttributes, expect.AgentAttributes)
+	}
+}
+
+func expectEvent(v Validator, e json.Marshaler, expect WantEvent) {
+	js, err := e.MarshalJSON()
+	if nil != err {
+		v.Error("unable to marshal event", err)
+		return
+	}
+	var event []map[string]interface{}
+	err = json.Unmarshal(js, &event)
+	if nil != err {
+		v.Error("unable to parse event json", err)
+		return
+	}
+	intrinsics := event[0]
+	userAttributes := event[1]
+	agentAttributes := event[2]
+
+	if nil != expect.Intrinsics {
+		expectAttributes(v, intrinsics, expect.Intrinsics)
+	}
+	if nil != expect.UserAttributes {
+		expectAttributes(v, userAttributes, expect.UserAttributes)
+	}
+	if nil != expect.AgentAttributes {
+		expectAttributes(v, agentAttributes, expect.AgentAttributes)
+	}
+}
+
+// Second attributes have priority.
+func mergeAttributes(a1, a2 map[string]interface{}) map[string]interface{} {
+	a := make(map[string]interface{})
+	for k, v := range a1 {
+		a[k] = v
+	}
+	for k, v := range a2 {
+		a[k] = v
+	}
+	return a
+}
+
+// ExpectErrorEventsPresent allows testing of error events without requiring an exact match
+func ExpectErrorEventsPresent(v Validator, events *errorEvents, expect []WantEvent) {
+	for i, e := range expect {
+		event, ok := events.events.events[i].jsonWriter.(*ErrorEvent)
+		if !ok {
+			v.Error("wrong span event in ExpectErrorEventsPresent")
+		} else {
+			expectEventPresent(v, event, e)
+		}
+	}
+}
+
+// ExpectErrorEventsAbsent allows testing that a set of attribute names are absent from the event data
+func ExpectErrorEventsAbsent(v Validator, events *errorEvents, names []string) {
+	for _, eventHarvested := range events.events.events {
+		event, ok := eventHarvested.jsonWriter.(*ErrorEvent)
+		if !ok {
+			v.Error("wrong span event in ExpectErrorEventsAbsent")
+		} else {
+			expectEventAbsent(v, event, names)
+		}
+	}
+}
+
+// ExpectErrorEvents allows testing of error events.  It passes if events exactly matches expect.
+func ExpectErrorEvents(v Validator, events *errorEvents, expect []WantEvent) {
+	if len(events.events.events) != len(expect) {
+		v.Error("number of custom events does not match",
+			len(events.events.events), len(expect))
+		return
+	}
+	for i, e := range expect {
+		event, ok := events.events.events[i].jsonWriter.(*ErrorEvent)
+		if !ok {
+			v.Error("wrong error event")
+		} else {
+			if nil != e.Intrinsics {
+				e.Intrinsics = mergeAttributes(map[string]interface{}{
+					// The following intrinsics should always be present in
+					// error events:
+					"type":      "TransactionError",
+					"timestamp": MatchAnything,
+					"duration":  MatchAnything,
+				}, e.Intrinsics)
+			}
+			expectEvent(v, event, e)
+		}
+	}
+}
+
+// ExpectSpanEventsCount allows us to count how many span events the system generated
+func ExpectSpanEventsCount(v Validator, events *spanEvents, c int) {
+	actual := len(events.events.events)
+	if actual != c {
+		v.Error(fmt.Sprintf("expected %d span events, found %d", c, actual))
+	}
+}
+
+// ExpectSpanEventsPresent allows us to test for the presence and value of events
+// without also requiring an exact match
+func ExpectSpanEventsPresent(v Validator, events *spanEvents, expect []WantEvent) {
+	for i, e := range expect {
+		event, ok := events.events.events[i].jsonWriter.(*SpanEvent)
+		if !ok {
+			v.Error("wrong span event in ExpectSpanEventsPresent")
+		} else {
+			expectEventPresent(v, event, e)
+		}
+	}
+}
+
+// ExpectSpanEventsAbsent allows us to ensure that a set of attribute names are absent
+// from the event data
+func ExpectSpanEventsAbsent(v Validator, events *spanEvents, names []string) {
+	for _, eventHarvested := range events.events.events {
+		event, ok := eventHarvested.jsonWriter.(*SpanEvent)
+		if !ok {
+			v.Error("wrong span event in ExpectSpanEventsAbsent")
+		} else {
+			expectEventAbsent(v, event, names)
+		}
+	}
+}
+
+// ExpectSpanEvents allows testing of span events.  It passes if events exactly matches expect.
+func ExpectSpanEvents(v Validator, events *spanEvents, expect []WantEvent) {
+	if len(events.events.events) != len(expect) {
+		v.Error("number of txn events does not match",
+			len(events.events.events), len(expect))
+		return
+	}
+	for i, e := range expect {
+		event, ok := events.events.events[i].jsonWriter.(*SpanEvent)
+		if !ok {
+			v.Error("wrong span event")
+		} else {
+			if nil != e.Intrinsics {
+				e.Intrinsics = mergeAttributes(map[string]interface{}{
+					// The following intrinsics should always be present in
+					// span events:
+					"type":      "Transaction",
+					"timestamp": MatchAnything,
+					"duration":  MatchAnything,
+				}, e.Intrinsics)
+			}
+			expectEvent(v, event, e)
+		}
+	}
+}
+
+// ExpectTxnEventsPresent allows us to test for the presence and value of events
+// without also requiring an exact match
+func ExpectTxnEventsPresent(v Validator, events *txnEvents, expect []WantEvent) {
+	for i, e := range expect {
+		event, ok := events.events.events[i].jsonWriter.(*TxnEvent)
+		if !ok {
+			v.Error("wrong txn event in ExpectTxnEventsPresent")
+		} else {
+			expectEventPresent(v, event, e)
+		}
+	}
+}
+
+// ExpectTxnEventsAbsent allows us to ensure that a set of attribute names are absent
+// from the event data
+func ExpectTxnEventsAbsent(v Validator, events *txnEvents, names []string) {
+	for _, eventHarvested := range events.events.events {
+		event, ok := eventHarvested.jsonWriter.(*TxnEvent)
+		if !ok {
+			v.Error("wrong txn event in ExpectTxnEventsAbsent")
+		} else {
+			expectEventAbsent(v, event, names)
+		}
+	}
+}
+
+// ExpectTxnEvents allows testing of txn events.
+func ExpectTxnEvents(v Validator, events *txnEvents, expect []WantEvent) {
+	if len(events.events.events) != len(expect) {
+		v.Error("number of txn events does not match",
+			len(events.events.events), len(expect))
+		return
+	}
+	for i, e := range expect {
+		event, ok := events.events.events[i].jsonWriter.(*TxnEvent)
+		if !ok {
+			v.Error("wrong txn event")
+		} else {
+			if nil != e.Intrinsics {
+				e.Intrinsics = mergeAttributes(map[string]interface{}{
+					// The following intrinsics should always be present in
+					// txn events:
+					"type":      "Transaction",
+					"timestamp": MatchAnything,
+					"duration":  MatchAnything,
+					"error":     MatchAnything,
+				}, e.Intrinsics)
+			}
+			expectEvent(v, event, e)
+		}
+	}
+}
+
+func expectError(v Validator, err *tracedError, expect WantError) {
+	caller := topCallerNameBase(err.ErrorData.Stack)
+	validateStringField(v, "caller", expect.Caller, caller)
+	validateStringField(v, "txnName", expect.TxnName, err.FinalName)
+	validateStringField(v, "klass", expect.Klass, err.Klass)
+	validateStringField(v, "msg", expect.Msg, err.Msg)
+	validateStringField(v, "URL", expect.URL, err.CleanURL)
+	js, errr := err.MarshalJSON()
+	if nil != errr {
+		v.Error("unable to marshal error json", errr)
+		return
+	}
+	var unmarshalled []interface{}
+	errr = json.Unmarshal(js, &unmarshalled)
+	if nil != errr {
+		v.Error("unable to unmarshal error json", errr)
+		return
+	}
+	attributes := unmarshalled[4].(map[string]interface{})
+	agentAttributes := attributes["agentAttributes"].(map[string]interface{})
+	userAttributes := attributes["userAttributes"].(map[string]interface{})
+
+	if nil != expect.UserAttributes {
+		expectAttributes(v, userAttributes, expect.UserAttributes)
+	}
+	if nil != expect.AgentAttributes {
+		expectAttributes(v, agentAttributes, expect.AgentAttributes)
+	}
+}
+
+// ExpectErrors allows testing of errors.
+func ExpectErrors(v Validator, errors harvestErrors, expect []WantError) {
+	if len(errors) != len(expect) {
+		v.Error("number of errors mismatch", len(errors), len(expect))
+		return
+	}
+	for i, e := range expect {
+		expectError(v, errors[i], e)
+	}
+}
+
+func countSegments(node []interface{}) int {
+	count := 1
+	children := node[4].([]interface{})
+	for _, c := range children {
+		node := c.([]interface{})
+		count += countSegments(node)
+	}
+	return count
+}
+
+func expectTxnTrace(v Validator, got json.Marshaler, expect WantTxnTrace) {
+	js, err := got.MarshalJSON()
+	if nil != err {
+		v.Error("unable to marshal txn trace json", err)
+		return
+	}
+	var unmarshalled []interface{}
+	err = json.Unmarshal(js, &unmarshalled)
+	if nil != err {
+		v.Error("unable to unmarshal txn trace json", err)
+		return
+	}
+	duration := unmarshalled[1].(float64)
+	name := unmarshalled[2].(string)
+	cleanURL := unmarshalled[3].(string)
+	traceData := unmarshalled[4].([]interface{})
+
+	rootNode := traceData[3].([]interface{})
+	attributes := traceData[4].(map[string]interface{})
+	userAttributes := attributes["userAttributes"].(map[string]interface{})
+	agentAttributes := attributes["agentAttributes"].(map[string]interface{})
+
+	validateStringField(v, "metric name", expect.MetricName, name)
+	validateStringField(v, "request url", expect.CleanURL, cleanURL)
+
+	if doDurationTests && 0 == duration {
+		v.Error("zero trace duration")
+	}
+
+	if nil != expect.UserAttributes {
+		expectAttributes(v, userAttributes, expect.UserAttributes)
+	}
+	if nil != expect.AgentAttributes {
+		expectAttributes(v, agentAttributes, expect.AgentAttributes)
+	}
+	numSegments := countSegments(rootNode)
+	// The expected segment count does not include the two root nodes.
+	numSegments -= 2
+	if expect.NumSegments != numSegments {
+		v.Error("wrong number of segments", expect.NumSegments, numSegments)
+	}
+}
+
+// ExpectTxnTraces allows testing of transaction traces.
+func ExpectTxnTraces(v Validator, traces *harvestTraces, want []WantTxnTrace) {
+	if len(want) != traces.Len() {
+		v.Error("number of traces does not match", len(want), traces.Len())
+	}
+
+	actual := traces.slice()
+	for i, expected := range want {
+		expectTxnTrace(v, actual[i], expected)
+	}
+}
+
+func expectSlowQuery(t Validator, slowQuery *slowQuery, want WantSlowQuery) {
+	if slowQuery.Count != want.Count {
+		t.Error("wrong Count field", slowQuery.Count, want.Count)
+	}
+	validateStringField(t, "MetricName", slowQuery.DatastoreMetric, want.MetricName)
+	validateStringField(t, "Query", slowQuery.ParameterizedQuery, want.Query)
+	validateStringField(t, "TxnEvent.FinalName", slowQuery.TxnEvent.FinalName, want.TxnName)
+	validateStringField(t, "TxnEvent.CleanURL", slowQuery.TxnEvent.CleanURL, want.TxnURL)
+	validateStringField(t, "DatabaseName", slowQuery.DatabaseName, want.DatabaseName)
+	validateStringField(t, "Host", slowQuery.Host, want.Host)
+	validateStringField(t, "PortPathOrID", slowQuery.PortPathOrID, want.PortPathOrID)
+	expectAttributes(t, map[string]interface{}(slowQuery.QueryParameters), want.Params)
+}
+
+// ExpectSlowQueries allows testing of slow queries.
+func ExpectSlowQueries(t Validator, slowQueries *slowQueries, want []WantSlowQuery) {
+	if len(want) != len(slowQueries.priorityQueue) {
+		t.Error("wrong number of slow queries",
+			"expected", len(want), "got", len(slowQueries.priorityQueue))
+		return
+	}
+	for _, s := range want {
+		idx, ok := slowQueries.lookup[s.Query]
+		if !ok {
+			t.Error("unable to find slow query", s.Query)
+			continue
+		}
+		expectSlowQuery(t, slowQueries.priorityQueue[idx], s)
+	}
+}
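
The Expect helpers above take a Validator rather than *testing.T directly; the call sites show Error is variadic, so *testing.T satisfies the interface as-is. A minimal same-package sketch with a hypothetical stand-in validator (printValidator is illustrative, and the variadic Error signature is assumed from the uses above):

package internal

import "fmt"

// printValidator is a hypothetical Validator that prints failures instead
// of failing a test; in real use, *testing.T fills this role.
type printValidator struct{}

func (printValidator) Error(fields ...interface{}) {
	fmt.Println(fields...)
}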

+ 200 - 0
vendor/github.com/newrelic/go-agent/internal/harvest.go

@@ -0,0 +1,200 @@
+package internal
+
+import (
+	"strings"
+	"sync"
+	"time"
+)
+
+// Harvestable is something that can be merged into a Harvest.
+type Harvestable interface {
+	MergeIntoHarvest(h *Harvest)
+}
+
+// Harvest contains collected data.
+type Harvest struct {
+	Metrics      *metricTable
+	CustomEvents *customEvents
+	TxnEvents    *txnEvents
+	ErrorEvents  *errorEvents
+	ErrorTraces  harvestErrors
+	TxnTraces    *harvestTraces
+	SlowSQLs     *slowQueries
+	SpanEvents   *spanEvents
+}
+
+const (
+	// txnEventPayloadLimit is the maximum number of events that should be
+	// sent up in one post.
+	txnEventPayloadLimit = 5000
+)
+
+// Payloads returns the PayloadCreators whose data should be sent to the collector.
+func (h *Harvest) Payloads(splitLargeTxnEvents bool) []PayloadCreator {
+	ps := []PayloadCreator{
+		h.Metrics,
+		h.CustomEvents,
+		h.ErrorEvents,
+		h.ErrorTraces,
+		h.TxnTraces,
+		h.SlowSQLs,
+		h.SpanEvents,
+	}
+	if splitLargeTxnEvents {
+		ps = append(ps, h.TxnEvents.payloads(txnEventPayloadLimit)...)
+	} else {
+		ps = append(ps, h.TxnEvents)
+	}
+	return ps
+}
+
+// NewHarvest returns a new Harvest.
+func NewHarvest(now time.Time) *Harvest {
+	return &Harvest{
+		Metrics:      newMetricTable(maxMetrics, now),
+		CustomEvents: newCustomEvents(maxCustomEvents),
+		TxnEvents:    newTxnEvents(maxTxnEvents),
+		ErrorEvents:  newErrorEvents(maxErrorEvents),
+		ErrorTraces:  newHarvestErrors(maxHarvestErrors),
+		TxnTraces:    newHarvestTraces(),
+		SlowSQLs:     newSlowQueries(maxHarvestSlowSQLs),
+		SpanEvents:   newSpanEvents(maxSpanEvents),
+	}
+}
+
+var (
+	trackMutex   sync.Mutex
+	trackMetrics []string
+)
+
+// TrackUsage helps track which integration packages are used.
+func TrackUsage(s ...string) {
+	trackMutex.Lock()
+	defer trackMutex.Unlock()
+
+	m := "Supportability/" + strings.Join(s, "/")
+	trackMetrics = append(trackMetrics, m)
+}
+
+func createTrackUsageMetrics(metrics *metricTable) {
+	trackMutex.Lock()
+	defer trackMutex.Unlock()
+
+	for _, m := range trackMetrics {
+		metrics.addSingleCount(m, forced)
+	}
+}
+
+// CreateFinalMetrics creates extra metrics at harvest time.
+func (h *Harvest) CreateFinalMetrics() {
+	h.Metrics.addSingleCount(instanceReporting, forced)
+
+	h.Metrics.addCount(customEventsSeen, h.CustomEvents.numSeen(), forced)
+	h.Metrics.addCount(customEventsSent, h.CustomEvents.numSaved(), forced)
+
+	h.Metrics.addCount(txnEventsSeen, h.TxnEvents.numSeen(), forced)
+	h.Metrics.addCount(txnEventsSent, h.TxnEvents.numSaved(), forced)
+
+	h.Metrics.addCount(errorEventsSeen, h.ErrorEvents.numSeen(), forced)
+	h.Metrics.addCount(errorEventsSent, h.ErrorEvents.numSaved(), forced)
+
+	h.Metrics.addCount(spanEventsSeen, h.SpanEvents.numSeen(), forced)
+	h.Metrics.addCount(spanEventsSent, h.SpanEvents.numSaved(), forced)
+
+	if h.Metrics.numDropped > 0 {
+		h.Metrics.addCount(supportabilityDropped, float64(h.Metrics.numDropped), forced)
+	}
+
+	createTrackUsageMetrics(h.Metrics)
+}
+
+// PayloadCreator is a data type in the harvest.
+type PayloadCreator interface {
+	// In the event of an rpm request failure (hopefully simply an
+	// intermittent collector issue), the payload may be merged into the
+	// next time period's harvest.
+	Harvestable
+	// Data prepares JSON in the format expected by the collector endpoint.
+	// This method should return (nil, nil) if the payload is empty and no
+	// rpm request is necessary.
+	Data(agentRunID string, harvestStart time.Time) ([]byte, error)
+	// EndpointMethod is used for the "method" query parameter when posting
+	// the data.
+	EndpointMethod() string
+}
+
+func supportMetric(metrics *metricTable, b bool, metricName string) {
+	if b {
+		metrics.addSingleCount(metricName, forced)
+	}
+}
+
+// CreateTxnMetrics creates metrics for a transaction.
+func CreateTxnMetrics(args *TxnData, metrics *metricTable) {
+	// Duration Metrics
+	rollup := backgroundRollup
+	if args.IsWeb {
+		rollup = webRollup
+		metrics.addDuration(dispatcherMetric, "", args.Duration, 0, forced)
+	}
+
+	metrics.addDuration(args.FinalName, "", args.Duration, args.Exclusive, forced)
+	metrics.addDuration(rollup, "", args.Duration, args.Exclusive, forced)
+
+	// Better CAT Metrics
+	if cat := args.BetterCAT; cat.Enabled {
+		caller := callerUnknown
+		if nil != cat.Inbound {
+			caller = cat.Inbound.payloadCaller
+		}
+		m := durationByCallerMetric(caller)
+		metrics.addDuration(m.all, "", args.Duration, args.Duration, unforced)
+		metrics.addDuration(m.webOrOther(args.IsWeb), "", args.Duration, args.Duration, unforced)
+
+		// Transport Duration Metric
+		if nil != cat.Inbound {
+			d := cat.Inbound.TransportDuration
+			m = transportDurationMetric(caller)
+			metrics.addDuration(m.all, "", d, d, unforced)
+			metrics.addDuration(m.webOrOther(args.IsWeb), "", d, d, unforced)
+		}
+
+		// CAT Error Metrics
+		if args.HasErrors() {
+			m = errorsByCallerMetric(caller)
+			metrics.addSingleCount(m.all, unforced)
+			metrics.addSingleCount(m.webOrOther(args.IsWeb), unforced)
+		}
+
+		supportMetric(metrics, args.AcceptPayloadSuccess, supportTracingAcceptSuccess)
+		supportMetric(metrics, args.AcceptPayloadException, supportTracingAcceptException)
+		supportMetric(metrics, args.AcceptPayloadParseException, supportTracingAcceptParseException)
+		supportMetric(metrics, args.AcceptPayloadCreateBeforeAccept, supportTracingCreateBeforeAccept)
+		supportMetric(metrics, args.AcceptPayloadIgnoredMultiple, supportTracingIgnoredMultiple)
+		supportMetric(metrics, args.AcceptPayloadIgnoredVersion, supportTracingIgnoredVersion)
+		supportMetric(metrics, args.AcceptPayloadUntrustedAccount, supportTracingAcceptUntrustedAccount)
+		supportMetric(metrics, args.AcceptPayloadNullPayload, supportTracingAcceptNull)
+		supportMetric(metrics, args.CreatePayloadSuccess, supportTracingCreatePayloadSuccess)
+		supportMetric(metrics, args.CreatePayloadException, supportTracingCreatePayloadException)
+	}
+
+	// Apdex Metrics
+	if args.Zone != ApdexNone {
+		metrics.addApdex(apdexRollup, "", args.ApdexThreshold, args.Zone, forced)
+
+		mname := apdexPrefix + removeFirstSegment(args.FinalName)
+		metrics.addApdex(mname, "", args.ApdexThreshold, args.Zone, unforced)
+	}
+
+	// Error Metrics
+	if args.HasErrors() {
+		metrics.addSingleCount(errorsRollupMetric.all, forced)
+		metrics.addSingleCount(errorsRollupMetric.webOrOther(args.IsWeb), forced)
+		metrics.addSingleCount(errorsPrefix+args.FinalName, forced)
+	}
+
+	// Queueing Metrics
+	if args.Queuing > 0 {
+		metrics.addDuration(queueMetric, "", args.Queuing, args.Queuing, forced)
+	}
+}
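
To make the flow above concrete, here is a minimal same-package sketch of one harvest cycle; the function name, usage strings, and run ID are illustrative:

package internal

import (
	"fmt"
	"time"
)

func exampleHarvestCycle() {
	TrackUsage("integration", "framework", "gin", "v1")

	h := NewHarvest(time.Now())
	// ... transactions merge their data into h during the harvest period ...

	// At harvest time, add Instance/Reporting, the Seen/Sent counts, and
	// any TrackUsage supportability metrics.
	h.CreateFinalMetrics()

	// One rpm request per non-empty payload; event splitting is disabled here.
	for _, p := range h.Payloads(false) {
		data, err := p.Data("agent-run-id", time.Now())
		if nil != err || nil == data {
			continue // empty payloads are skipped, no rpm request needed
		}
		fmt.Println(p.EndpointMethod(), len(data), "bytes")
	}
}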

+ 39 - 0
vendor/github.com/newrelic/go-agent/internal/intrinsics.go

@@ -0,0 +1,39 @@
+package internal
+
+import (
+	"bytes"
+)
+
+func addOptionalStringField(w *jsonFieldsWriter, key, value string) {
+	if value != "" {
+		w.stringField(key, value)
+	}
+}
+
+func intrinsicsJSON(e *TxnEvent, buf *bytes.Buffer) {
+	w := jsonFieldsWriter{buf: buf}
+
+	buf.WriteByte('{')
+
+	if e.BetterCAT.Enabled {
+		w.stringField("guid", e.BetterCAT.ID)
+		w.stringField("traceId", e.BetterCAT.TraceID())
+		w.writerField("priority", e.BetterCAT.Priority)
+		w.boolField("sampled", e.BetterCAT.Sampled)
+	}
+
+	if e.CrossProcess.Used() {
+		addOptionalStringField(&w, "client_cross_process_id", e.CrossProcess.ClientID)
+		addOptionalStringField(&w, "trip_id", e.CrossProcess.TripID)
+		addOptionalStringField(&w, "path_hash", e.CrossProcess.PathHash)
+		addOptionalStringField(&w, "referring_transaction_guid", e.CrossProcess.ReferringTxnGUID)
+	}
+
+	if e.CrossProcess.IsSynthetics() {
+		addOptionalStringField(&w, "synthetics_resource_id", e.CrossProcess.Synthetics.ResourceID)
+		addOptionalStringField(&w, "synthetics_job_id", e.CrossProcess.Synthetics.JobID)
+		addOptionalStringField(&w, "synthetics_monitor_id", e.CrossProcess.Synthetics.MonitorID)
+	}
+
+	buf.WriteByte('}')
+}

+ 61 - 0
vendor/github.com/newrelic/go-agent/internal/json_object_writer.go

@@ -0,0 +1,61 @@
+package internal
+
+import (
+	"bytes"
+
+	"github.com/newrelic/go-agent/internal/jsonx"
+)
+
+type jsonWriter interface {
+	WriteJSON(buf *bytes.Buffer)
+}
+
+type jsonFieldsWriter struct {
+	buf        *bytes.Buffer
+	needsComma bool
+}
+
+func (w *jsonFieldsWriter) addKey(key string) {
+	if w.needsComma {
+		w.buf.WriteByte(',')
+	} else {
+		w.needsComma = true
+	}
+	// defensively assume that the key needs escaping:
+	jsonx.AppendString(w.buf, key)
+	w.buf.WriteByte(':')
+}
+
+func (w *jsonFieldsWriter) stringField(key string, val string) {
+	w.addKey(key)
+	jsonx.AppendString(w.buf, val)
+}
+
+func (w *jsonFieldsWriter) intField(key string, val int64) {
+	w.addKey(key)
+	jsonx.AppendInt(w.buf, val)
+}
+
+func (w *jsonFieldsWriter) floatField(key string, val float64) {
+	w.addKey(key)
+	jsonx.AppendFloat(w.buf, val)
+}
+
+func (w *jsonFieldsWriter) boolField(key string, val bool) {
+	w.addKey(key)
+	if val {
+		w.buf.WriteString("true")
+	} else {
+		w.buf.WriteString("false")
+	}
+}
+
+func (w *jsonFieldsWriter) rawField(key string, val JSONString) {
+	w.addKey(key)
+	w.buf.WriteString(string(val))
+}
+
+func (w *jsonFieldsWriter) writerField(key string, val jsonWriter) {
+	w.addKey(key)
+	val.WriteJSON(w.buf)
+}
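
A same-package sketch of the writer in use; addKey handles comma placement and key escaping, so callers only write the surrounding braces:

package internal

import (
	"bytes"
	"fmt"
)

func exampleFieldsWriter() {
	buf := &bytes.Buffer{}
	w := jsonFieldsWriter{buf: buf}
	buf.WriteByte('{')
	w.stringField("name", `say "hi"`) // value escaped via jsonx
	w.intField("count", 3)
	w.boolField("enabled", true)
	buf.WriteByte('}')
	fmt.Println(buf.String()) // {"name":"say \"hi\"","count":3,"enabled":true}
}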

+ 174 - 0
vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go

@@ -0,0 +1,174 @@
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jsonx extends the encoding/json package to encode JSON
+// incrementally and without requiring reflection.
+package jsonx
+
+import (
+	"bytes"
+	"encoding/json"
+	"math"
+	"reflect"
+	"strconv"
+	"unicode/utf8"
+)
+
+var hex = "0123456789abcdef"
+
+// AppendString escapes s and appends it to buf.
+func AppendString(buf *bytes.Buffer, s string) {
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			case '\t':
+				buf.WriteByte('\\')
+				buf.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \n and \r,
+				// as well as <, > and &. The latter are escaped because they
+				// can lead to security holes when user-controlled strings
+				// are rendered into JSON and served to some browsers.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hex[b>>4])
+				buf.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.WriteString(s[start:])
+	}
+	buf.WriteByte('"')
+}
+
+// AppendStringArray appends an array of string literals to buf.
+func AppendStringArray(buf *bytes.Buffer, a ...string) {
+	buf.WriteByte('[')
+	for i, s := range a {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		AppendString(buf, s)
+	}
+	buf.WriteByte(']')
+}
+
+// AppendFloat appends a numeric literal representing the value to buf.
+func AppendFloat(buf *bytes.Buffer, x float64) error {
+	var scratch [64]byte
+
+	if math.IsInf(x, 0) || math.IsNaN(x) {
+		return &json.UnsupportedValueError{
+			Value: reflect.ValueOf(x),
+			Str:   strconv.FormatFloat(x, 'g', -1, 64),
+		}
+	}
+
+	buf.Write(strconv.AppendFloat(scratch[:0], x, 'g', -1, 64))
+	return nil
+}
+
+// AppendFloatArray appends an array of numeric literals to buf.
+func AppendFloatArray(buf *bytes.Buffer, a ...float64) error {
+	buf.WriteByte('[')
+	for i, x := range a {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		if err := AppendFloat(buf, x); err != nil {
+			return err
+		}
+	}
+	buf.WriteByte(']')
+	return nil
+}
+
+// AppendInt appends a numeric literal representing the value to buf.
+func AppendInt(buf *bytes.Buffer, x int64) {
+	var scratch [64]byte
+	buf.Write(strconv.AppendInt(scratch[:0], x, 10))
+}
+
+// AppendIntArray appends an array of numeric literals to buf.
+func AppendIntArray(buf *bytes.Buffer, a ...int64) {
+	var scratch [64]byte
+
+	buf.WriteByte('[')
+	for i, x := range a {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		buf.Write(strconv.AppendInt(scratch[:0], x, 10))
+	}
+	buf.WriteByte(']')
+}
+
+// AppendUint appends a numeric literal representing the value to buf.
+func AppendUint(buf *bytes.Buffer, x uint64) {
+	var scratch [64]byte
+	buf.Write(strconv.AppendUint(scratch[:0], x, 10))
+}
+
+// AppendUintArray appends an array of numeric literals to buf.
+func AppendUintArray(buf *bytes.Buffer, a ...uint64) {
+	var scratch [64]byte
+
+	buf.WriteByte('[')
+	for i, x := range a {
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		buf.Write(strconv.AppendUint(scratch[:0], x, 10))
+	}
+	buf.WriteByte(']')
+}
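
A short sketch of the escaping behavior, usable from anywhere inside the go-agent module (the internal path restricts outside importers):

package internal

import (
	"bytes"
	"fmt"

	"github.com/newrelic/go-agent/internal/jsonx"
)

func exampleEscaping() {
	buf := &bytes.Buffer{}
	// '<' is escaped defensively, '\n' uses the short escape, and U+2028
	// is escaped unconditionally for JSONP safety.
	jsonx.AppendString(buf, "a<b\nc\u2028")
	fmt.Println(buf.String()) // "a\u003cb\nc\u2028"
}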

+ 23 - 0
vendor/github.com/newrelic/go-agent/internal/labels.go

@@ -0,0 +1,23 @@
+package internal
+
+import "encoding/json"
+
+// Labels is used for connect JSON formatting.
+type Labels map[string]string
+
+// MarshalJSON formats the labels as the array of label_type/label_value
+// objects expected by the connect payload.
+func (l Labels) MarshalJSON() ([]byte, error) {
+	ls := make([]struct {
+		Key   string `json:"label_type"`
+		Value string `json:"label_value"`
+	}, len(l))
+
+	i := 0
+	for key, val := range l {
+		ls[i].Key = key
+		ls[i].Value = val
+		i++
+	}
+
+	return json.Marshal(ls)
+}
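
The connect endpoint wants labels as an array of typed pairs rather than a plain JSON object; a same-package sketch of the resulting shape:

package internal

import (
	"encoding/json"
	"fmt"
)

func exampleLabels() {
	js, err := json.Marshal(Labels{"env": "prod"})
	if nil != err {
		panic(err)
	}
	fmt.Println(string(js)) // [{"label_type":"env","label_value":"prod"}]
}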

+ 61 - 0
vendor/github.com/newrelic/go-agent/internal/limits.go

@@ -0,0 +1,61 @@
+package internal
+
+import "time"
+
+const (
+	// app behavior
+
+	// ConnectBackoff is the wait time between unsuccessful connect
+	// attempts.
+	ConnectBackoff = 20 * time.Second
+	// HarvestPeriod is the period that collected data is sent to New Relic.
+	HarvestPeriod = 60 * time.Second
+	// CollectorTimeout is the timeout used in the client for communication
+	// with New Relic's servers.
+	CollectorTimeout = 20 * time.Second
+	// AppDataChanSize is the size of the channel that contains data sent to
+	// the app processor.
+	AppDataChanSize           = 200
+	failedMetricAttemptsLimit = 5
+	failedEventsAttemptsLimit = 10
+
+	// transaction behavior
+	maxStackTraceFrames = 100
+	// MaxTxnErrors is the maximum number of errors captured per
+	// transaction.
+	MaxTxnErrors      = 5
+	maxTxnSlowQueries = 10
+
+	startingTxnTraceNodes = 16
+	maxTxnTraceNodes      = 256
+
+	// harvest data
+	maxMetrics          = 2 * 1000
+	maxCustomEvents     = 10 * 1000
+	maxTxnEvents        = 10 * 1000
+	maxRegularTraces    = 1
+	maxSyntheticsTraces = 20
+	maxErrorEvents      = 100
+	maxHarvestErrors    = 20
+	maxHarvestSlowSQLs  = 10
+	maxSpanEvents       = 1000
+
+	// attributes
+	attributeKeyLengthLimit   = 255
+	attributeValueLengthLimit = 255
+	attributeUserLimit        = 64
+	// AttributeErrorLimit limits the number of extra attributes that can be
+	// provided when noticing an error.
+	AttributeErrorLimit       = 32
+	attributeAgentLimit       = 255 - (attributeUserLimit + AttributeErrorLimit)
+	customEventAttributeLimit = 64
+
+	// Limits affecting Config validation are found in the config package.
+
+	// RuntimeSamplerPeriod is the period of the runtime sampler.  Runtime
+	// metrics should not depend on the sampler period, but the period must
+	// be the same across instances.  For that reason, this value should not
+	// be changed without notifying customers that they must update all
+	// instances simultaneously for valid runtime metrics.
+	RuntimeSamplerPeriod = 60 * time.Second
+)

+ 89 - 0
vendor/github.com/newrelic/go-agent/internal/logger/logger.go

@@ -0,0 +1,89 @@
+package logger
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+// Logger matches newrelic.Logger to allow implementations to be passed to
+// internal packages.
+type Logger interface {
+	Error(msg string, context map[string]interface{})
+	Warn(msg string, context map[string]interface{})
+	Info(msg string, context map[string]interface{})
+	Debug(msg string, context map[string]interface{})
+	DebugEnabled() bool
+}
+
+// ShimLogger implements Logger and does nothing.
+type ShimLogger struct{}
+
+// Error allows ShimLogger to implement Logger.
+func (s ShimLogger) Error(string, map[string]interface{}) {}
+
+// Warn allows ShimLogger to implement Logger.
+func (s ShimLogger) Warn(string, map[string]interface{}) {}
+
+// Info allows ShimLogger to implement Logger.
+func (s ShimLogger) Info(string, map[string]interface{}) {}
+
+// Debug allows ShimLogger to implement Logger.
+func (s ShimLogger) Debug(string, map[string]interface{}) {}
+
+// DebugEnabled allows ShimLogger to implement Logger.
+func (s ShimLogger) DebugEnabled() bool { return false }
+
+type logFile struct {
+	l       *log.Logger
+	doDebug bool
+}
+
+// New creates a basic Logger.
+func New(w io.Writer, doDebug bool) Logger {
+	return &logFile{
+		l:       log.New(w, logPid, logFlags),
+		doDebug: doDebug,
+	}
+}
+
+const logFlags = log.Ldate | log.Ltime | log.Lmicroseconds
+
+var (
+	logPid = fmt.Sprintf("(%d) ", os.Getpid())
+)
+
+func (f *logFile) fire(level, msg string, ctx map[string]interface{}) {
+	js, err := json.Marshal(struct {
+		Level   string                 `json:"level"`
+		Event   string                 `json:"msg"`
+		Context map[string]interface{} `json:"context"`
+	}{
+		level,
+		msg,
+		ctx,
+	})
+	if nil == err {
+		f.l.Print(string(js)) // Print, not Printf: the JSON may contain '%'
+	} else {
+		f.l.Printf("unable to marshal log entry: %v", err)
+	}
+}
+
+func (f *logFile) Error(msg string, ctx map[string]interface{}) {
+	f.fire("error", msg, ctx)
+}
+func (f *logFile) Warn(msg string, ctx map[string]interface{}) {
+	f.fire("warn", msg, ctx)
+}
+func (f *logFile) Info(msg string, ctx map[string]interface{}) {
+	f.fire("info", msg, ctx)
+}
+func (f *logFile) Debug(msg string, ctx map[string]interface{}) {
+	if f.doDebug {
+		f.fire("debug", msg, ctx)
+	}
+}
+func (f *logFile) DebugEnabled() bool { return f.doDebug }
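
A sketch of constructing the logger, assuming it sits in this logger package; each call emits one line with the pid prefix, the date/time flags, and the JSON entry:

package logger

import "os"

func exampleLogging() {
	lg := New(os.Stdout, true) // doDebug=true, so Debug calls also fire
	lg.Info("connected", map[string]interface{}{"collector": "staging"})
	// e.g. (1234) 2018/06/28 12:00:00.000000 {"level":"info","msg":"connected","context":{"collector":"staging"}}
}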

+ 237 - 0
vendor/github.com/newrelic/go-agent/internal/metric_names.go

@@ -0,0 +1,237 @@
+package internal
+
+const (
+	apdexRollup = "Apdex"
+	apdexPrefix = "Apdex/"
+
+	webRollup        = "WebTransaction"
+	backgroundRollup = "OtherTransaction/all"
+
+	errorsPrefix = "Errors/"
+
+	// "HttpDispatcher" metric is used for the overview graph, and
+	// therefore should only be made for web transactions.
+	dispatcherMetric = "HttpDispatcher"
+
+	queueMetric = "WebFrontend/QueueTime"
+
+	webMetricPrefix        = "WebTransaction/Go"
+	backgroundMetricPrefix = "OtherTransaction/Go"
+
+	instanceReporting = "Instance/Reporting"
+
+	// https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents
+	customEventsSeen = "Supportability/Events/Customer/Seen"
+	customEventsSent = "Supportability/Events/Customer/Sent"
+
+	// https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md
+	txnEventsSeen = "Supportability/AnalyticsEvents/TotalEventsSeen"
+	txnEventsSent = "Supportability/AnalyticsEvents/TotalEventsSent"
+
+	// https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md
+	errorEventsSeen = "Supportability/Events/TransactionError/Seen"
+	errorEventsSent = "Supportability/Events/TransactionError/Sent"
+
+	// https://source.datanerd.us/agents/agent-specs/blob/master/Span-Events.md
+	spanEventsSeen = "Supportability/SpanEvent/TotalEventsSeen"
+	spanEventsSent = "Supportability/SpanEvent/TotalEventsSent"
+
+	supportabilityDropped = "Supportability/MetricsDropped"
+
+	// Runtime/System Metrics
+	memoryPhysical       = "Memory/Physical"
+	heapObjectsAllocated = "Memory/Heap/AllocatedObjects"
+	cpuUserUtilization   = "CPU/User/Utilization"
+	cpuSystemUtilization = "CPU/System/Utilization"
+	cpuUserTime          = "CPU/User Time"
+	cpuSystemTime        = "CPU/System Time"
+	runGoroutine         = "Go/Runtime/Goroutines"
+	gcPauseFraction      = "GC/System/Pause Fraction"
+	gcPauses             = "GC/System/Pauses"
+
+	// Distributed Tracing Supportability Metrics
+	supportTracingAcceptSuccess          = "Supportability/DistributedTrace/AcceptPayload/Success"
+	supportTracingAcceptException        = "Supportability/DistributedTrace/AcceptPayload/Exception"
+	supportTracingAcceptParseException   = "Supportability/DistributedTrace/AcceptPayload/ParseException"
+	supportTracingCreateBeforeAccept     = "Supportability/DistributedTrace/AcceptPayload/Ignored/CreateBeforeAccept"
+	supportTracingIgnoredMultiple        = "Supportability/DistributedTrace/AcceptPayload/Ignored/Multiple"
+	supportTracingIgnoredVersion         = "Supportability/DistributedTrace/AcceptPayload/Ignored/MajorVersion"
+	supportTracingAcceptUntrustedAccount = "Supportability/DistributedTrace/AcceptPayload/Ignored/UntrustedAccount"
+	supportTracingAcceptNull             = "Supportability/DistributedTrace/AcceptPayload/Ignored/Null"
+	supportTracingCreatePayloadSuccess   = "Supportability/DistributedTrace/CreatePayload/Success"
+	supportTracingCreatePayloadException = "Supportability/DistributedTrace/CreatePayload/Exception"
+)
+
+// DistributedTracingSupport is used to track distributed tracing activity for
+// supportability.
+type DistributedTracingSupport struct {
+	AcceptPayloadSuccess            bool // AcceptPayload was called successfully
+	AcceptPayloadException          bool // AcceptPayload had a generic exception
+	AcceptPayloadParseException     bool // AcceptPayload had a parsing exception
+	AcceptPayloadCreateBeforeAccept bool // AcceptPayload was ignored because CreatePayload had already been called
+	AcceptPayloadIgnoredMultiple    bool // AcceptPayload was ignored because AcceptPayload had already been called
+	AcceptPayloadIgnoredVersion     bool // AcceptPayload was ignored because the payload's major version was greater than the agent's
+	AcceptPayloadUntrustedAccount   bool // AcceptPayload was ignored because the payload was untrusted
+	AcceptPayloadNullPayload        bool // AcceptPayload was ignored because the payload was nil
+	CreatePayloadSuccess            bool // CreatePayload was called successfully
+	CreatePayloadException          bool // CreatePayload had a generic exception
+}
+
+type rollupMetric struct {
+	all      string
+	allWeb   string
+	allOther string
+}
+
+func newRollupMetric(s string) rollupMetric {
+	return rollupMetric{
+		all:      s + "all",
+		allWeb:   s + "allWeb",
+		allOther: s + "allOther",
+	}
+}
+
+func (r rollupMetric) webOrOther(isWeb bool) string {
+	if isWeb {
+		return r.allWeb
+	}
+	return r.allOther
+}
+
+var (
+	errorsRollupMetric = newRollupMetric("Errors/")
+
+	// source.datanerd.us/agents/agent-specs/blob/master/APIs/external_segment.md
+	// source.datanerd.us/agents/agent-specs/blob/master/APIs/external_cat.md
+	// source.datanerd.us/agents/agent-specs/blob/master/Cross-Application-Tracing-PORTED.md
+	externalRollupMetric = newRollupMetric("External/")
+
+	// source.datanerd.us/agents/agent-specs/blob/master/Datastore-Metrics-PORTED.md
+	datastoreRollupMetric = newRollupMetric("Datastore/")
+
+	datastoreProductMetricsCache = map[string]rollupMetric{
+		"Cassandra":     newRollupMetric("Datastore/Cassandra/"),
+		"Derby":         newRollupMetric("Datastore/Derby/"),
+		"Elasticsearch": newRollupMetric("Datastore/Elasticsearch/"),
+		"Firebird":      newRollupMetric("Datastore/Firebird/"),
+		"IBMDB2":        newRollupMetric("Datastore/IBMDB2/"),
+		"Informix":      newRollupMetric("Datastore/Informix/"),
+		"Memcached":     newRollupMetric("Datastore/Memcached/"),
+		"MongoDB":       newRollupMetric("Datastore/MongoDB/"),
+		"MySQL":         newRollupMetric("Datastore/MySQL/"),
+		"MSSQL":         newRollupMetric("Datastore/MSSQL/"),
+		"Oracle":        newRollupMetric("Datastore/Oracle/"),
+		"Postgres":      newRollupMetric("Datastore/Postgres/"),
+		"Redis":         newRollupMetric("Datastore/Redis/"),
+		"Solr":          newRollupMetric("Datastore/Solr/"),
+		"SQLite":        newRollupMetric("Datastore/SQLite/"),
+		"CouchDB":       newRollupMetric("Datastore/CouchDB/"),
+		"Riak":          newRollupMetric("Datastore/Riak/"),
+		"VoltDB":        newRollupMetric("Datastore/VoltDB/"),
+	}
+)
+
+func customSegmentMetric(s string) string {
+	return "Custom/" + s
+}
+
+// customMetric is used to construct custom metrics from the input given to
+// Application.RecordCustomMetric.  Note that the "Custom/" prefix helps prevent
+// collision with other agent metrics, but does not eliminate the possibility
+// since "Custom/" is also used for segments.
+func customMetric(customerInput string) string {
+	return "Custom/" + customerInput
+}
+
+// DatastoreMetricKey contains the fields by which datastore metrics are
+// aggregated.
+type DatastoreMetricKey struct {
+	Product      string
+	Collection   string
+	Operation    string
+	Host         string
+	PortPathOrID string
+}
+
+type externalMetricKey struct {
+	Host                    string
+	ExternalCrossProcessID  string
+	ExternalTransactionName string
+}
+
+func datastoreScopedMetric(key DatastoreMetricKey) string {
+	if "" != key.Collection {
+		return datastoreStatementMetric(key)
+	}
+	return datastoreOperationMetric(key)
+}
+
+// Datastore/{datastore}/*
+func datastoreProductMetric(key DatastoreMetricKey) rollupMetric {
+	d, ok := datastoreProductMetricsCache[key.Product]
+	if ok {
+		return d
+	}
+	return newRollupMetric("Datastore/" + key.Product + "/")
+}
+
+// Datastore/operation/{datastore}/{operation}
+func datastoreOperationMetric(key DatastoreMetricKey) string {
+	return "Datastore/operation/" + key.Product +
+		"/" + key.Operation
+}
+
+// Datastore/statement/{datastore}/{table}/{operation}
+func datastoreStatementMetric(key DatastoreMetricKey) string {
+	return "Datastore/statement/" + key.Product +
+		"/" + key.Collection +
+		"/" + key.Operation
+}
+
+// Datastore/instance/{datastore}/{host}/{port_path_or_id}
+func datastoreInstanceMetric(key DatastoreMetricKey) string {
+	return "Datastore/instance/" + key.Product +
+		"/" + key.Host +
+		"/" + key.PortPathOrID
+}
+
+// External/{host}/all
+func externalHostMetric(key externalMetricKey) string {
+	return "External/" + key.Host + "/all"
+}
+
+// ExternalApp/{host}/{external_id}/all
+func externalAppMetric(key externalMetricKey) string {
+	return "ExternalApp/" + key.Host +
+		"/" + key.ExternalCrossProcessID + "/all"
+}
+
+// ExternalTransaction/{host}/{external_id}/{external_txnname}
+func externalTransactionMetric(key externalMetricKey) string {
+	return "ExternalTransaction/" + key.Host +
+		"/" + key.ExternalCrossProcessID +
+		"/" + key.ExternalTransactionName
+}
+
+func callerFields(c payloadCaller) string {
+	return "/" + c.Type +
+		"/" + c.Account +
+		"/" + c.App +
+		"/" + c.TransportType +
+		"/"
+}
+
+// DurationByCaller/{type}/{account}/{app}/{transport}/*
+func durationByCallerMetric(c payloadCaller) rollupMetric {
+	return newRollupMetric("DurationByCaller" + callerFields(c))
+}
+
+// ErrorsByCaller/{type}/{account}/{app}/{transport}/*
+func errorsByCallerMetric(c payloadCaller) rollupMetric {
+	return newRollupMetric("ErrorsByCaller" + callerFields(c))
+}
+
+// TransportDuration/{type}/{account}/{app}/{transport}/*
+func transportDurationMetric(c payloadCaller) rollupMetric {
+	return newRollupMetric("TransportDuration" + callerFields(c))
+}
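
The rollupMetric triple lets callers record the grand total plus the web/background split without rebuilding strings at each call site; a same-package sketch:

package internal

import "fmt"

func exampleRollup() {
	m := newRollupMetric("External/")
	fmt.Println(m.all)               // External/all
	fmt.Println(m.webOrOther(true))  // External/allWeb
	fmt.Println(m.webOrOther(false)) // External/allOther
}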

+ 164 - 0
vendor/github.com/newrelic/go-agent/internal/metric_rules.go

@@ -0,0 +1,164 @@
+package internal
+
+import (
+	"encoding/json"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+type ruleResult int
+
+const (
+	ruleMatched ruleResult = iota
+	ruleUnmatched
+	ruleIgnore
+)
+
+type metricRule struct {
+	// 'Ignore' indicates if the entire transaction should be discarded if
+	// there is a match.  This field is only used by "url_rules" and
+	// "transaction_name_rules", not "metric_name_rules".
+	Ignore              bool   `json:"ignore"`
+	EachSegment         bool   `json:"each_segment"`
+	ReplaceAll          bool   `json:"replace_all"`
+	Terminate           bool   `json:"terminate_chain"`
+	Order               int    `json:"eval_order"`
+	OriginalReplacement string `json:"replacement"`
+	RawExpr             string `json:"match_expression"`
+
+	// Go's regexp backreferences use '${1}' instead of the Perlish '\1', so
+	// we transform the replacement string into the Go syntax and store it
+	// here.
+	TransformedReplacement string
+	re                     *regexp.Regexp
+}
+
+type metricRules []*metricRule
+
+// Go's regexp backreferences use `${1}` instead of the Perlish `\1`, so we must
+// transform the replacement string.  This is non-trivial: `\1` is a
+// backreference but `\\1` is not.  Rather than count the number of back slashes
+// preceding the digit, we simply skip rules with tricky replacements.
+var (
+	transformReplacementAmbiguous   = regexp.MustCompile(`\\\\([0-9]+)`)
+	transformReplacementRegex       = regexp.MustCompile(`\\([0-9]+)`)
+	transformReplacementReplacement = "$${${1}}"
+)
+
+func (rules *metricRules) UnmarshalJSON(data []byte) error {
+	var raw []*metricRule
+
+	if err := json.Unmarshal(data, &raw); nil != err {
+		return err
+	}
+
+	valid := make(metricRules, 0, len(raw))
+
+	for _, r := range raw {
+		re, err := regexp.Compile("(?i)" + r.RawExpr)
+		if err != nil {
+			// TODO
+			// Warn("unable to compile rule", {
+			// 	"match_expression": r.RawExpr,
+			// 	"error":            err.Error(),
+			// })
+			continue
+		}
+
+		if transformReplacementAmbiguous.MatchString(r.OriginalReplacement) {
+			// TODO
+			// Warn("unable to transform replacement", {
+			// 	"match_expression": r.RawExpr,
+			// 	"replacement":      r.OriginalReplacement,
+			// })
+			continue
+		}
+
+		r.re = re
+		r.TransformedReplacement = transformReplacementRegex.ReplaceAllString(r.OriginalReplacement,
+			transformReplacementReplacement)
+		valid = append(valid, r)
+	}
+
+	sort.Sort(valid)
+
+	*rules = valid
+	return nil
+}
+
+func (rules metricRules) Len() int {
+	return len(rules)
+}
+
+// Rules should be applied in increasing order
+func (rules metricRules) Less(i, j int) bool {
+	return rules[i].Order < rules[j].Order
+}
+func (rules metricRules) Swap(i, j int) {
+	rules[i], rules[j] = rules[j], rules[i]
+}
+
+func replaceFirst(re *regexp.Regexp, s string, replacement string) (ruleResult, string) {
+	// Note that ReplaceAllStringFunc cannot be used here since it does
+	// not replace $1 placeholders.
+	loc := re.FindStringIndex(s)
+	if nil == loc {
+		return ruleUnmatched, s
+	}
+	firstMatch := s[loc[0]:loc[1]]
+	firstMatchReplaced := re.ReplaceAllString(firstMatch, replacement)
+	return ruleMatched, s[0:loc[0]] + firstMatchReplaced + s[loc[1]:]
+}
+
+func (r *metricRule) apply(s string) (ruleResult, string) {
+	// Rules are strange, and there is no spec.
+	// This code attempts to duplicate the logic of the PHP agent.
+	// Ambiguity abounds.
+
+	if r.Ignore {
+		if r.re.MatchString(s) {
+			return ruleIgnore, ""
+		}
+		return ruleUnmatched, s
+	}
+
+	if r.ReplaceAll {
+		if r.re.MatchString(s) {
+			return ruleMatched, r.re.ReplaceAllString(s, r.TransformedReplacement)
+		}
+		return ruleUnmatched, s
+	} else if r.EachSegment {
+		segments := strings.Split(s, "/")
+		applied := make([]string, len(segments))
+		result := ruleUnmatched
+		for i, segment := range segments {
+			var segmentMatched ruleResult
+			segmentMatched, applied[i] = replaceFirst(r.re, segment, r.TransformedReplacement)
+			if segmentMatched == ruleMatched {
+				result = ruleMatched
+			}
+		}
+		return result, strings.Join(applied, "/")
+	} else {
+		return replaceFirst(r.re, s, r.TransformedReplacement)
+	}
+}
+
+func (rules metricRules) Apply(input string) string {
+	var res ruleResult
+	s := input
+
+	for _, rule := range rules {
+		res, s = rule.apply(s)
+
+		if ruleIgnore == res {
+			return ""
+		}
+		if (ruleMatched == res) && rule.Terminate {
+			break
+		}
+	}
+
+	return s
+}
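
A same-package sketch of rules in action; the rule JSON is hypothetical but uses the fields unmarshaled above (match_expression, replacement, each_segment, eval_order):

package internal

import (
	"encoding/json"
	"fmt"
)

func exampleRules() {
	js := `[{"match_expression":"[0-9]+","replacement":"*","each_segment":true,"eval_order":0}]`
	var rules metricRules
	if err := json.Unmarshal([]byte(js), &rules); nil != err {
		panic(err)
	}
	// Each numeric path segment is rewritten to '*':
	fmt.Println(rules.Apply("WebTransaction/Go/users/123/posts/456"))
	// WebTransaction/Go/users/*/posts/*
}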

+ 262 - 0
vendor/github.com/newrelic/go-agent/internal/metrics.go

@@ -0,0 +1,262 @@
+package internal
+
+import (
+	"bytes"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/jsonx"
+)
+
+type metricForce int
+
+const (
+	forced metricForce = iota
+	unforced
+)
+
+type metricID struct {
+	Name  string `json:"name"`
+	Scope string `json:"scope,omitempty"`
+}
+
+type metricData struct {
+	// These values are in the units expected by the collector.
+	countSatisfied  float64 // Seconds, or count for Apdex
+	totalTolerated  float64 // Seconds, or count for Apdex
+	exclusiveFailed float64 // Seconds, or count for Apdex
+	min             float64 // Seconds
+	max             float64 // Seconds
+	sumSquares      float64 // Seconds**2, or 0 for Apdex
+}
+
+func metricDataFromDuration(duration, exclusive time.Duration) metricData {
+	ds := duration.Seconds()
+	return metricData{
+		countSatisfied:  1,
+		totalTolerated:  ds,
+		exclusiveFailed: exclusive.Seconds(),
+		min:             ds,
+		max:             ds,
+		sumSquares:      ds * ds,
+	}
+}
+
+type metric struct {
+	forced metricForce
+	data   metricData
+}
+
+type metricTable struct {
+	metricPeriodStart time.Time
+	failedHarvests    int
+	maxTableSize      int // After this max is reached, only forced metrics are added
+	numDropped        int // Number of unforced metrics dropped due to full table
+	metrics           map[metricID]*metric
+}
+
+func newMetricTable(maxTableSize int, now time.Time) *metricTable {
+	return &metricTable{
+		metricPeriodStart: now,
+		metrics:           make(map[metricID]*metric),
+		maxTableSize:      maxTableSize,
+		failedHarvests:    0,
+	}
+}
+
+func (mt *metricTable) full() bool {
+	return len(mt.metrics) >= mt.maxTableSize
+}
+
+func (data *metricData) aggregate(src metricData) {
+	data.countSatisfied += src.countSatisfied
+	data.totalTolerated += src.totalTolerated
+	data.exclusiveFailed += src.exclusiveFailed
+
+	if src.min < data.min {
+		data.min = src.min
+	}
+	if src.max > data.max {
+		data.max = src.max
+	}
+
+	data.sumSquares += src.sumSquares
+}
+
+func (mt *metricTable) mergeMetric(id metricID, m metric) {
+	if to := mt.metrics[id]; nil != to {
+		to.data.aggregate(m.data)
+		return
+	}
+
+	if mt.full() && (unforced == m.forced) {
+		mt.numDropped++
+		return
+	}
+	// NOTE: `new` is used in place of `&m` since the latter will make `m`
+	// get heap allocated regardless of whether or not this line gets
+	// reached (running go version go1.5 darwin/amd64).  See
+	// BenchmarkAddingSameMetrics.
+	alloc := new(metric)
+	*alloc = m
+	mt.metrics[id] = alloc
+}
+
+func (mt *metricTable) mergeFailed(from *metricTable) {
+	fails := from.failedHarvests + 1
+	if fails >= failedMetricAttemptsLimit {
+		return
+	}
+	if from.metricPeriodStart.Before(mt.metricPeriodStart) {
+		mt.metricPeriodStart = from.metricPeriodStart
+	}
+	mt.failedHarvests = fails
+	mt.merge(from, "")
+}
+
+func (mt *metricTable) merge(from *metricTable, newScope string) {
+	if "" == newScope {
+		for id, m := range from.metrics {
+			mt.mergeMetric(id, *m)
+		}
+	} else {
+		for id, m := range from.metrics {
+			mt.mergeMetric(metricID{Name: id.Name, Scope: newScope}, *m)
+		}
+	}
+}
+
+func (mt *metricTable) add(name, scope string, data metricData, force metricForce) {
+	mt.mergeMetric(metricID{Name: name, Scope: scope}, metric{data: data, forced: force})
+}
+
+func (mt *metricTable) addCount(name string, count float64, force metricForce) {
+	mt.add(name, "", metricData{countSatisfied: count}, force)
+}
+
+func (mt *metricTable) addSingleCount(name string, force metricForce) {
+	mt.addCount(name, float64(1), force)
+}
+
+func (mt *metricTable) addDuration(name, scope string, duration, exclusive time.Duration, force metricForce) {
+	mt.add(name, scope, metricDataFromDuration(duration, exclusive), force)
+}
+
+func (mt *metricTable) addValueExclusive(name, scope string, total, exclusive float64, force metricForce) {
+	data := metricData{
+		countSatisfied:  1,
+		totalTolerated:  total,
+		exclusiveFailed: exclusive,
+		min:             total,
+		max:             total,
+		sumSquares:      total * total,
+	}
+	mt.add(name, scope, data, force)
+}
+
+func (mt *metricTable) addValue(name, scope string, total float64, force metricForce) {
+	mt.addValueExclusive(name, scope, total, total, force)
+}
+
+func (mt *metricTable) addApdex(name, scope string, apdexThreshold time.Duration, zone ApdexZone, force metricForce) {
+	apdexSeconds := apdexThreshold.Seconds()
+	data := metricData{min: apdexSeconds, max: apdexSeconds}
+
+	switch zone {
+	case ApdexSatisfying:
+		data.countSatisfied = 1
+	case ApdexTolerating:
+		data.totalTolerated = 1
+	case ApdexFailing:
+		data.exclusiveFailed = 1
+	}
+
+	mt.add(name, scope, data, force)
+}
+
+func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, error) {
+	if 0 == len(mt.metrics) {
+		return nil, nil
+	}
+	estimatedBytesPerMetric := 128
+	estimatedLen := len(mt.metrics) * estimatedBytesPerMetric
+	buf := bytes.NewBuffer(make([]byte, 0, estimatedLen))
+	buf.WriteByte('[')
+
+	jsonx.AppendString(buf, agentRunID)
+	buf.WriteByte(',')
+	jsonx.AppendInt(buf, mt.metricPeriodStart.Unix())
+	buf.WriteByte(',')
+	jsonx.AppendInt(buf, now.Unix())
+	buf.WriteByte(',')
+
+	buf.WriteByte('[')
+	first := true
+	for id, metric := range mt.metrics {
+		if first {
+			first = false
+		} else {
+			buf.WriteByte(',')
+		}
+		buf.WriteByte('[')
+		buf.WriteByte('{')
+		buf.WriteString(`"name":`)
+		jsonx.AppendString(buf, id.Name)
+		if id.Scope != "" {
+			buf.WriteString(`,"scope":`)
+			jsonx.AppendString(buf, id.Scope)
+		}
+		buf.WriteByte('}')
+		buf.WriteByte(',')
+
+		jsonx.AppendFloatArray(buf,
+			metric.data.countSatisfied,
+			metric.data.totalTolerated,
+			metric.data.exclusiveFailed,
+			metric.data.min,
+			metric.data.max,
+			metric.data.sumSquares)
+
+		buf.WriteByte(']')
+	}
+	buf.WriteByte(']')
+
+	buf.WriteByte(']')
+	return buf.Bytes(), nil
+}
+
+func (mt *metricTable) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
+	return mt.CollectorJSON(agentRunID, harvestStart)
+}
+func (mt *metricTable) MergeIntoHarvest(h *Harvest) {
+	h.Metrics.mergeFailed(mt)
+}
+
+func (mt *metricTable) ApplyRules(rules metricRules) *metricTable {
+	if len(rules) == 0 {
+		return mt
+	}
+
+	applied := newMetricTable(mt.maxTableSize, mt.metricPeriodStart)
+	cache := make(map[string]string)
+
+	for id, m := range mt.metrics {
+		out, ok := cache[id.Name]
+		if !ok {
+			out = rules.Apply(id.Name)
+			cache[id.Name] = out
+		}
+
+		if "" != out {
+			applied.mergeMetric(metricID{Name: out, Scope: id.Scope}, *m)
+		}
+	}
+
+	return applied
+}
+
+func (mt *metricTable) EndpointMethod() string {
+	return cmdMetrics
+}
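
A same-package sketch showing how repeated adds under one metricID aggregate before serialization (values illustrative):

package internal

import (
	"fmt"
	"time"
)

func exampleAggregation() {
	mt := newMetricTable(maxMetrics, time.Now())
	mt.addDuration("Custom/work", "", 2*time.Second, 1*time.Second, unforced)
	mt.addDuration("Custom/work", "", 4*time.Second, 2*time.Second, unforced)

	// One metric remains: count=2, total=6, exclusive=3, min=2, max=4,
	// sumSquares=20 (all in seconds).
	js, err := mt.CollectorJSON("agent-run-id", time.Now())
	if nil != err {
		panic(err)
	}
	fmt.Println(string(js))
}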

+ 37 - 0
vendor/github.com/newrelic/go-agent/internal/obfuscate.go

@@ -0,0 +1,37 @@
+package internal
+
+import (
+	"encoding/base64"
+	"errors"
+)
+
+func deobfuscate(in string, key []byte) ([]byte, error) {
+	if len(key) == 0 {
+		return nil, errors.New("key cannot be zero length")
+	}
+
+	decoded, err := base64.StdEncoding.DecodeString(in)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make([]byte, len(decoded))
+	for i, c := range decoded {
+		out[i] = c ^ key[i%len(key)]
+	}
+
+	return out, nil
+}
+
+func obfuscate(in, key []byte) (string, error) {
+	if len(key) == 0 {
+		return "", errors.New("key cannot be zero length")
+	}
+
+	out := make([]byte, len(in))
+	for i, c := range in {
+		out[i] = c ^ key[i%len(key)]
+	}
+
+	return base64.StdEncoding.EncodeToString(out), nil
+}
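
Obfuscation here is a reversible XOR against the license key followed by base64, so a round trip recovers the input; a same-package sketch with an illustrative key:

package internal

import "fmt"

func exampleObfuscation() {
	key := []byte("license-key") // illustrative, not a real key
	enc, _ := obfuscate([]byte("select * from users"), key)
	dec, _ := deobfuscate(enc, key)
	fmt.Println(enc)         // base64 of the XOR-ed bytes
	fmt.Println(string(dec)) // select * from users
}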

+ 27 - 0
vendor/github.com/newrelic/go-agent/internal/priority.go

@@ -0,0 +1,27 @@
+package internal
+
+// Priority allows for priority sampling of events.  When an event
+// is created it is given a Priority.  Whenever an event pool is
+// full and events need to be dropped, the events with the lowest priority
+// are dropped.
+type Priority float32
+
+// According to spec, Agents SHOULD truncate the value to at most 6
+// digits past the decimal point.
+const (
+	priorityFormat = "%.6f"
+)
+
+// NewPriority returns a new priority.
+func NewPriority() Priority {
+	return Priority(RandFloat32())
+}
+
+// Float32 returns the priority as a float32.
+func (p Priority) Float32() float32 {
+	return float32(p)
+}
+
+func (p Priority) isLowerPriority(y Priority) bool {
+	return p < y
+}

+ 72 - 0
vendor/github.com/newrelic/go-agent/internal/queuing.go

@@ -0,0 +1,72 @@
+package internal
+
+import (
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	xRequestStart = "X-Request-Start"
+	xQueueStart   = "X-Queue-Start"
+)
+
+var (
+	earliestAcceptableSeconds = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()
+	latestAcceptableSeconds   = time.Date(2050, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()
+)
+
+func checkQueueTimeSeconds(secondsFloat float64) time.Time {
+	seconds := int64(secondsFloat)
+	nanos := int64((secondsFloat - float64(seconds)) * (1000.0 * 1000.0 * 1000.0))
+	if seconds > earliestAcceptableSeconds && seconds < latestAcceptableSeconds {
+		return time.Unix(seconds, nanos)
+	}
+	return time.Time{}
+}
+
+func parseQueueTime(s string) time.Time {
+	f, err := strconv.ParseFloat(s, 64)
+	if nil != err {
+		return time.Time{}
+	}
+	if f <= 0 {
+		return time.Time{}
+	}
+
+	// try microseconds
+	if t := checkQueueTimeSeconds(f / (1000.0 * 1000.0)); !t.IsZero() {
+		return t
+	}
+	// try milliseconds
+	if t := checkQueueTimeSeconds(f / (1000.0)); !t.IsZero() {
+		return t
+	}
+	// try seconds
+	if t := checkQueueTimeSeconds(f); !t.IsZero() {
+		return t
+	}
+	return time.Time{}
+}
+
+// QueueDuration returns the time the request spent queued in front-end
+// servers, computed from the X-Queue-Start/X-Request-Start header and the
+// transaction start time.
+func QueueDuration(hdr http.Header, txnStart time.Time) time.Duration {
+	s := hdr.Get(xQueueStart)
+	if "" == s {
+		s = hdr.Get(xRequestStart)
+	}
+	if "" == s {
+		return 0
+	}
+
+	s = strings.TrimPrefix(s, "t=")
+	qt := parseQueueTime(s)
+	if qt.IsZero() {
+		return 0
+	}
+	if qt.After(txnStart) {
+		return 0
+	}
+	return txnStart.Sub(qt)
+}
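
A same-package sketch of the handshake: the front end stamps X-Queue-Start (or X-Request-Start) with t=&lt;timestamp&gt;, and parseQueueTime guesses the unit by trying microseconds, then milliseconds, then seconds against a sane date range:

package internal

import (
	"fmt"
	"net/http"
	"time"
)

func exampleQueueDuration() {
	txnStart := time.Now()
	queued := txnStart.Add(-250 * time.Millisecond)

	hdr := http.Header{}
	hdr.Set("X-Queue-Start", fmt.Sprintf("t=%.6f", float64(queued.UnixNano())/1e9))

	fmt.Println(QueueDuration(hdr, txnStart)) // roughly 250ms
}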

+ 59 - 0
vendor/github.com/newrelic/go-agent/internal/rand.go

@@ -0,0 +1,59 @@
+package internal
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+var (
+	seededRand = struct {
+		sync.Mutex
+		*rand.Rand
+	}{
+		Rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+	}
+)
+
+// RandUint64 returns a random uint64.
+//
+// IMPORTANT! The default rand package functions are not used, since we want to
+// minimize the chance that different Go processes duplicate the same
+// transaction id.  (Note that the rand top level functions "use a default
+// shared Source that produces a deterministic sequence of values each time a
+// program is run" (and we don't seed the shared Source to avoid changing
+// customer apps' behavior)).
+func RandUint64() uint64 {
+	seededRand.Lock()
+	defer seededRand.Unlock()
+
+	u1 := seededRand.Uint32()
+	u2 := seededRand.Uint32()
+	return (uint64(u1) << 32) | uint64(u2)
+}
+
+// RandUint32 returns a random uint32.
+func RandUint32() uint32 {
+	seededRand.Lock()
+	defer seededRand.Unlock()
+
+	return seededRand.Uint32()
+}
+
+// RandFloat32 returns a random float32 in the open interval (0.0, 1.0);
+// zero values are retried so the result is never exactly 0.0.
+func RandFloat32() float32 {
+	seededRand.Lock()
+	defer seededRand.Unlock()
+
+	for {
+		if r := seededRand.Float32(); 0.0 != r {
+			return r
+		}
+	}
+}
+
+// RandUint64N returns a random uint64 in the
+// half-open interval [0, max).
+func RandUint64N(max uint64) uint64 {
+	return RandUint64() % max
+}

+ 145 - 0
vendor/github.com/newrelic/go-agent/internal/sampler.go

@@ -0,0 +1,145 @@
+package internal
+
+import (
+	"runtime"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/logger"
+	"github.com/newrelic/go-agent/internal/sysinfo"
+)
+
+// Sample is a system/runtime snapshot.
+type Sample struct {
+	when         time.Time
+	memStats     runtime.MemStats
+	usage        sysinfo.Usage
+	numGoroutine int
+	numCPU       int
+}
+
+func bytesToMebibytesFloat(bts uint64) float64 {
+	return float64(bts) / (1024 * 1024)
+}
+
+// GetSample gathers a new Sample.
+func GetSample(now time.Time, lg logger.Logger) *Sample {
+	s := Sample{
+		when:         now,
+		numGoroutine: runtime.NumGoroutine(),
+		numCPU:       runtime.NumCPU(),
+	}
+
+	if usage, err := sysinfo.GetUsage(); err == nil {
+		s.usage = usage
+	} else {
+		lg.Warn("unable to gather system usage", map[string]interface{}{
+			"error": err.Error(),
+		})
+	}
+
+	runtime.ReadMemStats(&s.memStats)
+
+	return &s
+}
+
+type cpuStats struct {
+	used     time.Duration
+	fraction float64 // used / (elapsed * numCPU)
+}
+
+// Stats contains system information for a period of time.
+type Stats struct {
+	numGoroutine    int
+	allocBytes      uint64
+	heapObjects     uint64
+	user            cpuStats
+	system          cpuStats
+	gcPauseFraction float64
+	deltaNumGC      uint32
+	deltaPauseTotal time.Duration
+	minPause        time.Duration
+	maxPause        time.Duration
+}
+
+// Samples is used as the parameter to GetStats to avoid mixing up the previous
+// and current sample.
+type Samples struct {
+	Previous *Sample
+	Current  *Sample
+}
+
+// GetStats combines two Samples into a Stats.
+func GetStats(ss Samples) Stats {
+	cur := ss.Current
+	prev := ss.Previous
+	elapsed := cur.when.Sub(prev.when)
+
+	s := Stats{
+		numGoroutine: cur.numGoroutine,
+		allocBytes:   cur.memStats.Alloc,
+		heapObjects:  cur.memStats.HeapObjects,
+	}
+
+	// CPU Utilization
+	totalCPUSeconds := elapsed.Seconds() * float64(cur.numCPU)
+	if prev.usage.User != 0 && cur.usage.User > prev.usage.User {
+		s.user.used = cur.usage.User - prev.usage.User
+		s.user.fraction = s.user.used.Seconds() / totalCPUSeconds
+	}
+	if prev.usage.System != 0 && cur.usage.System > prev.usage.System {
+		s.system.used = cur.usage.System - prev.usage.System
+		s.system.fraction = s.system.used.Seconds() / totalCPUSeconds
+	}
+
+	// GC Pause Fraction
+	deltaPauseTotalNs := cur.memStats.PauseTotalNs - prev.memStats.PauseTotalNs
+	frac := float64(deltaPauseTotalNs) / float64(elapsed.Nanoseconds())
+	s.gcPauseFraction = frac
+
+	// GC Pauses
+	if deltaNumGC := cur.memStats.NumGC - prev.memStats.NumGC; deltaNumGC > 0 {
+		// In case more than 256 pauses have happened between samples
+		// and we are examining a subset of the pauses, we ensure that
+		// the min and max are not on the same side of the average by
+		// using the average as the starting min and max.
+		maxPauseNs := deltaPauseTotalNs / uint64(deltaNumGC)
+		minPauseNs := deltaPauseTotalNs / uint64(deltaNumGC)
+		for i := prev.memStats.NumGC + 1; i <= cur.memStats.NumGC; i++ {
+			pause := cur.memStats.PauseNs[(i+255)%256]
+			if pause > maxPauseNs {
+				maxPauseNs = pause
+			}
+			if pause < minPauseNs {
+				minPauseNs = pause
+			}
+		}
+		s.deltaPauseTotal = time.Duration(deltaPauseTotalNs) * time.Nanosecond
+		s.deltaNumGC = deltaNumGC
+		s.minPause = time.Duration(minPauseNs) * time.Nanosecond
+		s.maxPause = time.Duration(maxPauseNs) * time.Nanosecond
+	}
+
+	return s
+}
+
+// MergeIntoHarvest implements Harvestable.
+func (s Stats) MergeIntoHarvest(h *Harvest) {
+	h.Metrics.addValue(heapObjectsAllocated, "", float64(s.heapObjects), forced)
+	h.Metrics.addValue(runGoroutine, "", float64(s.numGoroutine), forced)
+	h.Metrics.addValueExclusive(memoryPhysical, "", bytesToMebibytesFloat(s.allocBytes), 0, forced)
+	h.Metrics.addValueExclusive(cpuUserUtilization, "", s.user.fraction, 0, forced)
+	h.Metrics.addValueExclusive(cpuSystemUtilization, "", s.system.fraction, 0, forced)
+	h.Metrics.addValue(cpuUserTime, "", s.user.used.Seconds(), forced)
+	h.Metrics.addValue(cpuSystemTime, "", s.system.used.Seconds(), forced)
+	h.Metrics.addValueExclusive(gcPauseFraction, "", s.gcPauseFraction, 0, forced)
+	if s.deltaNumGC > 0 {
+		h.Metrics.add(gcPauses, "", metricData{
+			countSatisfied:  float64(s.deltaNumGC),
+			totalTolerated:  s.deltaPauseTotal.Seconds(),
+			exclusiveFailed: 0,
+			min:             s.minPause.Seconds(),
+			max:             s.maxPause.Seconds(),
+			sumSquares:      s.deltaPauseTotal.Seconds() * s.deltaPauseTotal.Seconds(),
+		}, forced)
+	}
+}
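
A same-package sketch of the sampling loop the agent runs on RuntimeSamplerPeriod; the sleep stands in for the period:

package internal

import (
	"time"

	"github.com/newrelic/go-agent/internal/logger"
)

func exampleRuntimeSampling() {
	lg := logger.ShimLogger{}
	prev := GetSample(time.Now(), lg)
	time.Sleep(1 * time.Second) // stand-in for RuntimeSamplerPeriod
	cur := GetSample(time.Now(), lg)

	// The delta between the two samples becomes goroutine, memory, CPU,
	// and GC pause metrics on the harvest.
	h := NewHarvest(time.Now())
	GetStats(Samples{Previous: prev, Current: cur}).MergeIntoHarvest(h)
}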

+ 101 - 0
vendor/github.com/newrelic/go-agent/internal/security_policies.go

@@ -0,0 +1,101 @@
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+)
+
+// Security policies documentation:
+// https://source.datanerd.us/agents/agent-specs/blob/master/Language-Agent-Security-Policies.md
+
+// SecurityPolicies contains the security policies.
+type SecurityPolicies struct {
+	RecordSQL                 securityPolicy `json:"record_sql"`
+	AttributesInclude         securityPolicy `json:"attributes_include"`
+	AllowRawExceptionMessages securityPolicy `json:"allow_raw_exception_messages"`
+	CustomEvents              securityPolicy `json:"custom_events"`
+	CustomParameters          securityPolicy `json:"custom_parameters"`
+}
+
+// PointerIfPopulated returns a reference to the security policies if they have
+// been populated from JSON.
+func (sp *SecurityPolicies) PointerIfPopulated() *SecurityPolicies {
+	emptyPolicies := SecurityPolicies{}
+	if nil != sp && *sp != emptyPolicies {
+		return sp
+	}
+	return nil
+}
+
+type securityPolicy struct {
+	EnabledVal *bool `json:"enabled"`
+}
+
+func (p *securityPolicy) Enabled() bool           { return nil == p.EnabledVal || *p.EnabledVal }
+func (p *securityPolicy) SetEnabled(enabled bool) { p.EnabledVal = &enabled }
+func (p *securityPolicy) IsSet() bool             { return nil != p.EnabledVal }
+
+type policyer interface {
+	SetEnabled(bool)
+	IsSet() bool
+}
+
+// UnmarshalJSON decodes security policies sent from the preconnect endpoint.
+func (sp *SecurityPolicies) UnmarshalJSON(data []byte) (er error) {
+	defer func() {
+		// Zero out all fields if there is an error to ensure that the
+		// populated check works.
+		if er != nil {
+			*sp = SecurityPolicies{}
+		}
+	}()
+
+	var raw map[string]struct {
+		Enabled  bool `json:"enabled"`
+		Required bool `json:"required"`
+	}
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return fmt.Errorf("unable to unmarshal security policies: %v", err)
+	}
+
+	knownPolicies := make(map[string]policyer)
+
+	spv := reflect.ValueOf(sp).Elem()
+	for i := 0; i < spv.NumField(); i++ {
+		fieldAddress := spv.Field(i).Addr()
+		field := fieldAddress.Interface().(policyer)
+		name := spv.Type().Field(i).Tag.Get("json")
+		knownPolicies[name] = field
+	}
+
+	for name, policy := range raw {
+		p, ok := knownPolicies[name]
+		if !ok {
+			if policy.Required {
+				return errUnknownRequiredPolicy{name: name}
+			}
+		} else {
+			p.SetEnabled(policy.Enabled)
+		}
+	}
+	for name, policy := range knownPolicies {
+		if !policy.IsSet() {
+			return errUnsetPolicy{name: name}
+		}
+	}
+	return nil
+}
+
+type errUnknownRequiredPolicy struct{ name string }
+
+func (err errUnknownRequiredPolicy) Error() string {
+	return fmt.Sprintf("policy '%s' is unrecognized, please check for a newer agent version or contact support", err.name)
+}
+
+type errUnsetPolicy struct{ name string }
+
+func (err errUnsetPolicy) Error() string {
+	return fmt.Sprintf("policy '%s' not received, please contact support", err.name)
+}
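
A same-package sketch of decoding a hypothetical preconnect reply; every known policy must appear or UnmarshalJSON returns errUnsetPolicy, while unknown policies only fail when flagged required:

package internal

import (
	"encoding/json"
	"fmt"
)

func examplePolicies() {
	js := `{
		"record_sql":                   {"enabled": false, "required": true},
		"attributes_include":           {"enabled": true,  "required": false},
		"allow_raw_exception_messages": {"enabled": true,  "required": false},
		"custom_events":                {"enabled": true,  "required": false},
		"custom_parameters":            {"enabled": false, "required": false}
	}`
	var sp SecurityPolicies
	if err := json.Unmarshal([]byte(js), &sp); nil != err {
		panic(err)
	}
	fmt.Println(sp.RecordSQL.Enabled()) // false
}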

+ 145 - 0
vendor/github.com/newrelic/go-agent/internal/segment_terms.go

@@ -0,0 +1,145 @@
+package internal
+
+// https://newrelic.atlassian.net/wiki/display/eng/Language+agent+transaction+segment+terms+rules
+
+import (
+	"encoding/json"
+	"strings"
+)
+
+const (
+	placeholder = "*"
+	separator   = "/"
+)
+
+type segmentRule struct {
+	Prefix   string   `json:"prefix"`
+	Terms    []string `json:"terms"`
+	TermsMap map[string]struct{}
+}
+
+// segmentRules is keyed by each segmentRule's Prefix field with any trailing
+// slash removed.
+type segmentRules map[string]*segmentRule
+
+func buildTermsMap(terms []string) map[string]struct{} {
+	m := make(map[string]struct{}, len(terms))
+	for _, t := range terms {
+		m[t] = struct{}{}
+	}
+	return m
+}
+
+func (rules *segmentRules) UnmarshalJSON(b []byte) error {
+	var raw []*segmentRule
+
+	if err := json.Unmarshal(b, &raw); nil != err {
+		return err
+	}
+
+	rs := make(map[string]*segmentRule)
+
+	for _, rule := range raw {
+		prefix := strings.TrimSuffix(rule.Prefix, "/")
+		if len(strings.Split(prefix, "/")) != 2 {
+			// TODO
+			// Warn("invalid segment term rule prefix",
+			// 	{"prefix": rule.Prefix})
+			continue
+		}
+
+		if nil == rule.Terms {
+			// TODO
+			// Warn("segment term rule has missing terms",
+			// 	{"prefix": rule.Prefix})
+			continue
+		}
+
+		rule.TermsMap = buildTermsMap(rule.Terms)
+
+		rs[prefix] = rule
+	}
+
+	*rules = rs
+	return nil
+}
+
+func (rule *segmentRule) apply(name string) string {
+	if !strings.HasPrefix(name, rule.Prefix) {
+		return name
+	}
+
+	s := strings.TrimPrefix(name, rule.Prefix)
+
+	leadingSlash := ""
+	if strings.HasPrefix(s, separator) {
+		leadingSlash = separator
+		s = strings.TrimPrefix(s, separator)
+	}
+
+	if "" != s {
+		segments := strings.Split(s, separator)
+
+		for i, segment := range segments {
+			_, whitelisted := rule.TermsMap[segment]
+			if whitelisted {
+				segments[i] = segment
+			} else {
+				segments[i] = placeholder
+			}
+		}
+
+		segments = collapsePlaceholders(segments)
+		s = strings.Join(segments, separator)
+	}
+
+	return rule.Prefix + leadingSlash + s
+}
+
+func (rules segmentRules) apply(name string) string {
+	if nil == rules {
+		return name
+	}
+
+	rule, ok := rules[firstTwoSegments(name)]
+	if !ok {
+		return name
+	}
+
+	return rule.apply(name)
+}
+
+func firstTwoSegments(name string) string {
+	firstSlashIdx := strings.Index(name, separator)
+	if firstSlashIdx == -1 {
+		return name
+	}
+
+	secondSlashIdx := strings.Index(name[firstSlashIdx+1:], separator)
+	if secondSlashIdx == -1 {
+		return name
+	}
+
+	return name[0 : firstSlashIdx+secondSlashIdx+1]
+}
+
+func collapsePlaceholders(segments []string) []string {
+	j := 0
+	prevStar := false
+	for i := 0; i < len(segments); i++ {
+		segment := segments[i]
+		if placeholder == segment {
+			if prevStar {
+				continue
+			}
+			segments[j] = placeholder
+			j++
+			prevStar = true
+		} else {
+			segments[j] = segment
+			j++
+			prevStar = false
+		}
+	}
+	return segments[0:j]
+}
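
A worked example of the rule application above: whitelisted terms survive, every other segment becomes the "*" placeholder, and adjacent placeholders collapse into one. The sketch below reimplements just that inner loop for a rule whose prefix has already been matched (terms and path are made up):

package main

import (
	"fmt"
	"strings"
)

// applyTerms mirrors segmentRule.apply after prefix matching: keep
// whitelisted terms, replace the rest with "*", collapse runs of "*".
func applyTerms(path string, terms map[string]struct{}) string {
	segments := strings.Split(path, "/")
	out := make([]string, 0, len(segments))
	for _, s := range segments {
		if _, ok := terms[s]; ok {
			out = append(out, s)
		} else if len(out) == 0 || out[len(out)-1] != "*" {
			out = append(out, "*")
		}
	}
	return strings.Join(out, "/")
}

func main() {
	terms := map[string]struct{}{"users": {}, "show": {}}
	fmt.Println(applyTerms("users/12345/show/avatar/large", terms))
	// Output: users/*/show/*
}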

+ 254 - 0
vendor/github.com/newrelic/go-agent/internal/slow_queries.go

@@ -0,0 +1,254 @@
+package internal
+
+import (
+	"bytes"
+	"container/heap"
+	"hash/fnv"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/jsonx"
+)
+
+type queryParameters map[string]interface{}
+
+func vetQueryParameters(params map[string]interface{}) queryParameters {
+	if nil == params {
+		return nil
+	}
+	// Copying the parameters into a new map is safer than modifying the map
+	// from the customer.
+	vetted := make(map[string]interface{})
+	for key, val := range params {
+		val, err := ValidateUserAttribute(key, val)
+		if nil != err {
+			continue
+		}
+		vetted[key] = val
+	}
+	return queryParameters(vetted)
+}
+
+func (q queryParameters) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('{')
+	w := jsonFieldsWriter{buf: buf}
+	for key, val := range q {
+		writeAttributeValueJSON(&w, key, val)
+	}
+	buf.WriteByte('}')
+}
+
+// https://source.datanerd.us/agents/agent-specs/blob/master/Slow-SQLs-LEGACY.md
+
+// slowQueryInstance represents a single datastore call.
+type slowQueryInstance struct {
+	// Fields populated right after the datastore segment finishes:
+
+	Duration           time.Duration
+	DatastoreMetric    string
+	ParameterizedQuery string
+	QueryParameters    queryParameters
+	Host               string
+	PortPathOrID       string
+	DatabaseName       string
+	StackTrace         StackTrace
+
+	TxnEvent
+}
+
+// Aggregation is performed to avoid reporting multiple slow queries with the
+// same query string.  Since some datastore segments may be below the slow query
+// threshold, the aggregation fields Count, Total, and Min should be taken with
+// a grain of salt.
+type slowQuery struct {
+	Count int32         // number of times the query has been observed
+	Total time.Duration // cumulative duration
+	Min   time.Duration // minimum observed duration
+
+	// When Count > 1, slowQueryInstance contains values from the slowest
+	// observation.
+	slowQueryInstance
+}
+
+type slowQueries struct {
+	priorityQueue []*slowQuery
+	// lookup maps query strings to indices in the priorityQueue
+	lookup map[string]int
+}
+
+func (slows *slowQueries) Len() int {
+	return len(slows.priorityQueue)
+}
+func (slows *slowQueries) Less(i, j int) bool {
+	pq := slows.priorityQueue
+	return pq[i].Duration < pq[j].Duration
+}
+func (slows *slowQueries) Swap(i, j int) {
+	pq := slows.priorityQueue
+	si := pq[i]
+	sj := pq[j]
+	pq[i], pq[j] = pq[j], pq[i]
+	slows.lookup[si.ParameterizedQuery] = j
+	slows.lookup[sj.ParameterizedQuery] = i
+}
+
+// Push and Pop are unused: only heap.Init and heap.Fix are used.
+func (slows *slowQueries) Push(x interface{}) {}
+func (slows *slowQueries) Pop() interface{}   { return nil }
+
+func newSlowQueries(max int) *slowQueries {
+	return &slowQueries{
+		lookup:        make(map[string]int, max),
+		priorityQueue: make([]*slowQuery, 0, max),
+	}
+}
+
+// Merge is used to merge slow queries from the transaction into the harvest.
+func (slows *slowQueries) Merge(other *slowQueries, txnEvent TxnEvent) {
+	for _, s := range other.priorityQueue {
+		cp := *s
+		cp.TxnEvent = txnEvent
+		slows.observe(cp)
+	}
+}
+
+// merge aggregates the observations from two slow queries with the same Query.
+func (slow *slowQuery) merge(other slowQuery) {
+	slow.Count += other.Count
+	slow.Total += other.Total
+
+	if other.Min < slow.Min {
+		slow.Min = other.Min
+	}
+	if other.Duration > slow.Duration {
+		slow.slowQueryInstance = other.slowQueryInstance
+	}
+}
+
+func (slows *slowQueries) observeInstance(slow slowQueryInstance) {
+	slows.observe(slowQuery{
+		Count:             1,
+		Total:             slow.Duration,
+		Min:               slow.Duration,
+		slowQueryInstance: slow,
+	})
+}
+
+func (slows *slowQueries) insertAtIndex(slow slowQuery, idx int) {
+	cpy := new(slowQuery)
+	*cpy = slow
+	slows.priorityQueue[idx] = cpy
+	slows.lookup[slow.ParameterizedQuery] = idx
+	heap.Fix(slows, idx)
+}
+
+func (slows *slowQueries) observe(slow slowQuery) {
+	// Has the query been previously observed?
+	if idx, ok := slows.lookup[slow.ParameterizedQuery]; ok {
+		slows.priorityQueue[idx].merge(slow)
+		heap.Fix(slows, idx)
+		return
+	}
+	// Has the collection reached max capacity?
+	if len(slows.priorityQueue) < cap(slows.priorityQueue) {
+		idx := len(slows.priorityQueue)
+		slows.priorityQueue = slows.priorityQueue[0 : idx+1]
+		slows.insertAtIndex(slow, idx)
+		return
+	}
+	// Is this query slower than the existing fastest?
+	fastest := slows.priorityQueue[0]
+	if slow.Duration > fastest.Duration {
+		delete(slows.lookup, fastest.ParameterizedQuery)
+		slows.insertAtIndex(slow, 0)
+		return
+	}
+}
+
+// The third element of the slow query JSON should be a hash of the query
+// string.  This hash may be used by backend services to aggregate queries which
+// have the same query string.  It is unknown whether this is actually used.
+func makeSlowQueryID(query string) uint32 {
+	h := fnv.New32a()
+	h.Write([]byte(query))
+	return h.Sum32()
+}
+
+func (slow *slowQuery) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('[')
+	jsonx.AppendString(buf, slow.TxnEvent.FinalName)
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, slow.TxnEvent.CleanURL)
+	buf.WriteByte(',')
+	jsonx.AppendInt(buf, int64(makeSlowQueryID(slow.ParameterizedQuery)))
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, slow.ParameterizedQuery)
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, slow.DatastoreMetric)
+	buf.WriteByte(',')
+	jsonx.AppendInt(buf, int64(slow.Count))
+	buf.WriteByte(',')
+	jsonx.AppendFloat(buf, slow.Total.Seconds()*1000.0)
+	buf.WriteByte(',')
+	jsonx.AppendFloat(buf, slow.Min.Seconds()*1000.0)
+	buf.WriteByte(',')
+	jsonx.AppendFloat(buf, slow.Duration.Seconds()*1000.0)
+	buf.WriteByte(',')
+	w := jsonFieldsWriter{buf: buf}
+	buf.WriteByte('{')
+	if "" != slow.Host {
+		w.stringField("host", slow.Host)
+	}
+	if "" != slow.PortPathOrID {
+		w.stringField("port_path_or_id", slow.PortPathOrID)
+	}
+	if "" != slow.DatabaseName {
+		w.stringField("database_name", slow.DatabaseName)
+	}
+	if nil != slow.StackTrace {
+		w.writerField("backtrace", slow.StackTrace)
+	}
+	if nil != slow.QueryParameters {
+		w.writerField("query_parameters", slow.QueryParameters)
+	}
+
+	sharedBetterCATIntrinsics(&slow.TxnEvent, &w)
+
+	buf.WriteByte('}')
+	buf.WriteByte(']')
+}
+
+// WriteJSON marshals the collection of slow queries into JSON according to the
+// schema expected by the collector.
+//
+// Note: This JSON does not contain the agentRunID.  This is for unknown
+// historical reasons. Since the agentRunID is included in the URL,
+// its use in the other commands' JSON is redundant (although required).
+func (slows *slowQueries) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('[')
+	buf.WriteByte('[')
+	for idx, s := range slows.priorityQueue {
+		if idx > 0 {
+			buf.WriteByte(',')
+		}
+		s.WriteJSON(buf)
+	}
+	buf.WriteByte(']')
+	buf.WriteByte(']')
+}
+
+func (slows *slowQueries) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
+	if 0 == len(slows.priorityQueue) {
+		return nil, nil
+	}
+	estimate := 1024 * len(slows.priorityQueue)
+	buf := bytes.NewBuffer(make([]byte, 0, estimate))
+	slows.WriteJSON(buf)
+	return buf.Bytes(), nil
+}
+
+func (slows *slowQueries) MergeIntoHarvest(newHarvest *Harvest) {
+}
+
+func (slows *slowQueries) EndpointMethod() string {
+	return cmdSlowSQLs
+}
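
The collection above is a bounded min-heap keyed on Duration: below capacity every new query is inserted, and once full a candidate only displaces the heap root, which is the fastest query currently held. A stripped-down sketch of that keep-the-N-slowest pattern (durations are invented milliseconds):

package main

import (
	"container/heap"
	"fmt"
)

// durations is a min-heap: the smallest (fastest) value sits at index 0
// and is the first candidate for eviction.
type durations []int

func (d durations) Len() int            { return len(d) }
func (d durations) Less(i, j int) bool  { return d[i] < d[j] }
func (d durations) Swap(i, j int)       { d[i], d[j] = d[j], d[i] }
func (d *durations) Push(x interface{}) { *d = append(*d, x.(int)) }
func (d *durations) Pop() interface{} {
	old := *d
	x := old[len(old)-1]
	*d = old[:len(old)-1]
	return x
}

func main() {
	const max = 3
	kept := durations{}
	for _, ms := range []int{120, 80, 300, 50, 500} {
		if kept.Len() < max {
			heap.Push(&kept, ms)
			continue
		}
		if ms > kept[0] { // slower than the fastest kept query
			kept[0] = ms
			heap.Fix(&kept, 0)
		}
	}
	fmt.Println(kept) // the three slowest observations; kept[0] is the fastest of them
}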

+ 172 - 0
vendor/github.com/newrelic/go-agent/internal/span_events.go

@@ -0,0 +1,172 @@
+package internal
+
+import (
+	"bytes"
+	"time"
+)
+
+// https://source.datanerd.us/agents/agent-specs/blob/master/Span-Events.md
+
+type spanCategory string
+
+const (
+	spanCategoryHTTP      spanCategory = "http"
+	spanCategoryDatastore              = "datastore"
+	spanCategoryGeneric                = "generic"
+)
+
+// SpanEvent represents a span event, necessary to support Distributed Tracing.
+type SpanEvent struct {
+	TraceID         string
+	GUID            string
+	ParentID        string
+	TransactionID   string
+	Sampled         bool
+	Priority        Priority
+	Timestamp       time.Time
+	Duration        time.Duration
+	Name            string
+	Category        spanCategory
+	IsEntrypoint    bool
+	DatastoreExtras *spanDatastoreExtras
+	ExternalExtras  *spanExternalExtras
+}
+
+type spanDatastoreExtras struct {
+	Component string
+	Statement string
+	Instance  string
+	Address   string
+	Hostname  string
+}
+
+type spanExternalExtras struct {
+	URL       string
+	Method    string
+	Component string
+}
+
+// WriteJSON prepares JSON in the format expected by the collector.
+func (e *SpanEvent) WriteJSON(buf *bytes.Buffer) {
+	w := jsonFieldsWriter{buf: buf}
+	buf.WriteByte('[')
+	buf.WriteByte('{')
+	w.stringField("type", "Span")
+	w.stringField("traceId", e.TraceID)
+	w.stringField("guid", e.GUID)
+	if "" != e.ParentID {
+		w.stringField("parentId", e.ParentID)
+	}
+	w.stringField("transactionId", e.TransactionID)
+	w.boolField("sampled", e.Sampled)
+	w.writerField("priority", e.Priority)
+	w.intField("timestamp", e.Timestamp.UnixNano()/(1000*1000)) // in milliseconds
+	w.floatField("duration", e.Duration.Seconds())
+	w.stringField("name", e.Name)
+	w.stringField("category", string(e.Category))
+	if e.IsEntrypoint {
+		w.boolField("nr.entryPoint", true)
+	}
+	if ex := e.DatastoreExtras; nil != ex {
+		if "" != ex.Component {
+			w.stringField("component", ex.Component)
+		}
+		if "" != ex.Statement {
+			w.stringField("db.statement", ex.Statement)
+		}
+		if "" != ex.Instance {
+			w.stringField("db.instance", ex.Instance)
+		}
+		if "" != ex.Address {
+			w.stringField("peer.address", ex.Address)
+		}
+		if "" != ex.Hostname {
+			w.stringField("peer.hostname", ex.Hostname)
+		}
+		w.stringField("span.kind", "client")
+	}
+
+	if ex := e.ExternalExtras; nil != ex {
+		if "" != ex.URL {
+			w.stringField("http.url", ex.URL)
+		}
+		if "" != ex.Method {
+			w.stringField("http.method", ex.Method)
+		}
+		w.stringField("span.kind", "client")
+		w.stringField("component", "http")
+	}
+
+	buf.WriteByte('}')
+	buf.WriteByte(',')
+	buf.WriteByte('{')
+	buf.WriteByte('}')
+	buf.WriteByte(',')
+	buf.WriteByte('{')
+	buf.WriteByte('}')
+	buf.WriteByte(']')
+}
+
+// MarshalJSON is used for testing.
+func (e *SpanEvent) MarshalJSON() ([]byte, error) {
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+
+	e.WriteJSON(buf)
+
+	return buf.Bytes(), nil
+}
+
+type spanEvents struct {
+	events *analyticsEvents
+}
+
+func newSpanEvents(max int) *spanEvents {
+	return &spanEvents{
+		events: newAnalyticsEvents(max),
+	}
+}
+
+func (events *spanEvents) addEvent(e *SpanEvent, cat *BetterCAT) {
+	e.TraceID = cat.TraceID()
+	e.TransactionID = cat.ID
+	e.Sampled = cat.Sampled
+	e.Priority = cat.Priority
+	events.events.addEvent(analyticsEvent{priority: cat.Priority, jsonWriter: e})
+}
+
+// MergeFromTransaction merges the span events from a transaction into the
+// harvest's span events.  This should only be called if the transaction was
+// sampled and span events are enabled.
+func (events *spanEvents) MergeFromTransaction(txndata *TxnData) {
+	root := &SpanEvent{
+		GUID:         txndata.getRootSpanID(),
+		Timestamp:    txndata.Start,
+		Duration:     txndata.Duration,
+		Name:         txndata.FinalName,
+		Category:     spanCategoryGeneric,
+		IsEntrypoint: true,
+	}
+	if nil != txndata.BetterCAT.Inbound {
+		root.ParentID = txndata.BetterCAT.Inbound.ID
+	}
+	events.addEvent(root, &txndata.BetterCAT)
+
+	for _, evt := range txndata.spanEvents {
+		events.addEvent(evt, &txndata.BetterCAT)
+	}
+}
+
+func (events *spanEvents) MergeIntoHarvest(h *Harvest) {
+	h.SpanEvents.events.mergeFailed(events.events)
+}
+
+func (events *spanEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
+	return events.events.CollectorJSON(agentRunID)
+}
+
+func (events *spanEvents) numSeen() float64  { return events.events.NumSeen() }
+func (events *spanEvents) numSaved() float64 { return events.events.NumSaved() }
+
+func (events *spanEvents) EndpointMethod() string {
+	return cmdSpanEvents
+}
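
The writers above assemble collector JSON by hand into a bytes.Buffer rather than marshaling a struct, which lets optional fields be skipped cheaply. A small sketch of the same comma-tracking pattern (jsonFieldsWriter lives elsewhere in this package; this stand-in is hypothetical):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// writeField appends a key/value pair, inserting a comma before every
// field except the first one written after '{'.
func writeField(buf *bytes.Buffer, first *bool, key string, val interface{}) {
	if !*first {
		buf.WriteByte(',')
	}
	*first = false
	k, _ := json.Marshal(key)
	v, _ := json.Marshal(val)
	buf.Write(k)
	buf.WriteByte(':')
	buf.Write(v)
}

func main() {
	var buf bytes.Buffer
	first := true
	buf.WriteString(`[{`)
	writeField(&buf, &first, "type", "Span")
	writeField(&buf, &first, "category", "http")
	writeField(&buf, &first, "duration", 0.042)
	// Spans carry empty user and agent attribute hashes, hence "{},{}".
	buf.WriteString(`},{},{}]`)
	fmt.Println(buf.String())
}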

+ 81 - 0
vendor/github.com/newrelic/go-agent/internal/stacktrace.go

@@ -0,0 +1,81 @@
+package internal
+
+import (
+	"bytes"
+	"path"
+	"runtime"
+)
+
+// StackTrace is a stack trace.
+type StackTrace []uintptr
+
+// GetStackTrace returns a new StackTrace.
+func GetStackTrace(skipFrames int) StackTrace {
+	skip := 2 // skips runtime.Callers and this function
+	skip += skipFrames
+
+	callers := make([]uintptr, maxStackTraceFrames)
+	written := runtime.Callers(skip, callers)
+	return StackTrace(callers[0:written])
+}
+
+func pcToFunc(pc uintptr) (*runtime.Func, uintptr) {
+	// The Golang runtime package documentation says "To look up the file
+	// and line number of the call itself, use pc[i]-1. As an exception to
+	// this rule, if pc[i-1] corresponds to the function runtime.sigpanic,
+	// then pc[i] is the program counter of a faulting instruction and
+	// should be used without any subtraction."
+	//
+	// TODO: Fully understand when this subtraction is necessary.
+	place := pc - 1
+	return runtime.FuncForPC(place), place
+}
+
+func topCallerNameBase(st StackTrace) string {
+	f, _ := pcToFunc(st[0])
+	if nil == f {
+		return ""
+	}
+	return path.Base(f.Name())
+}
+
+// WriteJSON adds the stack trace to the buffer in the JSON form expected by the
+// collector.
+func (st StackTrace) WriteJSON(buf *bytes.Buffer) {
+	buf.WriteByte('[')
+	for i, pc := range st {
+		// Stack traces may be provided by the customer, and therefore
+		// may be excessively long.  The truncation is done here to
+		// facilitate testing.
+		if i >= maxStackTraceFrames {
+			break
+		}
+		if i > 0 {
+			buf.WriteByte(',')
+		}
+		// Implements the format documented here:
+		// https://source.datanerd.us/agents/agent-specs/blob/master/Stack-Traces.md
+		buf.WriteByte('{')
+		if f, place := pcToFunc(pc); nil != f {
+			name := path.Base(f.Name())
+			file, line := f.FileLine(place)
+
+			w := jsonFieldsWriter{buf: buf}
+			w.stringField("filepath", file)
+			w.stringField("name", name)
+			w.intField("line", int64(line))
+		}
+		buf.WriteByte('}')
+	}
+	buf.WriteByte(']')
+}
+
+// MarshalJSON prepares JSON in the format expected by the collector.
+func (st StackTrace) MarshalJSON() ([]byte, error) {
+	estimate := 256 * len(st)
+	buf := bytes.NewBuffer(make([]byte, 0, estimate))
+
+	st.WriteJSON(buf)
+
+	return buf.Bytes(), nil
+}
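
For context, here is how the pieces above compose with runtime.Callers in a minimal sketch; the skip of 2 mirrors GetStackTrace (skipping runtime.Callers and the capturing function itself):

package main

import (
	"fmt"
	"path"
	"runtime"
)

func capture() []uintptr {
	pcs := make([]uintptr, 32)
	n := runtime.Callers(2, pcs) // skip runtime.Callers and capture
	return pcs[:n]
}

func main() {
	for _, pc := range capture() {
		// pc-1 points at the call instruction rather than the return address.
		if f := runtime.FuncForPC(pc - 1); f != nil {
			file, line := f.FileLine(pc - 1)
			fmt.Printf("%s %s:%d\n", path.Base(f.Name()), file, line)
		}
	}
}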

+ 50 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/bootid.go

@@ -0,0 +1,50 @@
+package sysinfo
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"runtime"
+)
+
+// BootID returns the boot ID of the executing kernel.
+func BootID() (string, error) {
+	if "linux" != runtime.GOOS {
+		return "", ErrFeatureUnsupported
+	}
+	data, err := ioutil.ReadFile("/proc/sys/kernel/random/boot_id")
+	if err != nil {
+		return "", err
+	}
+
+	return validateBootID(data)
+}
+
+type invalidBootID string
+
+func (e invalidBootID) Error() string {
+	return fmt.Sprintf("Boot id has unrecognized format, id=%q", string(e))
+}
+
+func isASCIIByte(b byte) bool {
+	return (b >= 0x20 && b <= 0x7f)
+}
+
+func validateBootID(data []byte) (string, error) {
+	// We're going to go for the permissive reading of
+	// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md:
+	// any ASCII string (excluding control characters, which are presumably not
+	// in the spirit of the spec) will be sent, truncated to at most 128
+	// bytes.
+	trunc := bytes.TrimSpace(data)
+	if len(trunc) > 128 {
+		trunc = trunc[:128]
+	}
+	for _, b := range trunc {
+		if !isASCIIByte(b) {
+			return "", invalidBootID(data)
+		}
+	}
+
+	return string(trunc), nil
+}
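
A quick sketch of those validation rules applied to a typical boot_id value (the UUID below is made up):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	data := []byte("a8f5f167-f44f-4964-baf4-2f7d3c6e1a2b\n")
	trunc := bytes.TrimSpace(data)
	if len(trunc) > 128 { // cap the length per the permissive reading
		trunc = trunc[:128]
	}
	valid := true
	for _, b := range trunc {
		if b < 0x20 || b > 0x7f { // reject control and non-ASCII bytes
			valid = false
			break
		}
	}
	fmt.Println(string(trunc), valid)
}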

+ 114 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go

@@ -0,0 +1,114 @@
+package sysinfo
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"runtime"
+)
+
+var (
+	// ErrDockerNotFound is returned if a Docker ID is not found in
+	// /proc/self/cgroup
+	ErrDockerNotFound = errors.New("Docker ID not found")
+)
+
+// DockerID attempts to detect Docker.
+func DockerID() (string, error) {
+	if "linux" != runtime.GOOS {
+		return "", ErrFeatureUnsupported
+	}
+
+	f, err := os.Open("/proc/self/cgroup")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	return parseDockerID(f)
+}
+
+var (
+	// The DockerID must be a 64-character lowercase hex string.
+	// Be greedy and match anything 64 characters or longer to spot invalid IDs.
+	dockerIDLength   = 64
+	dockerIDRegexRaw = fmt.Sprintf("[0-9a-f]{%d,}", dockerIDLength)
+	dockerIDRegex    = regexp.MustCompile(dockerIDRegexRaw)
+)
+
+func parseDockerID(r io.Reader) (string, error) {
+	// Each line in the cgroup file consists of three colon delimited fields.
+	//   1. hierarchy ID  - we don't care about this
+	//   2. subsystems    - comma separated list of cgroup subsystem names
+	//   3. control group - control group to which the process belongs
+	//
+	// Example
+	//   5:cpuacct,cpu,cpuset:/daemons
+
+	var id string
+
+	for scanner := bufio.NewScanner(r); scanner.Scan(); {
+		line := scanner.Bytes()
+		cols := bytes.SplitN(line, []byte(":"), 3)
+
+		if len(cols) < 3 {
+			continue
+		}
+
+		// We're only interested in the cpu subsystem.
+		if !isCPUCol(cols[1]) {
+			continue
+		}
+
+		id = dockerIDRegex.FindString(string(cols[2]))
+
+		if err := validateDockerID(id); err != nil {
+			// We can stop searching at this point, the CPU
+			// subsystem should only occur once, and its cgroup is
+			// not docker or not a format we accept.
+			return "", err
+		}
+		return id, nil
+	}
+
+	return "", ErrDockerNotFound
+}
+
+func isCPUCol(col []byte) bool {
+	// Sometimes we have multiple subsystems in one line, as in this example
+	// from:
+	// https://source.datanerd.us/newrelic/cross_agent_tests/blob/master/docker_container_id/docker-1.1.2-native-driver-systemd.txt
+	//
+	// 3:cpuacct,cpu:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope
+	splitCSV := func(r rune) bool { return r == ',' }
+	subsysCPU := []byte("cpu")
+
+	for _, subsys := range bytes.FieldsFunc(col, splitCSV) {
+		if bytes.Equal(subsysCPU, subsys) {
+			return true
+		}
+	}
+	return false
+}
+
+func isHex(r rune) bool {
+	return ('0' <= r && r <= '9') || ('a' <= r && r <= 'f')
+}
+
+func validateDockerID(id string) error {
+	if len(id) != 64 {
+		return fmt.Errorf("%s is not %d characters long", id, dockerIDLength)
+	}
+
+	for _, c := range id {
+		if !isHex(c) {
+			return fmt.Errorf("Character: %c is not hex in string %s", c, id)
+		}
+	}
+
+	return nil
+}
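
Putting the above together, a single cpu-subsystem cgroup line yields the container ID as follows (the 64-hex-digit ID is fabricated but well formed):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	line := "3:cpuacct,cpu:/docker/47cbd16b77c50cbf71401c069cd2189f0e659af17d5a2daca3bddf59d8a870b2"
	cols := strings.SplitN(line, ":", 3) // hierarchy ID, subsystems, control group
	re := regexp.MustCompile("[0-9a-f]{64,}")
	fmt.Println(re.FindString(cols[2]))
}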

+ 10 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/errors.go

@@ -0,0 +1,10 @@
+package sysinfo
+
+import (
+	"errors"
+)
+
+var (
+	// ErrFeatureUnsupported indicates unsupported platform.
+	ErrFeatureUnsupported = errors.New("That feature is not supported on this platform")
+)

+ 10 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go

@@ -0,0 +1,10 @@
+// +build !linux
+
+package sysinfo
+
+import "os"
+
+// Hostname returns the host name.
+func Hostname() (string, error) {
+	return os.Hostname()
+}

+ 50 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go

@@ -0,0 +1,50 @@
+package sysinfo
+
+import (
+	"os"
+	"syscall"
+)
+
+// Hostname returns the host name.
+func Hostname() (string, error) {
+	// Try the builtin API first, which is designed to match the output of
+	// /bin/hostname, and fall back to uname(2) if that fails, to match the
+	// behavior of gethostname(2) as implemented by glibc. On Linux, all of
+	// these methods should result in the same value because sethostname(2)
+	// limits the hostname to 64 bytes, the same size as the nodename field
+	// returned by uname(2). Note that this correspondence does not hold on
+	// other platforms.
+	//
+	// os.Hostname failures should be exceedingly rare; however, some systems
+	// configure SELinux to deny read access to /proc/sys/kernel/hostname,
+	// Red Hat's OpenShift platform being one example. os.Hostname can also fail
+	// if some or all of /proc has been hidden via chroot(2) or manipulation of
+	// the current process's filesystem namespace via the cgroups APIs.
+	// Docker is an example of a tool that can configure such an
+	// environment.
+	name, err := os.Hostname()
+	if err == nil {
+		return name, nil
+	}
+
+	var uts syscall.Utsname
+	if err2 := syscall.Uname(&uts); err2 != nil {
+		// The man page documents only one possible error for uname(2),
+		// suggesting that as long as the buffer given is valid, the
+		// call will never fail. Return the original error in the hope
+		// it provides more relevant information about why the hostname
+		// can't be retrieved.
+		return "", err
+	}
+
+	// Convert Nodename to a Go string.
+	buf := make([]byte, 0, len(uts.Nodename))
+	for _, c := range uts.Nodename {
+		if c == 0 {
+			break
+		}
+		buf = append(buf, byte(c))
+	}
+
+	return string(buf), nil
+}

+ 40 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go

@@ -0,0 +1,40 @@
+package sysinfo
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"regexp"
+	"strconv"
+)
+
+// BytesToMebibytes converts bytes into mebibytes.
+func BytesToMebibytes(bts uint64) uint64 {
+	return bts / ((uint64)(1024 * 1024))
+}
+
+var (
+	meminfoRe           = regexp.MustCompile(`^MemTotal:\s+([0-9]+)\s+[kK]B$`)
+	errMemTotalNotFound = errors.New("supported MemTotal not found in /proc/meminfo")
+)
+
+// parseProcMeminfo is used to parse Linux's "/proc/meminfo".  It is located
+// here so that the relevant cross agent tests will be run on all platforms.
+func parseProcMeminfo(f io.Reader) (uint64, error) {
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		if m := meminfoRe.FindSubmatch(scanner.Bytes()); m != nil {
+			kb, err := strconv.ParseUint(string(m[1]), 10, 64)
+			if err != nil {
+				return 0, err
+			}
+			return kb * 1024, nil
+		}
+	}
+
+	err := scanner.Err()
+	if err == nil {
+		err = errMemTotalNotFound
+	}
+	return 0, err
+}
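
As a concrete illustration of the parser above, one matching MemTotal line converts from kB to bytes like this (the figure is invented):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	line := "MemTotal:        8056340 kB"
	re := regexp.MustCompile(`^MemTotal:\s+([0-9]+)\s+[kK]B$`)
	m := re.FindStringSubmatch(line)
	kb, _ := strconv.ParseUint(m[1], 10, 64)
	fmt.Println(kb * 1024) // 8249692160 bytes
}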

+ 29 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go

@@ -0,0 +1,29 @@
+package sysinfo
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// PhysicalMemoryBytes returns the total amount of host memory.
+func PhysicalMemoryBytes() (uint64, error) {
+	mib := []int32{6 /* CTL_HW */, 24 /* HW_MEMSIZE */}
+
+	buf := make([]byte, 8)
+	bufLen := uintptr(8)
+
+	_, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)),
+		uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)),
+		uintptr(0), uintptr(0))
+
+	if e1 != 0 {
+		return 0, e1
+	}
+
+	if bufLen != 8 {
+		return 0, syscall.EIO
+	}
+
+	return *(*uint64)(unsafe.Pointer(&buf[0])), nil
+}

+ 32 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go

@@ -0,0 +1,32 @@
+package sysinfo
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// PhysicalMemoryBytes returns the total amount of host memory.
+func PhysicalMemoryBytes() (uint64, error) {
+	mib := []int32{6 /* CTL_HW */, 5 /* HW_PHYSMEM */}
+
+	buf := make([]byte, 8)
+	bufLen := uintptr(8)
+
+	_, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL,
+		uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)),
+		uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)),
+		uintptr(0), uintptr(0))
+
+	if e1 != 0 {
+		return 0, e1
+	}
+
+	switch bufLen {
+	case 4:
+		return uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), nil
+	case 8:
+		return *(*uint64)(unsafe.Pointer(&buf[0])), nil
+	default:
+		return 0, syscall.EIO
+	}
+}

+ 14 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go

@@ -0,0 +1,14 @@
+package sysinfo
+
+import "os"
+
+// PhysicalMemoryBytes returns the total amount of host memory.
+func PhysicalMemoryBytes() (uint64, error) {
+	f, err := os.Open("/proc/meminfo")
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	return parseProcMeminfo(f)
+}

+ 26 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go

@@ -0,0 +1,26 @@
+package sysinfo
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+// PhysicalMemoryBytes returns the total amount of host memory.
+func PhysicalMemoryBytes() (uint64, error) {
+	// The function we're calling on Solaris is
+	// long sysconf(int name);
+	var pages C.long
+	var pagesizeBytes C.long
+	var err error
+
+	pagesizeBytes, err = C.sysconf(C._SC_PAGE_SIZE)
+	if pagesizeBytes < 1 {
+		return 0, err
+	}
+	pages, err = C.sysconf(C._SC_PHYS_PAGES)
+	if pages < 1 {
+		return 0, err
+	}
+
+	return uint64(pages) * uint64(pagesizeBytes), nil
+}

+ 23 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go

@@ -0,0 +1,23 @@
+package sysinfo
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// PhysicalMemoryBytes returns the total amount of host memory.
+func PhysicalMemoryBytes() (uint64, error) {
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/cc300158(v=vs.85).aspx
+	// http://stackoverflow.com/questions/30743070/query-total-physical-memory-in-windows-with-golang
+	mod := syscall.NewLazyDLL("kernel32.dll")
+	proc := mod.NewProc("GetPhysicallyInstalledSystemMemory")
+	var memkb uint64
+
+	ret, _, err := proc.Call(uintptr(unsafe.Pointer(&memkb)))
+	// The return value is TRUE (1) on success and FALSE (0) on failure.
+	if ret != 1 {
+		return 0, err
+	}
+
+	return memkb * 1024, nil
+}

+ 11 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go

@@ -0,0 +1,11 @@
+package sysinfo
+
+import (
+	"time"
+)
+
+// Usage contains process times.
+type Usage struct {
+	System time.Duration
+	User   time.Duration
+}

+ 26 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go

@@ -0,0 +1,26 @@
+// +build !windows
+
+package sysinfo
+
+import (
+	"syscall"
+	"time"
+)
+
+func timevalToDuration(tv syscall.Timeval) time.Duration {
+	return time.Duration(tv.Nano()) * time.Nanosecond
+}
+
+// GetUsage gathers process times.
+func GetUsage() (Usage, error) {
+	ru := syscall.Rusage{}
+	err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru)
+	if err != nil {
+		return Usage{}, err
+	}
+
+	return Usage{
+		System: timevalToDuration(ru.Stime),
+		User:   timevalToDuration(ru.Utime),
+	}, nil
+}

+ 34 - 0
vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go

@@ -0,0 +1,34 @@
+package sysinfo
+
+import (
+	"syscall"
+	"time"
+)
+
+func filetimeToDuration(ft *syscall.Filetime) time.Duration {
+	ns := ft.Nanoseconds()
+	return time.Duration(ns)
+}
+
+// GetUsage gathers process times.
+func GetUsage() (Usage, error) {
+	var creationTime syscall.Filetime
+	var exitTime syscall.Filetime
+	var kernelTime syscall.Filetime
+	var userTime syscall.Filetime
+
+	handle, err := syscall.GetCurrentProcess()
+	if err != nil {
+		return Usage{}, err
+	}
+
+	err = syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime)
+	if err != nil {
+		return Usage{}, err
+	}
+
+	return Usage{
+		System: filetimeToDuration(&kernelTime),
+		User:   filetimeToDuration(&userTime),
+	}, nil
+}

+ 597 - 0
vendor/github.com/newrelic/go-agent/internal/tracing.go

@@ -0,0 +1,597 @@
+package internal
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/cat"
+	"github.com/newrelic/go-agent/internal/sysinfo"
+)
+
+// MarshalJSON limits the number of decimals.
+func (p *Priority) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(priorityFormat, *p)), nil
+}
+
+// WriteJSON limits the number of decimals.
+func (p Priority) WriteJSON(buf *bytes.Buffer) {
+	fmt.Fprintf(buf, priorityFormat, p)
+}
+
+// TxnEvent represents a transaction.
+// https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md
+// https://newrelic.atlassian.net/wiki/display/eng/Agent+Support+for+Synthetics%3A+Forced+Transaction+Traces+and+Analytic+Events
+type TxnEvent struct {
+	FinalName string
+	Start     time.Time
+	Duration  time.Duration
+	Queuing   time.Duration
+	Zone      ApdexZone
+	Attrs     *Attributes
+	DatastoreExternalTotals
+	// CleanURL is not used in txn events, but is used in traced errors which embed TxnEvent.
+	CleanURL     string
+	CrossProcess TxnCrossProcess
+	BetterCAT    BetterCAT
+	HasError     bool
+}
+
+// BetterCAT stores the transaction's priority and all fields related
+// to a DistributedTracer's Cross-Application Trace.
+type BetterCAT struct {
+	Enabled  bool
+	Priority Priority
+	Sampled  bool
+	Inbound  *Payload
+	ID       string
+}
+
+// TraceID returns the trace id.
+func (e BetterCAT) TraceID() string {
+	if nil != e.Inbound {
+		return e.Inbound.TracedID
+	}
+	return e.ID
+}
+
+// TxnData contains the recorded data of a transaction.
+type TxnData struct {
+	TxnEvent
+	IsWeb          bool
+	Name           string    // Work in progress name.
+	Errors         TxnErrors // Lazily initialized.
+	Stop           time.Time
+	ApdexThreshold time.Duration
+	Exclusive      time.Duration
+
+	finishedChildren time.Duration
+	stamp            segmentStamp
+	stack            []segmentFrame
+
+	SpanEventsEnabled bool
+	rootSpanID        string
+	spanEvents        []*SpanEvent
+
+	customSegments    map[string]*metricData
+	datastoreSegments map[DatastoreMetricKey]*metricData
+	externalSegments  map[externalMetricKey]*metricData
+
+	TxnTrace
+
+	SlowQueriesEnabled bool
+	SlowQueryThreshold time.Duration
+	SlowQueries        *slowQueries
+
+	// These better CAT supportability fields are left outside of
+	// TxnEvent.BetterCAT to minimize the size of transaction event memory.
+	DistributedTracingSupport
+}
+
+type segmentStamp uint64
+
+type segmentTime struct {
+	Stamp segmentStamp
+	Time  time.Time
+}
+
+// SegmentStartTime is embedded into the top level segments (rather than
+// segmentTime) to keep those structures small and thereby minimize allocations.
+type SegmentStartTime struct {
+	Stamp segmentStamp
+	Depth int
+}
+
+type segmentFrame struct {
+	segmentTime
+	children time.Duration
+	spanID   string
+}
+
+type segmentEnd struct {
+	start     segmentTime
+	stop      segmentTime
+	duration  time.Duration
+	exclusive time.Duration
+	SpanID    string
+	ParentID  string
+}
+
+func (end segmentEnd) spanEvent() *SpanEvent {
+	if "" == end.SpanID {
+		return nil
+	}
+	return &SpanEvent{
+		GUID:         end.SpanID,
+		ParentID:     end.ParentID,
+		Timestamp:    end.start.Time,
+		Duration:     end.duration,
+		IsEntrypoint: false,
+	}
+}
+
+const (
+	datastoreProductUnknown   = "Unknown"
+	datastoreOperationUnknown = "other"
+)
+
+// HasErrors indicates whether the transaction had errors.
+func (t *TxnData) HasErrors() bool {
+	return len(t.Errors) > 0
+}
+
+func (t *TxnData) time(now time.Time) segmentTime {
+	// Update the stamp before using it so that a 0 stamp can be special.
+	t.stamp++
+	return segmentTime{
+		Time:  now,
+		Stamp: t.stamp,
+	}
+}
+
+// TracerRootChildren is used to calculate a transaction's exclusive duration.
+func TracerRootChildren(t *TxnData) time.Duration {
+	var lostChildren time.Duration
+	for i := 0; i < len(t.stack); i++ {
+		lostChildren += t.stack[i].children
+	}
+	return t.finishedChildren + lostChildren
+}
+
+// StartSegment begins a segment.
+func StartSegment(t *TxnData, now time.Time) SegmentStartTime {
+	tm := t.time(now)
+	t.stack = append(t.stack, segmentFrame{
+		segmentTime: tm,
+		children:    0,
+	})
+
+	return SegmentStartTime{
+		Stamp: tm.Stamp,
+		Depth: len(t.stack) - 1,
+	}
+}
+
+// NewSpanID returns a random identifier in the format used for spans and
+// transactions.
+func NewSpanID() string {
+	bits := RandUint64()
+	return fmt.Sprintf("%016x", bits)
+}
+
+func (t *TxnData) getRootSpanID() string {
+	if "" == t.rootSpanID {
+		t.rootSpanID = NewSpanID()
+	}
+	return t.rootSpanID
+}
+
+// CurrentSpanIdentifier returns the identifier of the span at the top of the
+// segment stack.
+func (t *TxnData) CurrentSpanIdentifier() string {
+	if 0 == len(t.stack) {
+		return t.getRootSpanID()
+	}
+	if "" == t.stack[len(t.stack)-1].spanID {
+		t.stack[len(t.stack)-1].spanID = NewSpanID()
+	}
+	return t.stack[len(t.stack)-1].spanID
+}
+
+func (t *TxnData) saveSpanEvent(e *SpanEvent) {
+	if len(t.spanEvents) < maxSpanEvents {
+		t.spanEvents = append(t.spanEvents, e)
+	}
+}
+
+var (
+	errMalformedSegment = errors.New("segment identifier malformed: perhaps unsafe code has modified it?")
+	errSegmentOrder     = errors.New(`improper segment use: the Transaction must be used ` +
+		`in a single goroutine and segments must be ended in "last started first ended" order: ` +
+		`see https://github.com/newrelic/go-agent/blob/master/GUIDE.md#segments`)
+)
+
+func endSegment(t *TxnData, start SegmentStartTime, now time.Time) (segmentEnd, error) {
+	if 0 == start.Stamp {
+		return segmentEnd{}, errMalformedSegment
+	}
+	if start.Depth >= len(t.stack) {
+		return segmentEnd{}, errSegmentOrder
+	}
+	if start.Depth < 0 {
+		return segmentEnd{}, errMalformedSegment
+	}
+	frame := t.stack[start.Depth]
+	if start.Stamp != frame.Stamp {
+		return segmentEnd{}, errSegmentOrder
+	}
+
+	var children time.Duration
+	for i := start.Depth; i < len(t.stack); i++ {
+		children += t.stack[i].children
+	}
+	s := segmentEnd{
+		stop:  t.time(now),
+		start: frame.segmentTime,
+	}
+	if s.stop.Time.After(s.start.Time) {
+		s.duration = s.stop.Time.Sub(s.start.Time)
+	}
+	if s.duration > children {
+		s.exclusive = s.duration - children
+	}
+
+	// Note that we expect (depth == (len(t.stack) - 1)).  However, if
+	// (depth < (len(t.stack) - 1)), that's ok: a panic could have popped
+	// some stack frames (and the consumer was not using defer).
+
+	if 0 == start.Depth {
+		t.finishedChildren += s.duration
+	} else {
+		t.stack[start.Depth-1].children += s.duration
+	}
+
+	t.stack = t.stack[0:start.Depth]
+
+	if t.BetterCAT.Sampled && t.SpanEventsEnabled {
+		s.SpanID = frame.spanID
+		if "" == s.SpanID {
+			s.SpanID = NewSpanID()
+		}
+		// Note that the current span identifier is the parent's
+		// identifier because we've already popped the segment that's
+		// ending off of the stack.
+		s.ParentID = t.CurrentSpanIdentifier()
+	}
+
+	return s, nil
+}
+
+// EndBasicSegment ends a basic segment.
+func EndBasicSegment(t *TxnData, start SegmentStartTime, now time.Time, name string) error {
+	end, err := endSegment(t, start, now)
+	if nil != err {
+		return err
+	}
+	if nil == t.customSegments {
+		t.customSegments = make(map[string]*metricData)
+	}
+	m := metricDataFromDuration(end.duration, end.exclusive)
+	if data, ok := t.customSegments[name]; ok {
+		data.aggregate(m)
+	} else {
+		// Use `new` in place of &m so that m is not
+		// automatically moved to the heap.
+		cpy := new(metricData)
+		*cpy = m
+		t.customSegments[name] = cpy
+	}
+
+	if t.TxnTrace.considerNode(end) {
+		t.TxnTrace.witnessNode(end, customSegmentMetric(name), nil)
+	}
+
+	if evt := end.spanEvent(); evt != nil {
+		evt.Name = customSegmentMetric(name)
+		evt.Category = spanCategoryGeneric
+		t.saveSpanEvent(evt)
+	}
+
+	return nil
+}
+
+// EndExternalSegment ends an external segment.
+func EndExternalSegment(t *TxnData, start SegmentStartTime, now time.Time, u *url.URL, method string, resp *http.Response) error {
+	end, err := endSegment(t, start, now)
+	if nil != err {
+		return err
+	}
+
+	host := HostFromURL(u)
+	if "" == host {
+		host = "unknown"
+	}
+
+	var appData *cat.AppDataHeader
+	if resp != nil {
+		appData, err = t.CrossProcess.ParseAppData(HTTPHeaderToAppData(resp.Header))
+		if err != nil {
+			return err
+		}
+	}
+
+	var crossProcessID string
+	var transactionName string
+	var transactionGUID string
+	if appData != nil {
+		crossProcessID = appData.CrossProcessID
+		transactionName = appData.TransactionName
+		transactionGUID = appData.TransactionGUID
+	}
+
+	key := externalMetricKey{
+		Host: host,
+		ExternalCrossProcessID:  crossProcessID,
+		ExternalTransactionName: transactionName,
+	}
+	if nil == t.externalSegments {
+		t.externalSegments = make(map[externalMetricKey]*metricData)
+	}
+	t.externalCallCount++
+	t.externalDuration += end.duration
+	m := metricDataFromDuration(end.duration, end.exclusive)
+	if data, ok := t.externalSegments[key]; ok {
+		data.aggregate(m)
+	} else {
+		// Use `new` in place of &m so that m is not
+		// automatically moved to the heap.
+		cpy := new(metricData)
+		*cpy = m
+		t.externalSegments[key] = cpy
+	}
+
+	if t.TxnTrace.considerNode(end) {
+		t.TxnTrace.witnessNode(end, externalHostMetric(key), &traceNodeParams{
+			CleanURL:        SafeURL(u),
+			TransactionGUID: transactionGUID,
+		})
+	}
+
+	if evt := end.spanEvent(); evt != nil {
+		evt.Name = externalHostMetric(key)
+		evt.Category = spanCategoryHTTP
+		evt.ExternalExtras = &spanExternalExtras{
+			URL:    SafeURL(u),
+			Method: method,
+		}
+		t.saveSpanEvent(evt)
+	}
+
+	return nil
+}
+
+// EndDatastoreParams contains the parameters for EndDatastoreSegment.
+type EndDatastoreParams struct {
+	Tracer             *TxnData
+	Start              SegmentStartTime
+	Now                time.Time
+	Product            string
+	Collection         string
+	Operation          string
+	ParameterizedQuery string
+	QueryParameters    map[string]interface{}
+	Host               string
+	PortPathOrID       string
+	Database           string
+}
+
+const (
+	unknownDatastoreHost         = "unknown"
+	unknownDatastorePortPathOrID = "unknown"
+)
+
+var (
+	// ThisHost is the system hostname.
+	ThisHost = func() string {
+		if h, err := sysinfo.Hostname(); nil == err {
+			return h
+		}
+		return unknownDatastoreHost
+	}()
+	hostsToReplace = map[string]struct{}{
+		"localhost":       {},
+		"127.0.0.1":       {},
+		"0.0.0.0":         {},
+		"0:0:0:0:0:0:0:1": {},
+		"::1":             {},
+		"0:0:0:0:0:0:0:0": {},
+		"::":              {},
+	}
+)
+
+func (t TxnData) slowQueryWorthy(d time.Duration) bool {
+	return t.SlowQueriesEnabled && (d >= t.SlowQueryThreshold)
+}
+
+func datastoreSpanAddress(host, portPathOrID string) string {
+	if "" != host && "" != portPathOrID {
+		return host + ":" + portPathOrID
+	}
+	if "" != host {
+		return host
+	}
+	return portPathOrID
+}
+
+// EndDatastoreSegment ends a datastore segment.
+func EndDatastoreSegment(p EndDatastoreParams) error {
+	end, err := endSegment(p.Tracer, p.Start, p.Now)
+	if nil != err {
+		return err
+	}
+	if p.Operation == "" {
+		p.Operation = datastoreOperationUnknown
+	}
+	if p.Product == "" {
+		p.Product = datastoreProductUnknown
+	}
+	if p.Host == "" && p.PortPathOrID != "" {
+		p.Host = unknownDatastoreHost
+	}
+	if p.PortPathOrID == "" && p.Host != "" {
+		p.PortPathOrID = unknownDatastorePortPathOrID
+	}
+	if _, ok := hostsToReplace[p.Host]; ok {
+		p.Host = ThisHost
+	}
+
+	// We still want to create a slowQuery if the consumer has not provided
+	// a Query string (or it has been removed by LASP) since the stack trace
+	// has value.
+	if p.ParameterizedQuery == "" {
+		collection := p.Collection
+		if "" == collection {
+			collection = "unknown"
+		}
+		p.ParameterizedQuery = fmt.Sprintf(`'%s' on '%s' using '%s'`,
+			p.Operation, collection, p.Product)
+	}
+
+	key := DatastoreMetricKey{
+		Product:      p.Product,
+		Collection:   p.Collection,
+		Operation:    p.Operation,
+		Host:         p.Host,
+		PortPathOrID: p.PortPathOrID,
+	}
+	if nil == p.Tracer.datastoreSegments {
+		p.Tracer.datastoreSegments = make(map[DatastoreMetricKey]*metricData)
+	}
+	p.Tracer.datastoreCallCount++
+	p.Tracer.datastoreDuration += end.duration
+	m := metricDataFromDuration(end.duration, end.exclusive)
+	if data, ok := p.Tracer.datastoreSegments[key]; ok {
+		data.aggregate(m)
+	} else {
+		// Use `new` in place of &m so that m is not
+		// automatically moved to the heap.
+		cpy := new(metricData)
+		*cpy = m
+		p.Tracer.datastoreSegments[key] = cpy
+	}
+
+	scopedMetric := datastoreScopedMetric(key)
+	queryParams := vetQueryParameters(p.QueryParameters)
+
+	if p.Tracer.TxnTrace.considerNode(end) {
+		p.Tracer.TxnTrace.witnessNode(end, scopedMetric, &traceNodeParams{
+			Host:            p.Host,
+			PortPathOrID:    p.PortPathOrID,
+			Database:        p.Database,
+			Query:           p.ParameterizedQuery,
+			queryParameters: queryParams,
+		})
+	}
+
+	if p.Tracer.slowQueryWorthy(end.duration) {
+		if nil == p.Tracer.SlowQueries {
+			p.Tracer.SlowQueries = newSlowQueries(maxTxnSlowQueries)
+		}
+		// Frames to skip:
+		//   this function
+		//   endDatastore
+		//   DatastoreSegment.End
+		skipFrames := 3
+		p.Tracer.SlowQueries.observeInstance(slowQueryInstance{
+			Duration:           end.duration,
+			DatastoreMetric:    scopedMetric,
+			ParameterizedQuery: p.ParameterizedQuery,
+			QueryParameters:    queryParams,
+			Host:               p.Host,
+			PortPathOrID:       p.PortPathOrID,
+			DatabaseName:       p.Database,
+			StackTrace:         GetStackTrace(skipFrames),
+		})
+	}
+
+	if evt := end.spanEvent(); evt != nil {
+		evt.Name = scopedMetric
+		evt.Category = spanCategoryDatastore
+		evt.DatastoreExtras = &spanDatastoreExtras{
+			Component: p.Product,
+			Statement: p.ParameterizedQuery,
+			Instance:  p.Database,
+			Address:   datastoreSpanAddress(p.Host, p.PortPathOrID),
+			Hostname:  p.Host,
+		}
+		p.Tracer.saveSpanEvent(evt)
+	}
+
+	return nil
+}
+
+// MergeBreakdownMetrics creates segment metrics.
+func MergeBreakdownMetrics(t *TxnData, metrics *metricTable) {
+	scope := t.FinalName
+	isWeb := t.IsWeb
+	// Custom Segment Metrics
+	for key, data := range t.customSegments {
+		name := customSegmentMetric(key)
+		// Unscoped
+		metrics.add(name, "", *data, unforced)
+		// Scoped
+		metrics.add(name, scope, *data, unforced)
+	}
+
+	// External Segment Metrics
+	for key, data := range t.externalSegments {
+		metrics.add(externalRollupMetric.all, "", *data, forced)
+		metrics.add(externalRollupMetric.webOrOther(isWeb), "", *data, forced)
+
+		hostMetric := externalHostMetric(key)
+		metrics.add(hostMetric, "", *data, unforced)
+		if "" != key.ExternalCrossProcessID && "" != key.ExternalTransactionName {
+			txnMetric := externalTransactionMetric(key)
+
+			// Unscoped CAT metrics
+			metrics.add(externalAppMetric(key), "", *data, unforced)
+			metrics.add(txnMetric, "", *data, unforced)
+
+			// Scoped External Metric
+			metrics.add(txnMetric, scope, *data, unforced)
+		} else {
+			// Scoped External Metric
+			metrics.add(hostMetric, scope, *data, unforced)
+		}
+	}
+
+	// Datastore Segment Metrics
+	for key, data := range t.datastoreSegments {
+		metrics.add(datastoreRollupMetric.all, "", *data, forced)
+		metrics.add(datastoreRollupMetric.webOrOther(isWeb), "", *data, forced)
+
+		product := datastoreProductMetric(key)
+		metrics.add(product.all, "", *data, forced)
+		metrics.add(product.webOrOther(isWeb), "", *data, forced)
+
+		if key.Host != "" && key.PortPathOrID != "" {
+			instance := datastoreInstanceMetric(key)
+			metrics.add(instance, "", *data, unforced)
+		}
+
+		operation := datastoreOperationMetric(key)
+		metrics.add(operation, "", *data, unforced)
+
+		if "" != key.Collection {
+			statement := datastoreStatementMetric(key)
+
+			metrics.add(statement, "", *data, unforced)
+			metrics.add(statement, scope, *data, unforced)
+		} else {
+			metrics.add(operation, scope, *data, unforced)
+		}
+	}
+}
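
The segment bookkeeping above reduces to one invariant: each stack frame accumulates the durations of its finished children, and a segment's exclusive time is its own duration minus that accumulator. A stripped-down sketch with hypothetical timings:

package main

import (
	"fmt"
	"time"
)

type frame struct{ children time.Duration }

func main() {
	var stack []frame
	push := func() { stack = append(stack, frame{}) }
	pop := func(total time.Duration) time.Duration {
		top := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if len(stack) > 0 {
			stack[len(stack)-1].children += total // credit the parent
		}
		return total - top.children // exclusive duration
	}

	push()                               // outer segment starts
	push()                               // inner segment starts
	inner := pop(40 * time.Millisecond)  // inner ends after 40ms
	outer := pop(100 * time.Millisecond) // outer ends after 100ms total
	fmt.Println(inner, outer)            // 40ms 60ms
}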

+ 419 - 0
vendor/github.com/newrelic/go-agent/internal/txn_cross_process.go

@@ -0,0 +1,419 @@
+package internal
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/cat"
+)
+
+// Bitfield values for the TxnCrossProcess.Type field.
+const (
+	txnCrossProcessSynthetics = (1 << 0)
+	txnCrossProcessInbound    = (1 << 1)
+	txnCrossProcessOutbound   = (1 << 2)
+)
+
+var (
+	// ErrAccountNotTrusted indicates that, while the inbound headers were valid,
+	// the account ID within them is not trusted by the user's application.
+	ErrAccountNotTrusted = errors.New("account not trusted")
+)
+
+// TxnCrossProcess contains the metadata required for CAT and Synthetics
+// headers, transaction events, and traces.
+type TxnCrossProcess struct {
+	// The user side switch controlling whether CAT is enabled or not.
+	Enabled bool
+
+	// The user side switch controlling whether Distributed Tracing is enabled or not.
+	// This is required by synthetics support.  If Distributed Tracing is enabled,
+	// any synthetics functionality that is triggered should not set nr.guid.
+	DistributedTracingEnabled bool
+
+	// Rather than copying in the entire ConnectReply, here are the fields that
+	// we need to support CAT.
+	CrossProcessID  []byte
+	EncodingKey     []byte
+	TrustedAccounts trustedAccountSet
+
+	// CAT state for a given transaction.
+	Type                uint8
+	ClientID            string
+	GUID                string
+	TripID              string
+	PathHash            string
+	AlternatePathHashes map[string]bool
+	ReferringPathHash   string
+	ReferringTxnGUID    string
+	Synthetics          *cat.SyntheticsHeader
+
+	// The encoded synthetics header received as part of the request headers, if
+	// any. By storing this here, we avoid needing to marshal the invariant
+	// Synthetics struct above each time an external segment is created.
+	SyntheticsHeader string
+}
+
+// CrossProcessMetadata represents the metadata that must be transmitted with
+// an external request for CAT to work.
+type CrossProcessMetadata struct {
+	ID         string
+	TxnData    string
+	Synthetics string
+}
+
+// Init initialises a TxnCrossProcess based on the given application connect
+// reply and metadata fields, if any.
+func (txp *TxnCrossProcess) Init(enabled bool, dt bool, reply *ConnectReply, metadata CrossProcessMetadata) error {
+	txp.CrossProcessID = []byte(reply.CrossProcessID)
+	txp.EncodingKey = []byte(reply.EncodingKey)
+	txp.DistributedTracingEnabled = dt
+	txp.Enabled = enabled
+	txp.TrustedAccounts = reply.TrustedAccounts
+
+	return txp.handleInboundRequestHeaders(metadata)
+}
+
+// CreateCrossProcessMetadata generates request metadata that enables CAT and
+// Synthetics support for an external segment.
+func (txp *TxnCrossProcess) CreateCrossProcessMetadata(txnName, appName string) (CrossProcessMetadata, error) {
+	metadata := CrossProcessMetadata{}
+
+	// Regardless of the user's CAT settings, if there was a synthetics header in
+	// the inbound request, a synthetics header should always be included in the
+	// outbound request headers.
+	if txp.IsSynthetics() {
+		metadata.Synthetics = txp.SyntheticsHeader
+	}
+
+	if txp.Enabled {
+		txp.SetOutbound(true)
+		txp.requireTripID()
+
+		id, err := txp.outboundID()
+		if err != nil {
+			return metadata, err
+		}
+
+		txnData, err := txp.outboundTxnData(txnName, appName)
+		if err != nil {
+			return metadata, err
+		}
+
+		metadata.ID = id
+		metadata.TxnData = txnData
+	}
+
+	return metadata, nil
+}
+
+// Finalise handles any end-of-transaction tasks. In practice, this simply
+// means ensuring the path hash is set if it hasn't already been.
+func (txp *TxnCrossProcess) Finalise(txnName, appName string) error {
+	if txp.Enabled && txp.Used() {
+		_, err := txp.setPathHash(txnName, appName)
+		return err
+	}
+
+	// If there was no CAT activity, then do nothing, successfully.
+	return nil
+}
+
+// IsInbound returns true if the transaction had inbound CAT headers.
+func (txp *TxnCrossProcess) IsInbound() bool {
+	return 0 != (txp.Type & txnCrossProcessInbound)
+}
+
+// IsOutbound returns true if the transaction has generated outbound CAT
+// headers.
+func (txp *TxnCrossProcess) IsOutbound() bool {
+	// We don't actually use this anywhere today, but it feels weird not having
+	// it.
+	return 0 != (txp.Type & txnCrossProcessOutbound)
+}
+
+// IsSynthetics returns true if the transaction had inbound Synthetics headers.
+func (txp *TxnCrossProcess) IsSynthetics() bool {
+	// Technically, this is redundant: the presence of a non-nil Synthetics
+	// pointer should be sufficient to determine if this is a synthetics
+	// transaction. Nevertheless, it's convenient to have the Type field be
+	// non-zero if any CAT behaviour has occurred.
+	return 0 != (txp.Type&txnCrossProcessSynthetics) && nil != txp.Synthetics
+}
+
+// ParseAppData decodes the given appData value.
+func (txp *TxnCrossProcess) ParseAppData(encodedAppData string) (*cat.AppDataHeader, error) {
+	if !txp.Enabled {
+		return nil, nil
+	}
+	if encodedAppData != "" {
+		rawAppData, err := deobfuscate(encodedAppData, txp.EncodingKey)
+		if err != nil {
+			return nil, err
+		}
+
+		appData := &cat.AppDataHeader{}
+		if err := json.Unmarshal(rawAppData, appData); err != nil {
+			return nil, err
+		}
+
+		return appData, nil
+	}
+
+	return nil, nil
+}
+
+// CreateAppData creates the appData value that should be sent with a response
+// to ensure CAT operates as expected.
+func (txp *TxnCrossProcess) CreateAppData(name string, queueTime, responseTime time.Duration, contentLength int64) (string, error) {
+	// If CAT is disabled, do nothing, successfully.
+	if !txp.Enabled {
+		return "", nil
+	}
+
+	data, err := json.Marshal(&cat.AppDataHeader{
+		CrossProcessID:        string(txp.CrossProcessID),
+		TransactionName:       name,
+		QueueTimeInSeconds:    queueTime.Seconds(),
+		ResponseTimeInSeconds: responseTime.Seconds(),
+		ContentLength:         contentLength,
+		TransactionGUID:       txp.GUID,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	obfuscated, err := obfuscate(data, txp.EncodingKey)
+	if err != nil {
+		return "", err
+	}
+
+	return obfuscated, nil
+}
+
+// Used returns true if any CAT or Synthetics related functionality has been
+// triggered on the transaction.
+func (txp *TxnCrossProcess) Used() bool {
+	return 0 != txp.Type
+}
+
+// SetInbound sets the inbound CAT flag. This function is provided only for
+// internal and unit testing purposes, and should not be used outside of this
+// package normally.
+func (txp *TxnCrossProcess) SetInbound(inbound bool) {
+	if inbound {
+		txp.Type |= txnCrossProcessInbound
+	} else {
+		txp.Type &^= txnCrossProcessInbound
+	}
+}
+
+// SetOutbound sets the outbound CAT flag. This function is provided only for
+// internal and unit testing purposes, and should not be used outside of this
+// package normally.
+func (txp *TxnCrossProcess) SetOutbound(outbound bool) {
+	if outbound {
+		txp.Type |= txnCrossProcessOutbound
+	} else {
+		txp.Type &^= txnCrossProcessOutbound
+	}
+}
+
+// SetSynthetics sets the Synthetics CAT flag. This function is provided only
+// for internal and unit testing purposes, and should not be used outside of
+// this package normally.
+func (txp *TxnCrossProcess) SetSynthetics(synthetics bool) {
+	if synthetics {
+		txp.Type |= txnCrossProcessSynthetics
+	} else {
+		txp.Type &^= txnCrossProcessSynthetics
+	}
+}
+
+// handleInboundRequestHeaders parses the CAT headers from the given metadata
+// and updates the relevant fields on the TxnCrossProcess.
+func (txp *TxnCrossProcess) handleInboundRequestHeaders(metadata CrossProcessMetadata) error {
+	if txp.Enabled && metadata.ID != "" && metadata.TxnData != "" {
+		if err := txp.handleInboundRequestEncodedCAT(metadata.ID, metadata.TxnData); err != nil {
+			return err
+		}
+	}
+
+	if metadata.Synthetics != "" {
+		if err := txp.handleInboundRequestEncodedSynthetics(metadata.Synthetics); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (txp *TxnCrossProcess) handleInboundRequestEncodedCAT(encodedID, encodedTxnData string) error {
+	rawID, err := deobfuscate(encodedID, txp.EncodingKey)
+	if err != nil {
+		return err
+	}
+
+	rawTxnData, err := deobfuscate(encodedTxnData, txp.EncodingKey)
+	if err != nil {
+		return err
+	}
+
+	if err := txp.handleInboundRequestID(rawID); err != nil {
+		return err
+	}
+
+	return txp.handleInboundRequestTxnData(rawTxnData)
+}
+
+func (txp *TxnCrossProcess) handleInboundRequestID(raw []byte) error {
+	id, err := cat.NewIDHeader(raw)
+	if err != nil {
+		return err
+	}
+
+	if !txp.TrustedAccounts.IsTrusted(id.AccountID) {
+		return ErrAccountNotTrusted
+	}
+
+	txp.SetInbound(true)
+	txp.ClientID = string(raw)
+	txp.setRequireGUID()
+
+	return nil
+}
+
+func (txp *TxnCrossProcess) handleInboundRequestTxnData(raw []byte) error {
+	txnData := &cat.TxnDataHeader{}
+	if err := json.Unmarshal(raw, txnData); err != nil {
+		return err
+	}
+
+	txp.SetInbound(true)
+	if txnData.TripID != "" {
+		txp.TripID = txnData.TripID
+	} else {
+		txp.setRequireGUID()
+		txp.TripID = txp.GUID
+	}
+	txp.ReferringTxnGUID = txnData.GUID
+	txp.ReferringPathHash = txnData.PathHash
+
+	return nil
+}
+
+func (txp *TxnCrossProcess) handleInboundRequestEncodedSynthetics(encoded string) error {
+	raw, err := deobfuscate(encoded, txp.EncodingKey)
+	if err != nil {
+		return err
+	}
+
+	if err := txp.handleInboundRequestSynthetics(raw); err != nil {
+		return err
+	}
+
+	txp.SyntheticsHeader = encoded
+	return nil
+}
+
+func (txp *TxnCrossProcess) handleInboundRequestSynthetics(raw []byte) error {
+	synthetics := &cat.SyntheticsHeader{}
+	if err := json.Unmarshal(raw, synthetics); err != nil {
+		return err
+	}
+
+	// The specced behaviour here if the account isn't trusted is to disable the
+	// synthetics handling, but not CAT in general, so we won't return an error
+	// here.
+	if txp.TrustedAccounts.IsTrusted(synthetics.AccountID) {
+		txp.SetSynthetics(true)
+		txp.setRequireGUID()
+		txp.Synthetics = synthetics
+	}
+
+	return nil
+}
+
+func (txp *TxnCrossProcess) outboundID() (string, error) {
+	return obfuscate(txp.CrossProcessID, txp.EncodingKey)
+}
+
+func (txp *TxnCrossProcess) outboundTxnData(txnName, appName string) (string, error) {
+	pathHash, err := txp.setPathHash(txnName, appName)
+	if err != nil {
+		return "", err
+	}
+
+	data, err := json.Marshal(&cat.TxnDataHeader{
+		GUID:     txp.GUID,
+		TripID:   txp.TripID,
+		PathHash: pathHash,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	return obfuscate(data, txp.EncodingKey)
+}
+
+// setRequireGUID ensures that the transaction has a valid GUID, and sets the
+// nr.guid and trip ID if they are not already set.  If the customer has enabled
+// DistributedTracing, then the new style of guid will be set elsewhere.
+func (txp *TxnCrossProcess) setRequireGUID() {
+	if txp.DistributedTracingEnabled {
+		return
+	}
+
+	if txp.GUID != "" {
+		return
+	}
+
+	txp.GUID = fmt.Sprintf("%x", RandUint64())
+
+	if txp.TripID == "" {
+		txp.requireTripID()
+	}
+}
+
+// requireTripID ensures that the transaction has a valid trip ID.
+func (txp *TxnCrossProcess) requireTripID() {
+	if !txp.Enabled {
+		return
+	}
+	if txp.TripID != "" {
+		return
+	}
+
+	txp.setRequireGUID()
+	txp.TripID = txp.GUID
+}
+
+// setPathHash generates a path hash, sets the transaction's path hash to
+// match, and returns it. This function will also ensure that the alternate
+// path hashes are correctly updated.
+func (txp *TxnCrossProcess) setPathHash(txnName, appName string) (string, error) {
+	pathHash, err := cat.GeneratePathHash(txp.ReferringPathHash, txnName, appName)
+	if err != nil {
+		return "", err
+	}
+
+	if pathHash != txp.PathHash {
+		if txp.PathHash != "" {
+			// Lazily initialise the alternate path hashes if they haven't been
+			// already.
+			if txp.AlternatePathHashes == nil {
+				txp.AlternatePathHashes = make(map[string]bool)
+			}
+
+			// The spec limits us to a maximum of 10 alternate path hashes.
+			if len(txp.AlternatePathHashes) < 10 {
+				txp.AlternatePathHashes[txp.PathHash] = true
+			}
+		}
+		txp.PathHash = pathHash
+	}
+
+	return pathHash, nil
+}
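
To make the bounding behavior above concrete, here is a minimal standalone sketch (not part of the agent's API) of how the current path hash is demoted into the capped alternates set as new hashes arrive:

package main

import "fmt"

// recordPathHash mirrors setPathHash's bookkeeping: when a new hash takes
// over, the old one is demoted into the alternates set, capped at 10.
func recordPathHash(current string, alternates map[string]bool, next string) (string, map[string]bool) {
	if next == current {
		return current, alternates
	}
	if current != "" {
		if alternates == nil {
			alternates = make(map[string]bool) // lazy init, as in the agent
		}
		if len(alternates) < 10 { // spec limit on alternate path hashes
			alternates[current] = true
		}
	}
	return next, alternates
}

func main() {
	current, alternates := "", map[string]bool(nil)
	for _, h := range []string{"0b9b8e07", "2fc0bbd2", "0b9b8e07"} {
		current, alternates = recordPathHash(current, alternates, h)
	}
	fmt.Println(current, alternates) // 0b9b8e07 map[0b9b8e07:true 2fc0bbd2:true]
}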

+ 193 - 0
vendor/github.com/newrelic/go-agent/internal/txn_events.go

@@ -0,0 +1,193 @@
+package internal
+
+import (
+	"bytes"
+	"sort"
+	"strings"
+	"time"
+)
+
+// DatastoreExternalTotals contains an overview of the external and datastore
+// calls made during a transaction.
+type DatastoreExternalTotals struct {
+	externalCallCount  uint64
+	externalDuration   time.Duration
+	datastoreCallCount uint64
+	datastoreDuration  time.Duration
+}
+
+// WriteJSON prepares JSON in the format expected by the collector.
+func (e *TxnEvent) WriteJSON(buf *bytes.Buffer) {
+	w := jsonFieldsWriter{buf: buf}
+	buf.WriteByte('[')
+	buf.WriteByte('{')
+	w.stringField("type", "Transaction")
+	w.stringField("name", e.FinalName)
+	w.floatField("timestamp", timeToFloatSeconds(e.Start))
+	if ApdexNone != e.Zone {
+		w.stringField("nr.apdexPerfZone", e.Zone.label())
+	}
+
+	w.boolField("error", e.HasError)
+
+	sharedTransactionIntrinsics(e, &w)
+
+	// Write better CAT intrinsics if enabled
+	sharedBetterCATIntrinsics(e, &w)
+
+	if e.BetterCAT.Enabled {
+		if p := e.BetterCAT.Inbound; nil != p {
+			if "" != p.TransactionID {
+				w.stringField("parentId", p.TransactionID)
+			}
+
+			if "" != p.ID {
+				w.stringField("parentSpanId", p.ID)
+			}
+		}
+	}
+
+	// Write old CAT intrinsics if enabled
+	oldCATIntrinsics(e, &w)
+
+	buf.WriteByte('}')
+	buf.WriteByte(',')
+	userAttributesJSON(e.Attrs, buf, destTxnEvent, nil)
+	buf.WriteByte(',')
+	agentAttributesJSON(e.Attrs, buf, destTxnEvent)
+	buf.WriteByte(']')
+}
+
+// oldCATIntrinsics reports old CAT intrinsics for Transaction
+// if CrossProcess.Used() is true
+func oldCATIntrinsics(e *TxnEvent, w *jsonFieldsWriter) {
+	if !e.CrossProcess.Used() {
+		return
+	}
+
+	if e.CrossProcess.ClientID != "" {
+		w.stringField("client_cross_process_id", e.CrossProcess.ClientID)
+	}
+	if e.CrossProcess.TripID != "" {
+		w.stringField("nr.tripId", e.CrossProcess.TripID)
+	}
+	if e.CrossProcess.PathHash != "" {
+		w.stringField("nr.pathHash", e.CrossProcess.PathHash)
+	}
+	if e.CrossProcess.ReferringPathHash != "" {
+		w.stringField("nr.referringPathHash", e.CrossProcess.ReferringPathHash)
+	}
+	if e.CrossProcess.GUID != "" {
+		w.stringField("nr.guid", e.CrossProcess.GUID)
+	}
+	if e.CrossProcess.ReferringTxnGUID != "" {
+		w.stringField("nr.referringTransactionGuid", e.CrossProcess.ReferringTxnGUID)
+	}
+	if len(e.CrossProcess.AlternatePathHashes) > 0 {
+		hashes := make([]string, 0, len(e.CrossProcess.AlternatePathHashes))
+		for hash := range e.CrossProcess.AlternatePathHashes {
+			hashes = append(hashes, hash)
+		}
+		sort.Strings(hashes)
+		w.stringField("nr.alternatePathHashes", strings.Join(hashes, ","))
+	}
+}
+
+// sharedTransactionIntrinsics reports intrinsics that are shared
+// by Transaction and TransactionError
+func sharedTransactionIntrinsics(e *TxnEvent, w *jsonFieldsWriter) {
+	w.floatField("duration", e.Duration.Seconds())
+	if e.Queuing > 0 {
+		w.floatField("queueDuration", e.Queuing.Seconds())
+	}
+	if e.externalCallCount > 0 {
+		w.intField("externalCallCount", int64(e.externalCallCount))
+		w.floatField("externalDuration", e.externalDuration.Seconds())
+	}
+	if e.datastoreCallCount > 0 {
+		// Note that "database" is used for the keys here instead of
+		// "datastore" for historical reasons.
+		w.intField("databaseCallCount", int64(e.datastoreCallCount))
+		w.floatField("databaseDuration", e.datastoreDuration.Seconds())
+	}
+
+	if e.CrossProcess.IsSynthetics() {
+		w.stringField("nr.syntheticsResourceId", e.CrossProcess.Synthetics.ResourceID)
+		w.stringField("nr.syntheticsJobId", e.CrossProcess.Synthetics.JobID)
+		w.stringField("nr.syntheticsMonitorId", e.CrossProcess.Synthetics.MonitorID)
+	}
+}
+
+// sharedBetterCATIntrinsics reports intrinsics that are shared
+// by Transaction, TransactionError, and Slow SQL
+func sharedBetterCATIntrinsics(e *TxnEvent, w *jsonFieldsWriter) {
+	if e.BetterCAT.Enabled {
+		if p := e.BetterCAT.Inbound; nil != p {
+			w.stringField("parent.type", p.Type)
+			w.stringField("parent.app", p.App)
+			w.stringField("parent.account", p.Account)
+			w.stringField("parent.transportType", p.TransportType)
+			w.floatField("parent.transportDuration", p.TransportDuration.Seconds())
+		}
+
+		w.stringField("guid", e.BetterCAT.ID)
+		w.stringField("traceId", e.BetterCAT.TraceID())
+		w.writerField("priority", e.BetterCAT.Priority)
+		w.boolField("sampled", e.BetterCAT.Sampled)
+	}
+}
+
+// MarshalJSON is used for testing.
+func (e *TxnEvent) MarshalJSON() ([]byte, error) {
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+
+	e.WriteJSON(buf)
+
+	return buf.Bytes(), nil
+}
+
+type txnEvents struct {
+	events *analyticsEvents
+}
+
+func newTxnEvents(max int) *txnEvents {
+	return &txnEvents{
+		events: newAnalyticsEvents(max),
+	}
+}
+
+func (events *txnEvents) AddTxnEvent(e *TxnEvent, priority Priority) {
+	// Synthetics events always get priority: normal event priorities are in the
+	// range [0.0,1.99999], so adding 2 means that a Synthetics event will always
+	// win.
+	if e.CrossProcess.IsSynthetics() {
+		priority += 2.0
+	}
+	events.events.addEvent(analyticsEvent{priority: priority, jsonWriter: e})
+}
+
+func (events *txnEvents) MergeIntoHarvest(h *Harvest) {
+	h.TxnEvents.events.mergeFailed(events.events)
+}
+
+func (events *txnEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
+	return events.events.CollectorJSON(agentRunID)
+}
+
+func (events *txnEvents) numSeen() float64  { return events.events.NumSeen() }
+func (events *txnEvents) numSaved() float64 { return events.events.NumSaved() }
+
+func (events *txnEvents) EndpointMethod() string {
+	return cmdTxnEvents
+}
+
+func (events *txnEvents) payloads(limit int) []PayloadCreator {
+	if events.numSaved() < float64(limit) {
+		return []PayloadCreator{events}
+	}
+	e1, e2 := events.events.split()
+	return []PayloadCreator{
+		&txnEvents{events: e1},
+		&txnEvents{events: e2},
+	}
+}
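
A quick illustration of why the +2.0 bump in AddTxnEvent works: regular priorities fall in [0.0, 2.0), so even the lowest-priority synthetics event outranks the highest-priority regular event (Priority is simplified to float64 here):

package main

import "fmt"

func main() {
	highestRegular := 1.99999     // top of the regular priority range
	lowestSynthetics := 0.0 + 2.0 // any synthetics event after the bump
	fmt.Println(lowestSynthetics > highestRegular) // true: synthetics always wins
}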

+ 410 - 0
vendor/github.com/newrelic/go-agent/internal/txn_trace.go

@@ -0,0 +1,410 @@
+package internal
+
+import (
+	"bytes"
+	"container/heap"
+	"encoding/json"
+	"sort"
+	"time"
+
+	"github.com/newrelic/go-agent/internal/jsonx"
+)
+
+// See https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Trace-LEGACY.md
+
+type traceNodeHeap []traceNode
+
+// traceNodeParams is used for trace node parameters.  A struct is used in place
+// of a map[string]interface{} to facilitate testing and reduce JSON Marshal
+// overhead.  If too many fields get added here, it probably makes sense to
+// start using a map.  This struct is not embedded into traceNode to minimize
+// the size of traceNode:  Not all nodes will have parameters.
+type traceNodeParams struct {
+	StackTrace      StackTrace
+	CleanURL        string
+	Database        string
+	Host            string
+	PortPathOrID    string
+	Query           string
+	TransactionGUID string
+	queryParameters queryParameters
+}
+
+func (p *traceNodeParams) WriteJSON(buf *bytes.Buffer) {
+	w := jsonFieldsWriter{buf: buf}
+	buf.WriteByte('{')
+	if nil != p.StackTrace {
+		w.writerField("backtrace", p.StackTrace)
+	}
+	if "" != p.CleanURL {
+		w.stringField("uri", p.CleanURL)
+	}
+	if "" != p.Database {
+		w.stringField("database_name", p.Database)
+	}
+	if "" != p.Host {
+		w.stringField("host", p.Host)
+	}
+	if "" != p.PortPathOrID {
+		w.stringField("port_path_or_id", p.PortPathOrID)
+	}
+	if "" != p.Query {
+		w.stringField("query", p.Query)
+	}
+	if "" != p.TransactionGUID {
+		w.stringField("transaction_guid", p.TransactionGUID)
+	}
+	if nil != p.queryParameters {
+		w.writerField("query_parameters", p.queryParameters)
+	}
+	buf.WriteByte('}')
+}
+
+// MarshalJSON is used for testing.
+func (p *traceNodeParams) MarshalJSON() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	p.WriteJSON(buf)
+	return buf.Bytes(), nil
+}
+
+type traceNode struct {
+	start    segmentTime
+	stop     segmentTime
+	duration time.Duration
+	params   *traceNodeParams
+	name     string
+}
+
+func (h traceNodeHeap) Len() int           { return len(h) }
+func (h traceNodeHeap) Less(i, j int) bool { return h[i].duration < h[j].duration }
+func (h traceNodeHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
+
+// Push and Pop are unused: only heap.Init and heap.Fix are used.
+func (h traceNodeHeap) Push(x interface{}) {}
+func (h traceNodeHeap) Pop() interface{}   { return nil }
+
+// TxnTrace contains the work in progress transaction trace.
+type TxnTrace struct {
+	Enabled             bool
+	SegmentThreshold    time.Duration
+	StackTraceThreshold time.Duration
+	nodes               traceNodeHeap
+	maxNodes            int
+}
+
+// getMaxNodes allows the maximum number of nodes to be overwritten for unit
+// tests.
+func (trace *TxnTrace) getMaxNodes() int {
+	if 0 != trace.maxNodes {
+		return trace.maxNodes
+	}
+	return maxTxnTraceNodes
+}
+
+// considerNode exists to prevent unnecessary calls to witnessNode: constructing
+// the metric name and params map requires allocations.
+func (trace *TxnTrace) considerNode(end segmentEnd) bool {
+	return trace.Enabled && (end.duration >= trace.SegmentThreshold)
+}
+
+func (trace *TxnTrace) witnessNode(end segmentEnd, name string, params *traceNodeParams) {
+	node := traceNode{
+		start:    end.start,
+		stop:     end.stop,
+		duration: end.duration,
+		name:     name,
+		params:   params,
+	}
+	if !trace.considerNode(end) {
+		return
+	}
+	if trace.nodes == nil {
+		trace.nodes = make(traceNodeHeap, 0, startingTxnTraceNodes)
+	}
+	if end.exclusive >= trace.StackTraceThreshold {
+		if node.params == nil {
+			p := new(traceNodeParams)
+			node.params = p
+		}
+		// skip the following stack frames:
+		//   this method
+		//   function in tracing.go      (EndBasicSegment, EndExternalSegment, EndDatastoreSegment)
+		//   function in internal_txn.go (endSegment, endExternal, endDatastore)
+		//   segment end method
+		skip := 4
+		node.params.StackTrace = GetStackTrace(skip)
+	}
+	if max := trace.getMaxNodes(); len(trace.nodes) < max {
+		trace.nodes = append(trace.nodes, node)
+		if len(trace.nodes) == max {
+			heap.Init(trace.nodes)
+		}
+		return
+	}
+
+	if node.duration <= trace.nodes[0].duration {
+		return
+	}
+	trace.nodes[0] = node
+	heap.Fix(trace.nodes, 0)
+}
+
+// HarvestTrace contains a finished transaction trace ready for serialization to
+// the collector.
+type HarvestTrace struct {
+	TxnEvent
+	Trace TxnTrace
+}
+
+type nodeDetails struct {
+	name          string
+	relativeStart time.Duration
+	relativeStop  time.Duration
+	params        *traceNodeParams
+}
+
+func printNodeStart(buf *bytes.Buffer, n nodeDetails) {
+	// Duration.Seconds() is intentionally not used here: integer
+	// millisecond precision is enough.
+	relativeStartMillis := n.relativeStart.Nanoseconds() / (1000 * 1000)
+	relativeStopMillis := n.relativeStop.Nanoseconds() / (1000 * 1000)
+
+	buf.WriteByte('[')
+	jsonx.AppendInt(buf, relativeStartMillis)
+	buf.WriteByte(',')
+	jsonx.AppendInt(buf, relativeStopMillis)
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, n.name)
+	buf.WriteByte(',')
+	if nil == n.params {
+		buf.WriteString("{}")
+	} else {
+		n.params.WriteJSON(buf)
+	}
+	buf.WriteByte(',')
+	buf.WriteByte('[')
+}
+
+func printChildren(buf *bytes.Buffer, traceStart time.Time, nodes sortedTraceNodes, next int, stop segmentStamp) int {
+	firstChild := true
+	for next < len(nodes) && nodes[next].start.Stamp < stop {
+		if firstChild {
+			firstChild = false
+		} else {
+			buf.WriteByte(',')
+		}
+		printNodeStart(buf, nodeDetails{
+			name:          nodes[next].name,
+			relativeStart: nodes[next].start.Time.Sub(traceStart),
+			relativeStop:  nodes[next].stop.Time.Sub(traceStart),
+			params:        nodes[next].params,
+		})
+		next = printChildren(buf, traceStart, nodes, next+1, nodes[next].stop.Stamp)
+		buf.WriteString("]]")
+	}
+	return next
+}
+
+type sortedTraceNodes []*traceNode
+
+func (s sortedTraceNodes) Len() int           { return len(s) }
+func (s sortedTraceNodes) Less(i, j int) bool { return s[i].start.Stamp < s[j].start.Stamp }
+func (s sortedTraceNodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// MarshalJSON prepares the trace in the JSON expected by the collector.
+func (trace *HarvestTrace) MarshalJSON() ([]byte, error) {
+	estimate := 100 * len(trace.Trace.nodes)
+	buf := bytes.NewBuffer(make([]byte, 0, estimate))
+
+	nodes := make(sortedTraceNodes, len(trace.Trace.nodes))
+	for i := 0; i < len(nodes); i++ {
+		nodes[i] = &trace.Trace.nodes[i]
+	}
+	sort.Sort(nodes)
+
+	buf.WriteByte('[') // begin trace
+
+	jsonx.AppendInt(buf, trace.Start.UnixNano()/1000)
+	buf.WriteByte(',')
+	jsonx.AppendFloat(buf, trace.Duration.Seconds()*1000.0)
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, trace.FinalName)
+	buf.WriteByte(',')
+	jsonx.AppendString(buf, trace.CleanURL)
+	buf.WriteByte(',')
+
+	buf.WriteByte('[') // begin trace data
+
+	// If the trace string pool is used, insert another array here.
+
+	jsonx.AppendFloat(buf, 0.0) // unused timestamp
+	buf.WriteByte(',')          //
+	buf.WriteString("{}")       // unused: formerly request parameters
+	buf.WriteByte(',')          //
+	buf.WriteString("{}")       // unused: formerly custom parameters
+	buf.WriteByte(',')          //
+
+	printNodeStart(buf, nodeDetails{ // begin outer root
+		name:          "ROOT",
+		relativeStart: 0,
+		relativeStop:  trace.Duration,
+	})
+
+	printNodeStart(buf, nodeDetails{ // begin inner root
+		name:          trace.FinalName,
+		relativeStart: 0,
+		relativeStop:  trace.Duration,
+	})
+
+	if len(nodes) > 0 {
+		lastStopStamp := nodes[len(nodes)-1].stop.Stamp + 1
+		printChildren(buf, trace.Start, nodes, 0, lastStopStamp)
+	}
+
+	buf.WriteString("]]") // end inner root
+	buf.WriteString("]]") // end outer root
+
+	buf.WriteByte(',')
+	buf.WriteByte('{')
+	buf.WriteString(`"agentAttributes":`)
+	agentAttributesJSON(trace.Attrs, buf, destTxnTrace)
+	buf.WriteByte(',')
+	buf.WriteString(`"userAttributes":`)
+	userAttributesJSON(trace.Attrs, buf, destTxnTrace, nil)
+	buf.WriteByte(',')
+	buf.WriteString(`"intrinsics":`)
+	intrinsicsJSON(&trace.TxnEvent, buf)
+	buf.WriteByte('}')
+
+	// If the trace string pool is used, end another array here.
+
+	buf.WriteByte(']') // end trace data
+
+	buf.WriteByte(',')
+	if trace.CrossProcess.Used() && trace.CrossProcess.GUID != "" {
+		jsonx.AppendString(buf, trace.CrossProcess.GUID)
+	} else {
+		buf.WriteString(`""`)
+	}
+	buf.WriteByte(',')       //
+	buf.WriteString(`null`)  // reserved for future use
+	buf.WriteByte(',')       //
+	buf.WriteString(`false`) // ForcePersist is not yet supported
+	buf.WriteByte(',')       //
+	buf.WriteString(`null`)  // X-Ray sessions not supported
+	buf.WriteByte(',')       //
+
+	// Synthetics are supported:
+	if trace.CrossProcess.IsSynthetics() {
+		jsonx.AppendString(buf, trace.CrossProcess.Synthetics.ResourceID)
+	} else {
+		buf.WriteString(`""`)
+	}
+
+	buf.WriteByte(']') // end trace
+
+	return buf.Bytes(), nil
+}
+
+type txnTraceHeap []*HarvestTrace
+
+func (h *txnTraceHeap) isEmpty() bool {
+	return 0 == len(*h)
+}
+
+func newTxnTraceHeap(max int) *txnTraceHeap {
+	h := make(txnTraceHeap, 0, max)
+	heap.Init(&h)
+	return &h
+}
+
+// Implement sort.Interface.
+func (h txnTraceHeap) Len() int           { return len(h) }
+func (h txnTraceHeap) Less(i, j int) bool { return h[i].Duration < h[j].Duration }
+func (h txnTraceHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
+
+// Implement heap.Interface.
+func (h *txnTraceHeap) Push(x interface{}) { *h = append(*h, x.(*HarvestTrace)) }
+
+func (h *txnTraceHeap) Pop() interface{} {
+	old := *h
+	n := len(old)
+	x := old[n-1]
+	*h = old[0 : n-1]
+	return x
+}
+
+func (h *txnTraceHeap) isKeeper(t *HarvestTrace) bool {
+	if len(*h) < cap(*h) {
+		return true
+	}
+	return t.Duration >= (*h)[0].Duration
+}
+
+func (h *txnTraceHeap) addTxnTrace(t *HarvestTrace) {
+	if len(*h) < cap(*h) {
+		heap.Push(h, t)
+		return
+	}
+
+	if t.Duration <= (*h)[0].Duration {
+		return
+	}
+	heap.Pop(h)
+	heap.Push(h, t)
+}
+
+type harvestTraces struct {
+	regular    *txnTraceHeap
+	synthetics *txnTraceHeap
+}
+
+func newHarvestTraces() *harvestTraces {
+	return &harvestTraces{
+		regular:    newTxnTraceHeap(maxRegularTraces),
+		synthetics: newTxnTraceHeap(maxSyntheticsTraces),
+	}
+}
+
+func (traces *harvestTraces) Len() int {
+	return traces.regular.Len() + traces.synthetics.Len()
+}
+
+func (traces *harvestTraces) Witness(trace HarvestTrace) {
+	traceHeap := traces.regular
+	if trace.CrossProcess.IsSynthetics() {
+		traceHeap = traces.synthetics
+	}
+
+	if traceHeap.isKeeper(&trace) {
+		cpy := new(HarvestTrace)
+		*cpy = trace
+		traceHeap.addTxnTrace(cpy)
+	}
+}
+
+func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
+	if traces.Len() == 0 {
+		return nil, nil
+	}
+
+	return json.Marshal([]interface{}{
+		agentRunID,
+		traces.slice(),
+	})
+}
+
+func (traces *harvestTraces) slice() []*HarvestTrace {
+	out := make([]*HarvestTrace, 0, traces.Len())
+	out = append(out, (*traces.regular)...)
+	out = append(out, (*traces.synthetics)...)
+
+	return out
+}
+
+func (traces *harvestTraces) MergeIntoHarvest(h *Harvest) {}
+
+func (traces *harvestTraces) EndpointMethod() string {
+	return cmdTxnTraces
+}
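
Both traceNodeHeap and txnTraceHeap implement the same "keep the N slowest" pattern: fill to capacity, then replace the fastest root whenever a slower item arrives. A minimal standalone version of that pattern (illustrative only):

package main

import (
	"container/heap"
	"fmt"
	"time"
)

type durHeap []time.Duration

func (h durHeap) Len() int           { return len(h) }
func (h durHeap) Less(i, j int) bool { return h[i] < h[j] } // min-heap: fastest at the root
func (h durHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *durHeap) Push(x interface{}) { *h = append(*h, x.(time.Duration)) }
func (h *durHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// keepSlowest retains at most max durations, preferring the slowest.
func keepSlowest(h *durHeap, max int, d time.Duration) {
	if h.Len() < max {
		heap.Push(h, d)
		return
	}
	if d <= (*h)[0] {
		return // not slower than anything kept: discard
	}
	heap.Pop(h) // evict the current fastest
	heap.Push(h, d)
}

func main() {
	h := &durHeap{}
	for _, ms := range []int{5, 40, 12, 90, 3, 55} {
		keepSlowest(h, 3, time.Duration(ms)*time.Millisecond)
	}
	fmt.Println(*h) // the three slowest survive: [40ms 90ms 55ms]
}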

+ 43 - 0
vendor/github.com/newrelic/go-agent/internal/url.go

@@ -0,0 +1,43 @@
+package internal
+
+import "net/url"
+
+// SafeURL removes sensitive information from a URL.
+func SafeURL(u *url.URL) string {
+	if nil == u {
+		return ""
+	}
+	if "" != u.Opaque {
+		// If the URL is opaque, we cannot be sure if it contains
+		// sensitive information.
+		return ""
+	}
+
+	// Omit user, query, and fragment information for security.
+	ur := url.URL{
+		Scheme: u.Scheme,
+		Host:   u.Host,
+		Path:   u.Path,
+	}
+	return ur.String()
+}
+
+// SafeURLFromString removes sensitive information from a URL.
+func SafeURLFromString(rawurl string) string {
+	u, err := url.Parse(rawurl)
+	if nil != err {
+		return ""
+	}
+	return SafeURL(u)
+}
+
+// HostFromURL returns the URL's host.
+func HostFromURL(u *url.URL) string {
+	if nil == u {
+		return ""
+	}
+	if "" != u.Opaque {
+		return "opaque"
+	}
+	return u.Host
+}
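
Since this package is internal, here is a quick inline demonstration of the same scrubbing, keeping only scheme, host, and path:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://user:pass@example.com/search?token=secret#frag")
	if err != nil {
		panic(err)
	}
	// Credentials, query string, and fragment are dropped.
	safe := url.URL{Scheme: u.Scheme, Host: u.Host, Path: u.Path}
	fmt.Println(safe.String()) // https://example.com/search
}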

+ 107 - 0
vendor/github.com/newrelic/go-agent/internal/utilities.go

@@ -0,0 +1,107 @@
+package internal
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// JSONString assists in logging JSON: depending on the formatter used to log
+// Context contents, the contents may be marshalled as JSON or printed
+// directly.
+type JSONString string
+
+// MarshalJSON returns the JSONString unmodified without any escaping.
+func (js JSONString) MarshalJSON() ([]byte, error) {
+	if "" == js {
+		return []byte("null"), nil
+	}
+	return []byte(js), nil
+}
+
+func removeFirstSegment(name string) string {
+	idx := strings.Index(name, "/")
+	if -1 == idx {
+		return name
+	}
+	return name[idx+1:]
+}
+
+func timeToFloatSeconds(t time.Time) float64 {
+	return float64(t.UnixNano()) / float64(1000*1000*1000)
+}
+
+func timeToFloatMilliseconds(t time.Time) float64 {
+	return float64(t.UnixNano()) / float64(1000*1000)
+}
+
+func floatSecondsToDuration(seconds float64) time.Duration {
+	nanos := seconds * 1000 * 1000 * 1000
+	return time.Duration(nanos) * time.Nanosecond
+}
+
+func absTimeDiff(t1, t2 time.Time) time.Duration {
+	if t1.After(t2) {
+		return t1.Sub(t2)
+	}
+	return t2.Sub(t1)
+}
+
+func compactJSON(js []byte) []byte {
+	buf := new(bytes.Buffer)
+	if err := json.Compact(buf, js); err != nil {
+		return nil
+	}
+	return buf.Bytes()
+}
+
+// CompactJSONString removes the whitespace from a JSON string.
+func CompactJSONString(js string) string {
+	out := compactJSON([]byte(js))
+	return string(out)
+}
+
+// GetContentLengthFromHeader gets the content length from an HTTP header, or
+// -1 if no content length is available.
+func GetContentLengthFromHeader(h http.Header) int64 {
+	if cl := h.Get("Content-Length"); cl != "" {
+		if contentLength, err := strconv.ParseInt(cl, 10, 64); err == nil {
+			return contentLength
+		}
+	}
+
+	return -1
+}
+
+// StringLengthByteLimit truncates strings using a byte-limit boundary and
+// avoids terminating in the middle of a multibyte character.
+func StringLengthByteLimit(str string, byteLimit int) string {
+	if len(str) <= byteLimit {
+		return str
+	}
+
+	limitIndex := 0
+	for pos := range str {
+		if pos > byteLimit {
+			break
+		}
+		limitIndex = pos
+	}
+	return str[0:limitIndex]
+}
+
+func timeFromUnixMilliseconds(millis uint64) time.Time {
+	secs := int64(millis) / 1000
+	msecsRemaining := int64(millis) % 1000
+	nsecsRemaining := msecsRemaining * (1000 * 1000)
+	return time.Unix(secs, nsecsRemaining)
+}
+
+// TimeToUnixMilliseconds converts a time into a Unix timestamp in millisecond
+// units.
+func TimeToUnixMilliseconds(tm time.Time) uint64 {
+	return uint64(tm.UnixNano()) / uint64(1000*1000)
+}
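
StringLengthByteLimit walks rune start offsets, so a multibyte character is never split. A standalone sketch of the same walk, with an example where a naive byte slice would cut a rune in half:

package main

import "fmt"

// truncate mirrors StringLengthByteLimit: cut on a rune boundary at or
// below byteLimit.
func truncate(str string, byteLimit int) string {
	if len(str) <= byteLimit {
		return str
	}
	limit := 0
	for pos := range str { // pos is the byte offset of each rune's start
		if pos > byteLimit {
			break
		}
		limit = pos
	}
	return str[:limit]
}

func main() {
	fmt.Println(truncate("señor", 3)) // "se": a naive str[:3] would split ñ
}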

+ 89 - 0
vendor/github.com/newrelic/go-agent/internal/utilization/aws.go

@@ -0,0 +1,89 @@
+package utilization
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+const (
+	awsHostname     = "169.254.169.254"
+	awsEndpointPath = "/2016-09-02/dynamic/instance-identity/document"
+	awsEndpoint     = "http://" + awsHostname + awsEndpointPath
+)
+
+type aws struct {
+	InstanceID       string `json:"instanceId,omitempty"`
+	InstanceType     string `json:"instanceType,omitempty"`
+	AvailabilityZone string `json:"availabilityZone,omitempty"`
+}
+
+func gatherAWS(util *Data, client *http.Client) error {
+	aws, err := getAWS(client)
+	if err != nil {
+		// Only return the error here if it is unexpected to prevent
+		// warning customers who aren't running AWS about a timeout.
+		if _, ok := err.(unexpectedAWSErr); ok {
+			return err
+		}
+		return nil
+	}
+	util.Vendors.AWS = aws
+
+	return nil
+}
+
+type unexpectedAWSErr struct{ e error }
+
+func (e unexpectedAWSErr) Error() string {
+	return fmt.Sprintf("unexpected AWS error: %v", e.e)
+}
+
+func getAWS(client *http.Client) (*aws, error) {
+	response, err := client.Get(awsEndpoint)
+	if err != nil {
+		// No unexpectedAWSErr here: a timeout is usually going to
+		// happen.
+		return nil, err
+	}
+	defer response.Body.Close()
+
+	if response.StatusCode != 200 {
+		return nil, unexpectedAWSErr{e: fmt.Errorf("response code %d", response.StatusCode)}
+	}
+
+	data, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		return nil, unexpectedAWSErr{e: err}
+	}
+	a := &aws{}
+	if err := json.Unmarshal(data, a); err != nil {
+		return nil, unexpectedAWSErr{e: err}
+	}
+
+	if err := a.validate(); err != nil {
+		return nil, unexpectedAWSErr{e: err}
+	}
+
+	return a, nil
+}
+
+func (a *aws) validate() (err error) {
+	a.InstanceID, err = normalizeValue(a.InstanceID)
+	if err != nil {
+		return fmt.Errorf("invalid instance ID: %v", err)
+	}
+
+	a.InstanceType, err = normalizeValue(a.InstanceType)
+	if err != nil {
+		return fmt.Errorf("invalid instance type: %v", err)
+	}
+
+	a.AvailabilityZone, err = normalizeValue(a.AvailabilityZone)
+	if err != nil {
+		return fmt.Errorf("invalid availability zone: %v", err)
+	}
+
+	return
+}
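
The wrapper type above encodes "expected" versus "unexpected" failure: off AWS, the metadata endpoint simply times out and is ignored, while malformed responses surface as errors. The same filter pattern in miniature (names here are illustrative):

package main

import (
	"errors"
	"fmt"
)

type unexpectedErr struct{ e error }

func (u unexpectedErr) Error() string { return fmt.Sprintf("unexpected: %v", u.e) }

// gather surfaces only unexpected failures; expected ones (such as a
// metadata endpoint timeout when not on the cloud) are swallowed.
func gather(detect func() error) error {
	if err := detect(); err != nil {
		if _, ok := err.(unexpectedErr); ok {
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(gather(func() error { return errors.New("timeout") }))            // <nil>
	fmt.Println(gather(func() error { return unexpectedErr{errors.New("500")} })) // unexpected: 500
}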

+ 102 - 0
vendor/github.com/newrelic/go-agent/internal/utilization/azure.go

@@ -0,0 +1,102 @@
+package utilization
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+const (
+	azureHostname     = "169.254.169.254"
+	azureEndpointPath = "/metadata/instance/compute?api-version=2017-03-01"
+	azureEndpoint     = "http://" + azureHostname + azureEndpointPath
+)
+
+type azure struct {
+	Location string `json:"location,omitempty"`
+	Name     string `json:"name,omitempty"`
+	VMID     string `json:"vmId,omitempty"`
+	VMSize   string `json:"vmSize,omitempty"`
+}
+
+func gatherAzure(util *Data, client *http.Client) error {
+	az, err := getAzure(client)
+	if err != nil {
+		// Only return the error here if it is unexpected to prevent
+		// warning customers who aren't running Azure about a timeout.
+		if _, ok := err.(unexpectedAzureErr); ok {
+			return err
+		}
+		return nil
+	}
+	util.Vendors.Azure = az
+
+	return nil
+}
+
+type unexpectedAzureErr struct{ e error }
+
+func (e unexpectedAzureErr) Error() string {
+	return fmt.Sprintf("unexpected Azure error: %v", e.e)
+}
+
+func getAzure(client *http.Client) (*azure, error) {
+	req, err := http.NewRequest("GET", azureEndpoint, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Metadata", "true")
+
+	response, err := client.Do(req)
+	if err != nil {
+		// No unexpectedAzureErr here: a timeout is usually going to
+		// happen.
+		return nil, err
+	}
+	defer response.Body.Close()
+
+	if response.StatusCode != 200 {
+		return nil, unexpectedAzureErr{e: fmt.Errorf("response code %d", response.StatusCode)}
+	}
+
+	data, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		return nil, unexpectedAzureErr{e: err}
+	}
+
+	az := &azure{}
+	if err := json.Unmarshal(data, az); err != nil {
+		return nil, unexpectedAzureErr{e: err}
+	}
+
+	if err := az.validate(); err != nil {
+		return nil, unexpectedAzureErr{e: err}
+	}
+
+	return az, nil
+}
+
+func (az *azure) validate() (err error) {
+	az.Location, err = normalizeValue(az.Location)
+	if err != nil {
+		return fmt.Errorf("invalid location: %v", err)
+	}
+
+	az.Name, err = normalizeValue(az.Name)
+	if err != nil {
+		return fmt.Errorf("invalid name: %v", err)
+	}
+
+	az.VMID, err = normalizeValue(az.VMID)
+	if err != nil {
+		return fmt.Errorf("invalid VM ID: %v", err)
+	}
+
+	az.VMSize, err = normalizeValue(az.VMSize)
+	if err != nil {
+		return fmt.Errorf("invalid VM size: %v", err)
+	}
+
+	return
+}

+ 152 - 0
vendor/github.com/newrelic/go-agent/internal/utilization/gcp.go

@@ -0,0 +1,152 @@
+package utilization
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+const (
+	gcpHostname     = "metadata.google.internal"
+	gcpEndpointPath = "/computeMetadata/v1/instance/?recursive=true"
+	gcpEndpoint     = "http://" + gcpHostname + gcpEndpointPath
+)
+
+func gatherGCP(util *Data, client *http.Client) error {
+	gcp, err := getGCP(client)
+	if err != nil {
+		// Only return the error here if it is unexpected to prevent
+		// warning customers who aren't running GCP about a timeout.
+		if _, ok := err.(unexpectedGCPErr); ok {
+			return err
+		}
+		return nil
+	}
+	util.Vendors.GCP = gcp
+
+	return nil
+}
+
+// numericString is used rather than json.Number because we want the output when
+// marshalled to be a string, rather than a number.
+type numericString string
+
+func (ns *numericString) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ns.String())
+}
+
+func (ns *numericString) String() string {
+	return string(*ns)
+}
+
+func (ns *numericString) UnmarshalJSON(data []byte) error {
+	var n int64
+
+	// Try to unmarshal as an integer first.
+	if err := json.Unmarshal(data, &n); err == nil {
+		*ns = numericString(fmt.Sprintf("%d", n))
+		return nil
+	}
+
+	// Otherwise, unmarshal as a string, and verify that it's numeric (for our
+	// definition of numeric, which is actually integral).
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+
+	for _, r := range s {
+		if r < '0' || r > '9' {
+			return fmt.Errorf("invalid numeric character: %c", r)
+		}
+	}
+
+	*ns = numericString(s)
+	return nil
+}
+
+type gcp struct {
+	ID          numericString `json:"id"`
+	MachineType string        `json:"machineType,omitempty"`
+	Name        string        `json:"name,omitempty"`
+	Zone        string        `json:"zone,omitempty"`
+}
+
+type unexpectedGCPErr struct{ e error }
+
+func (e unexpectedGCPErr) Error() string {
+	return fmt.Sprintf("unexpected GCP error: %v", e.e)
+}
+
+func getGCP(client *http.Client) (*gcp, error) {
+	// GCP's metadata service requires the Metadata-Flavor header to be set
+	// on every request.
+	req, err := http.NewRequest("GET", gcpEndpoint, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Metadata-Flavor", "Google")
+
+	response, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer response.Body.Close()
+
+	if response.StatusCode != 200 {
+		return nil, unexpectedGCPErr{e: fmt.Errorf("response code %d", response.StatusCode)}
+	}
+
+	data, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		return nil, unexpectedGCPErr{e: err}
+	}
+
+	g := &gcp{}
+	if err := json.Unmarshal(data, g); err != nil {
+		return nil, unexpectedGCPErr{e: err}
+	}
+
+	if err := g.validate(); err != nil {
+		return nil, unexpectedGCPErr{e: err}
+	}
+
+	return g, nil
+}
+
+func (g *gcp) validate() (err error) {
+	id, err := normalizeValue(g.ID.String())
+	if err != nil {
+		return fmt.Errorf("invalid ID: %v", err)
+	}
+	g.ID = numericString(id)
+
+	mt, err := normalizeValue(g.MachineType)
+	if err != nil {
+		return fmt.Errorf("invalid machine type: %v", err)
+	}
+	g.MachineType = stripGCPPrefix(mt)
+
+	g.Name, err = normalizeValue(g.Name)
+	if err != nil {
+		return fmt.Errorf("invalid name: %v", err)
+	}
+
+	zone, err := normalizeValue(g.Zone)
+	if err != nil {
+		return fmt.Errorf("invalid zone: %v", err)
+	}
+	g.Zone = stripGCPPrefix(zone)
+
+	return
+}
+
+// stripGCPPrefix keeps only the last element of a slash-separated path,
+// which is all we need for the machine type and zone values.
+func stripGCPPrefix(s string) string {
+	parts := strings.Split(s, "/")
+	return parts[len(parts)-1]
+}
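
To see numericString's tolerance in action, a self-contained sketch that accepts the instance ID as either a JSON number or a numeric string (the digit-validation loop is elided for brevity):

package main

import (
	"encoding/json"
	"fmt"
)

type numStr string

// UnmarshalJSON accepts either a JSON integer or a string.
func (ns *numStr) UnmarshalJSON(data []byte) error {
	var n int64
	if err := json.Unmarshal(data, &n); err == nil {
		*ns = numStr(fmt.Sprintf("%d", n))
		return nil
	}
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*ns = numStr(s)
	return nil
}

func main() {
	var a, b struct {
		ID numStr `json:"id"`
	}
	json.Unmarshal([]byte(`{"id": 4520031799277581759}`), &a)
	json.Unmarshal([]byte(`{"id": "4520031799277581759"}`), &b)
	fmt.Println(a.ID == b.ID, a.ID) // true 4520031799277581759
}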

+ 80 - 0
vendor/github.com/newrelic/go-agent/internal/utilization/pcf.go

@@ -0,0 +1,80 @@
+package utilization
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+)
+
+type pcf struct {
+	InstanceGUID string `json:"cf_instance_guid,omitempty"`
+	InstanceIP   string `json:"cf_instance_ip,omitempty"`
+	MemoryLimit  string `json:"memory_limit,omitempty"`
+}
+
+func gatherPCF(util *Data, _ *http.Client) error {
+	pcf, err := getPCF(os.Getenv)
+	if err != nil {
+		// Only return the error here if it is unexpected to prevent
+		// warning customers who aren't running PCF about a timeout.
+		if _, ok := err.(unexpectedPCFErr); ok {
+			return err
+		}
+		return nil
+	}
+	util.Vendors.PCF = pcf
+
+	return nil
+}
+
+type unexpectedPCFErr struct{ e error }
+
+func (e unexpectedPCFErr) Error() string {
+	return fmt.Sprintf("unexpected PCF error: %v", e.e)
+}
+
+var (
+	errNoPCFVariables = errors.New("no PCF environment variables present")
+)
+
+func getPCF(initializer func(key string) string) (*pcf, error) {
+	p := &pcf{}
+
+	p.InstanceGUID = initializer("CF_INSTANCE_GUID")
+	p.InstanceIP = initializer("CF_INSTANCE_IP")
+	p.MemoryLimit = initializer("MEMORY_LIMIT")
+
+	if "" == p.InstanceGUID && "" == p.InstanceIP && "" == p.MemoryLimit {
+		return nil, errNoPCFVariables
+	}
+
+	if err := p.validate(); err != nil {
+		return nil, unexpectedPCFErr{e: err}
+	}
+
+	return p, nil
+}
+
+func (pcf *pcf) validate() (err error) {
+	pcf.InstanceGUID, err = normalizeValue(pcf.InstanceGUID)
+	if err != nil {
+		return fmt.Errorf("invalid instance GUID: %v", err)
+	}
+
+	pcf.InstanceIP, err = normalizeValue(pcf.InstanceIP)
+	if err != nil {
+		return fmt.Errorf("invalid instance IP: %v", err)
+	}
+
+	pcf.MemoryLimit, err = normalizeValue(pcf.MemoryLimit)
+	if err != nil {
+		return fmt.Errorf("invalid memory limit: %v", err)
+	}
+
+	if pcf.InstanceGUID == "" || pcf.InstanceIP == "" || pcf.MemoryLimit == "" {
+		err = errors.New("one or more environment variables are unavailable")
+	}
+
+	return
+}

+ 59 - 0
vendor/github.com/newrelic/go-agent/internal/utilization/provider.go

@@ -0,0 +1,59 @@
+package utilization
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// Helper constants, functions, and types common to multiple providers are
+// contained in this file.
+
+// Constants from the spec.
+const (
+	maxFieldValueSize = 255             // The maximum value size, in bytes.
+	providerTimeout   = 1 * time.Second // The maximum time an HTTP provider may block.
+)
+
+type validationError struct{ e error }
+
+func (a validationError) Error() string {
+	return a.e.Error()
+}
+
+func isValidationError(e error) bool {
+	_, is := e.(validationError)
+	return is
+}
+
+// normalizeValue normalizes string values per the utilization spec.
+func normalizeValue(s string) (string, error) {
+	out := strings.TrimSpace(s)
+
+	bytes := []byte(out)
+	if len(bytes) > maxFieldValueSize {
+		return "", validationError{fmt.Errorf("response is too long: got %d; expected <=%d", len(bytes), maxFieldValueSize)}
+	}
+
+	for i, r := range out {
+		if !isAcceptableRune(r) {
+			return "", validationError{fmt.Errorf("bad character %x at position %d in response", r, i)}
+		}
+	}
+
+	return out, nil
+}
+
+func isAcceptableRune(r rune) bool {
+	switch r {
+	case 0xFFFD:
+		return false // invalid UTF-8
+	case '_', ' ', '/', '.', '-':
+		return true
+	default:
+		return r > 0x7f || // still allows some invalid UTF-8, but that's the spec.
+			('0' <= r && r <= '9') ||
+			('a' <= r && r <= 'z') ||
+			('A' <= r && r <= 'Z')
+	}
+}
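
A compact check of which characters survive validation under these rules (mirroring isAcceptableRune):

package main

import "fmt"

// acceptable mirrors isAcceptableRune: ASCII alphanumerics, a small
// punctuation whitelist, and anything above 0x7f.
func acceptable(r rune) bool {
	switch r {
	case 0xFFFD:
		return false // invalid UTF-8
	case '_', ' ', '/', '.', '-':
		return true
	}
	return r > 0x7f ||
		('0' <= r && r <= '9') ||
		('a' <= r && r <= 'z') ||
		('A' <= r && r <= 'Z')
}

func main() {
	fmt.Println(acceptable('a'), acceptable('/'), acceptable('\n'), acceptable('é'))
	// true true false true
}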

+ 206 - 0
vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go

@@ -0,0 +1,206 @@
+// Package utilization implements the Utilization spec, available at
+// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md
+//
+package utilization
+
+import (
+	"net/http"
+	"runtime"
+	"sync"
+
+	"github.com/newrelic/go-agent/internal/logger"
+	"github.com/newrelic/go-agent/internal/sysinfo"
+)
+
+const (
+	metadataVersion = 3
+)
+
+// Config controls the behavior of utilization information capture.
+type Config struct {
+	DetectAWS         bool
+	DetectAzure       bool
+	DetectGCP         bool
+	DetectPCF         bool
+	DetectDocker      bool
+	LogicalProcessors int
+	TotalRAMMIB       int
+	BillingHostname   string
+}
+
+type override struct {
+	LogicalProcessors *int   `json:"logical_processors,omitempty"`
+	TotalRAMMIB       *int   `json:"total_ram_mib,omitempty"`
+	BillingHostname   string `json:"hostname,omitempty"`
+}
+
+// Data contains utilization system information.
+type Data struct {
+	MetadataVersion int `json:"metadata_version"`
+	// Although `runtime.NumCPU()` will never fail, this field is a pointer
+	// to facilitate the cross-agent tests.
+	LogicalProcessors *int      `json:"logical_processors"`
+	RAMMiB            *uint64   `json:"total_ram_mib"`
+	Hostname          string    `json:"hostname"`
+	BootID            string    `json:"boot_id,omitempty"`
+	Vendors           *vendors  `json:"vendors,omitempty"`
+	Config            *override `json:"config,omitempty"`
+}
+
+var (
+	sampleRAMMib    = uint64(1024)
+	sampleLogicProc = int(16)
+	// SampleData contains sample utilization data useful for testing.
+	SampleData = Data{
+		MetadataVersion:   metadataVersion,
+		LogicalProcessors: &sampleLogicProc,
+		RAMMiB:            &sampleRAMMib,
+		Hostname:          "my-hostname",
+	}
+)
+
+type docker struct {
+	ID string `json:"id,omitempty"`
+}
+
+type vendors struct {
+	AWS    *aws    `json:"aws,omitempty"`
+	Azure  *azure  `json:"azure,omitempty"`
+	GCP    *gcp    `json:"gcp,omitempty"`
+	PCF    *pcf    `json:"pcf,omitempty"`
+	Docker *docker `json:"docker,omitempty"`
+}
+
+func (v *vendors) isEmpty() bool {
+	return v.AWS == nil && v.Azure == nil && v.GCP == nil && v.PCF == nil && v.Docker == nil
+}
+
+func overrideFromConfig(config Config) *override {
+	ov := &override{}
+
+	if 0 != config.LogicalProcessors {
+		x := config.LogicalProcessors
+		ov.LogicalProcessors = &x
+	}
+	if 0 != config.TotalRAMMIB {
+		x := config.TotalRAMMIB
+		ov.TotalRAMMIB = &x
+	}
+	ov.BillingHostname = config.BillingHostname
+
+	if "" == ov.BillingHostname &&
+		nil == ov.LogicalProcessors &&
+		nil == ov.TotalRAMMIB {
+		ov = nil
+	}
+	return ov
+}
+
+// Gather gathers system utilization data.
+func Gather(config Config, lg logger.Logger) *Data {
+	client := &http.Client{
+		Timeout: providerTimeout,
+	}
+	return gatherWithClient(config, lg, client)
+}
+
+func gatherWithClient(config Config, lg logger.Logger, client *http.Client) *Data {
+	var wg sync.WaitGroup
+
+	cpu := runtime.NumCPU()
+	uDat := &Data{
+		MetadataVersion:   metadataVersion,
+		LogicalProcessors: &cpu,
+		Vendors:           &vendors{},
+	}
+
+	warnGatherError := func(datatype string, err error) {
+		lg.Debug("error gathering utilization data", map[string]interface{}{
+			"error":    err.Error(),
+			"datatype": datatype,
+		})
+	}
+
+	// This closure allows us to run each gather function in a separate goroutine
+	// and wait for them at the end by closing over the wg WaitGroup we
+	// instantiated at the start of the function.
+	goGather := func(datatype string, gather func(*Data, *http.Client) error) {
+		wg.Add(1)
+		go func() {
+			// Note that locking around uDat is not necessary since
+			// the WaitGroup acts as a memory barrier:
+			// https://groups.google.com/d/msg/golang-nuts/5oHzhzXCcmM/utEwIAApCQAJ
+			// Thus this code is fine as long as each goroutine
+			// modifies a different field of uDat.
+			defer wg.Done()
+			if err := gather(uDat, client); err != nil {
+				warnGatherError(datatype, err)
+			}
+		}()
+	}
+
+	// Kick off gathering which requires network calls in goroutines.
+
+	if config.DetectAWS {
+		goGather("aws", gatherAWS)
+	}
+
+	if config.DetectAzure {
+		goGather("azure", gatherAzure)
+	}
+
+	if config.DetectPCF {
+		goGather("pcf", gatherPCF)
+	}
+
+	if config.DetectGCP {
+		goGather("gcp", gatherGCP)
+	}
+
+	// Do non-network gathering sequentially since it is fast.
+
+	if id, err := sysinfo.BootID(); err != nil {
+		if err != sysinfo.ErrFeatureUnsupported {
+			warnGatherError("bootid", err)
+		}
+	} else {
+		uDat.BootID = id
+	}
+
+	if config.DetectDocker {
+		if id, err := sysinfo.DockerID(); err != nil {
+			if err != sysinfo.ErrFeatureUnsupported &&
+				err != sysinfo.ErrDockerNotFound {
+				warnGatherError("docker", err)
+			}
+		} else {
+			uDat.Vendors.Docker = &docker{ID: id}
+		}
+	}
+
+	if hostname, err := sysinfo.Hostname(); nil == err {
+		uDat.Hostname = hostname
+	} else {
+		warnGatherError("hostname", err)
+	}
+
+	if bts, err := sysinfo.PhysicalMemoryBytes(); nil == err {
+		mib := sysinfo.BytesToMebibytes(bts)
+		uDat.RAMMiB = &mib
+	} else {
+		warnGatherError("memory", err)
+	}
+
+	// Now we wait for everything!
+	wg.Wait()
+
+	// Override whatever needs to be overridden.
+	uDat.Config = overrideFromConfig(config)
+
+	if uDat.Vendors.isEmpty() {
+		// Per spec, we MUST NOT send any vendors hash if it's empty.
+		uDat.Vendors = nil
+	}
+
+	return uDat
+}
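
goGather is a standard fan-out/fan-in: each detector writes a distinct field of the shared struct, and wg.Wait() supplies the happens-before edge that makes those writes visible. The shape in miniature (illustrative names):

package main

import (
	"fmt"
	"sync"
)

type data struct{ a, b string }

func main() {
	var wg sync.WaitGroup
	d := &data{}

	// Each goroutine writes a distinct field, so no mutex is needed;
	// wg.Wait() provides the memory barrier before d is read.
	gather := func(f func(*data)) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f(d)
		}()
	}

	gather(func(d *data) { d.a = "aws" })
	gather(func(d *data) { d.b = "gcp" })

	wg.Wait()
	fmt.Println(*d)
}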

+ 668 - 0
vendor/github.com/newrelic/go-agent/internal_app.go

@@ -0,0 +1,668 @@
+package newrelic
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"net/http"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/newrelic/go-agent/internal"
+	"github.com/newrelic/go-agent/internal/logger"
+)
+
+var (
+	// NEW_RELIC_DEBUG_LOGGING can be set to anything to enable additional
+	// debug logging: the agent will log every transaction's data at info
+	// level.
+	envDebugLogging = "NEW_RELIC_DEBUG_LOGGING"
+	debugLogging    = os.Getenv(envDebugLogging)
+)
+
+type dataConsumer interface {
+	Consume(internal.AgentRunID, internal.Harvestable)
+}
+
+type appData struct {
+	id   internal.AgentRunID
+	data internal.Harvestable
+}
+
+type app struct {
+	config      Config
+	rpmControls internal.RpmControls
+	testHarvest *internal.Harvest
+
+	// placeholderRun is used when the application is not connected.
+	placeholderRun *appRun
+
+	// initiateShutdown is used to tell the processor to shutdown.
+	initiateShutdown chan struct{}
+
+	// shutdownStarted and shutdownComplete are closed by the processor
+	// goroutine to indicate the shutdown status.  Two channels are used so
+	// that the call of app.Shutdown() can block until shutdown has
+	// completed but other goroutines can exit when shutdown has started.
+	// This is not just an optimization: it prevents a deadlock if
+	// harvesting data during the shutdown fails and an attempt is made to
+	// merge the data into the next harvest.
+	shutdownStarted  chan struct{}
+	shutdownComplete chan struct{}
+
+	// Sends to these channels should not occur without a <-shutdownStarted
+	// select option to prevent deadlock.
+	dataChan           chan appData
+	collectorErrorChan chan error
+	connectChan        chan *appRun
+
+	harvestTicker *time.Ticker
+
+	// This mutex protects both `run` and `err`, both of which should only
+	// be accessed using getState and setState.
+	sync.RWMutex
+	// run is non-nil when the app is successfully connected.  It is
+	// immutable.
+	run *appRun
+	// err is non-nil if the application will never be connected again
+	// (disconnect, license exception, shutdown).
+	err error
+}
+
+// appRun contains information regarding a single connection session with the
+// collector.  It is immutable after creation at application connect.
+type appRun struct {
+	*internal.ConnectReply
+
+	// AttributeConfig is calculated on every connect since it depends on
+	// the security policies.
+	AttributeConfig *internal.AttributeConfig
+}
+
+func newAppRun(config Config, reply *internal.ConnectReply) *appRun {
+	return &appRun{
+		ConnectReply: reply,
+		AttributeConfig: internal.CreateAttributeConfig(internal.AttributeConfigInput{
+			Attributes:        convertAttributeDestinationConfig(config.Attributes),
+			ErrorCollector:    convertAttributeDestinationConfig(config.ErrorCollector.Attributes),
+			TransactionEvents: convertAttributeDestinationConfig(config.TransactionEvents.Attributes),
+			TransactionTracer: convertAttributeDestinationConfig(config.TransactionTracer.Attributes),
+		}, reply.SecurityPolicies.AttributesInclude.Enabled()),
+	}
+}
+
+func isFatalHarvestError(e error) bool {
+	return internal.IsDisconnect(e) ||
+		internal.IsLicenseException(e) ||
+		internal.IsRestartException(e)
+}
+
+func shouldSaveFailedHarvest(e error) bool {
+	if e == internal.ErrPayloadTooLarge || e == internal.ErrUnsupportedMedia {
+		return false
+	}
+	return true
+}
+
+func (app *app) doHarvest(h *internal.Harvest, harvestStart time.Time, run *appRun) {
+	h.CreateFinalMetrics()
+	h.Metrics = h.Metrics.ApplyRules(run.MetricRules)
+
+	payloads := h.Payloads(app.config.DistributedTracer.Enabled)
+	for _, p := range payloads {
+		cmd := p.EndpointMethod()
+		data, err := p.Data(run.RunID.String(), harvestStart)
+
+		if nil == data && nil == err {
+			continue
+		}
+
+		if nil == err {
+			call := internal.RpmCmd{
+				Collector: run.Collector,
+				RunID:     run.RunID.String(),
+				Name:      cmd,
+				Data:      data,
+			}
+
+			// The reply from harvest calls is always unused.
+			_, err = internal.CollectorRequest(call, app.rpmControls)
+		}
+
+		if nil == err {
+			continue
+		}
+
+		if isFatalHarvestError(err) {
+			select {
+			case app.collectorErrorChan <- err:
+			case <-app.shutdownStarted:
+			}
+			return
+		}
+
+		app.config.Logger.Warn("harvest failure", map[string]interface{}{
+			"cmd":   cmd,
+			"error": err.Error(),
+		})
+
+		if shouldSaveFailedHarvest(err) {
+			app.Consume(run.RunID, p)
+		}
+	}
+}
+
+func connectAttempt(app *app) (*appRun, error) {
+	reply, err := internal.ConnectAttempt(config{app.config}, app.config.SecurityPoliciesToken, app.rpmControls)
+	if nil != err {
+		return nil, err
+	}
+	return newAppRun(app.config, reply), nil
+}
+
+func (app *app) connectRoutine() {
+	for {
+		run, err := connectAttempt(app)
+		if nil == err {
+			select {
+			case app.connectChan <- run:
+			case <-app.shutdownStarted:
+			}
+			return
+		}
+
+		if internal.IsDisconnect(err) || internal.IsLicenseException(err) {
+			select {
+			case app.collectorErrorChan <- err:
+			case <-app.shutdownStarted:
+			}
+			return
+		}
+
+		app.config.Logger.Warn("application connect failure", map[string]interface{}{
+			"error": err.Error(),
+		})
+
+		time.Sleep(internal.ConnectBackoff)
+	}
+}
+
+func debug(data internal.Harvestable, lg Logger) {
+	now := time.Now()
+	h := internal.NewHarvest(now)
+	data.MergeIntoHarvest(h)
+	ps := h.Payloads(false)
+	for _, p := range ps {
+		cmd := p.EndpointMethod()
+		d, err := p.Data("agent run id", now)
+		if nil == d && nil == err {
+			continue
+		}
+		if nil != err {
+			lg.Info("integration", map[string]interface{}{
+				"cmd":   cmd,
+				"error": err.Error(),
+			})
+			continue
+		}
+		lg.Info("integration", map[string]interface{}{
+			"cmd":  cmd,
+			"data": internal.JSONString(d),
+		})
+	}
+}
+
+func processConnectMessages(run *appRun, lg Logger) {
+	for _, msg := range run.Messages {
+		event := "collector message"
+		cn := map[string]interface{}{"msg": msg.Message}
+
+		switch strings.ToLower(msg.Level) {
+		case "error":
+			lg.Error(event, cn)
+		case "warn":
+			lg.Warn(event, cn)
+		case "info":
+			lg.Info(event, cn)
+		case "debug", "verbose":
+			lg.Debug(event, cn)
+		}
+	}
+}
+
+func (app *app) process() {
+	// Both the harvest and the run are non-nil when the app is connected,
+	// and nil otherwise.
+	var h *internal.Harvest
+	var run *appRun
+
+	for {
+		select {
+		case <-app.harvestTicker.C:
+			if nil != run {
+				now := time.Now()
+				go app.doHarvest(h, now, run)
+				h = internal.NewHarvest(now)
+			}
+		case d := <-app.dataChan:
+			if nil != run && run.RunID == d.id {
+				d.data.MergeIntoHarvest(h)
+			}
+		case <-app.initiateShutdown:
+			close(app.shutdownStarted)
+
+			// Remove the run before merging any final data to
+			// ensure a bounded number of receives from dataChan.
+			app.setState(nil, errors.New("application shut down"))
+			app.harvestTicker.Stop()
+
+			if nil != run {
+				for done := false; !done; {
+					select {
+					case d := <-app.dataChan:
+						if run.RunID == d.id {
+							d.data.MergeIntoHarvest(h)
+						}
+					default:
+						done = true
+					}
+				}
+				app.doHarvest(h, time.Now(), run)
+			}
+
+			close(app.shutdownComplete)
+			return
+		case err := <-app.collectorErrorChan:
+			run = nil
+			h = nil
+			app.setState(nil, nil)
+
+			switch {
+			case internal.IsDisconnect(err):
+				app.setState(nil, err)
+				app.config.Logger.Error("application disconnected", map[string]interface{}{
+					"app": app.config.AppName,
+					"err": err.Error(),
+				})
+			case internal.IsLicenseException(err):
+				app.setState(nil, err)
+				app.config.Logger.Error("invalid license", map[string]interface{}{
+					"app":     app.config.AppName,
+					"license": app.config.License,
+				})
+			case internal.IsRestartException(err):
+				app.config.Logger.Info("application restarted", map[string]interface{}{
+					"app": app.config.AppName,
+				})
+				go app.connectRoutine()
+			}
+		case run = <-app.connectChan:
+			h = internal.NewHarvest(time.Now())
+			app.setState(run, nil)
+
+			app.config.Logger.Info("application connected", map[string]interface{}{
+				"app": app.config.AppName,
+				"run": run.RunID.String(),
+			})
+			processConnectMessages(run, app.config.Logger)
+		}
+	}
+}
+
+func (app *app) Shutdown(timeout time.Duration) {
+	if !app.config.Enabled {
+		return
+	}
+
+	select {
+	case app.initiateShutdown <- struct{}{}:
+	default:
+	}
+
+	// Block until shutdown is done or timeout occurs.
+	t := time.NewTimer(timeout)
+	select {
+	case <-app.shutdownComplete:
+	case <-t.C:
+	}
+	t.Stop()
+
+	app.config.Logger.Info("application shutdown", map[string]interface{}{
+		"app": app.config.AppName,
+	})
+}
+
+func convertAttributeDestinationConfig(c AttributeDestinationConfig) internal.AttributeDestinationConfig {
+	return internal.AttributeDestinationConfig{
+		Enabled: c.Enabled,
+		Include: c.Include,
+		Exclude: c.Exclude,
+	}
+}
+
+func runSampler(app *app, period time.Duration) {
+	previous := internal.GetSample(time.Now(), app.config.Logger)
+	t := time.NewTicker(period)
+	for {
+		select {
+		case now := <-t.C:
+			current := internal.GetSample(now, app.config.Logger)
+			run, _ := app.getState()
+			app.Consume(run.RunID, internal.GetStats(internal.Samples{
+				Previous: previous,
+				Current:  current,
+			}))
+			previous = current
+		case <-app.shutdownStarted:
+			t.Stop()
+			return
+		}
+	}
+}
+
+func (app *app) WaitForConnection(timeout time.Duration) error {
+	if !app.config.Enabled {
+		return nil
+	}
+	deadline := time.Now().Add(timeout)
+	pollPeriod := 50 * time.Millisecond
+
+	for {
+		run, err := app.getState()
+		if nil != err {
+			return err
+		}
+		if run.RunID != "" {
+			return nil
+		}
+		if time.Now().After(deadline) {
+			return fmt.Errorf("timed out after %s", timeout.String())
+		}
+		time.Sleep(pollPeriod)
+	}
+}
+
+func newApp(c Config) (Application, error) {
+	c = copyConfigReferenceFields(c)
+	if err := c.Validate(); nil != err {
+		return nil, err
+	}
+	if nil == c.Logger {
+		c.Logger = logger.ShimLogger{}
+	}
+	app := &app{
+		config: c,
+
+		placeholderRun: newAppRun(c, internal.ConnectReplyDefaults()),
+
+		// This channel must be buffered since Shutdown makes a
+		// non-blocking send attempt.
+		initiateShutdown: make(chan struct{}, 1),
+
+		shutdownStarted:    make(chan struct{}),
+		shutdownComplete:   make(chan struct{}),
+		connectChan:        make(chan *appRun, 1),
+		collectorErrorChan: make(chan error, 1),
+		dataChan:           make(chan appData, internal.AppDataChanSize),
+		rpmControls: internal.RpmControls{
+			License: c.License,
+			Client: &http.Client{
+				Transport: c.Transport,
+				Timeout:   internal.CollectorTimeout,
+			},
+			Logger:       c.Logger,
+			AgentVersion: Version,
+		},
+	}
+
+	app.config.Logger.Info("application created", map[string]interface{}{
+		"app":     app.config.AppName,
+		"version": Version,
+		"enabled": app.config.Enabled,
+	})
+
+	if !app.config.Enabled {
+		return app, nil
+	}
+
+	app.harvestTicker = time.NewTicker(internal.HarvestPeriod)
+
+	go app.process()
+	go app.connectRoutine()
+
+	if app.config.RuntimeSampler.Enabled {
+		go runSampler(app, internal.RuntimeSamplerPeriod)
+	}
+
+	return app, nil
+}
+
+type expectApp interface {
+	internal.Expect
+	Application
+}
+
+func newTestApp(replyfn func(*internal.ConnectReply), cfg Config) (expectApp, error) {
+	cfg.Enabled = false
+	application, err := newApp(cfg)
+	if nil != err {
+		return nil, err
+	}
+	app := application.(*app)
+	if nil != replyfn {
+		replyfn(app.placeholderRun.ConnectReply)
+		app.placeholderRun = newAppRun(cfg, app.placeholderRun.ConnectReply)
+	}
+	app.testHarvest = internal.NewHarvest(time.Now())
+
+	return app, nil
+}
+
+func (app *app) getState() (*appRun, error) {
+	app.RLock()
+	defer app.RUnlock()
+
+	run := app.run
+	if nil == run {
+		run = app.placeholderRun
+	}
+	return run, app.err
+}
+
+func (app *app) setState(run *appRun, err error) {
+	app.Lock()
+	defer app.Unlock()
+
+	app.run = run
+	app.err = err
+}
+
+func transportTypeFromRequest(r *http.Request) TransportType {
+	if strings.HasPrefix(r.Proto, "HTTP") {
+		if r.TLS != nil {
+			return TransportHTTPS
+		}
+		return TransportHTTP
+	}
+	return TransportUnknown
+}
+
+// StartTransaction implements newrelic.Application's StartTransaction.
+func (app *app) StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction {
+	run, _ := app.getState()
+	txn := upgradeTxn(newTxn(txnInput{
+		Config:     app.config,
+		Reply:      run.ConnectReply,
+		W:          w,
+		Consumer:   app,
+		attrConfig: run.AttributeConfig,
+	}, r, name))
+
+	if nil != r {
+		if p := r.Header.Get(DistributedTracePayloadHeader); p != "" {
+			txn.AcceptDistributedTracePayload(transportTypeFromRequest(r), p)
+		}
+	}
+	return txn
+}
+
+var (
+	errHighSecurityEnabled        = errors.New("high security enabled")
+	errCustomEventsDisabled       = errors.New("custom events disabled")
+	errCustomEventsRemoteDisabled = errors.New("custom events disabled by server")
+)
+
+// RecordCustomEvent implements newrelic.Application's RecordCustomEvent.
+func (app *app) RecordCustomEvent(eventType string, params map[string]interface{}) error {
+	if app.config.HighSecurity {
+		return errHighSecurityEnabled
+	}
+
+	if !app.config.CustomInsightsEvents.Enabled {
+		return errCustomEventsDisabled
+	}
+
+	event, e := internal.CreateCustomEvent(eventType, params, time.Now())
+	if nil != e {
+		return e
+	}
+
+	run, _ := app.getState()
+	if !run.CollectCustomEvents {
+		return errCustomEventsRemoteDisabled
+	}
+
+	if !run.SecurityPolicies.CustomEvents.Enabled() {
+		return errSecurityPolicy
+	}
+
+	app.Consume(run.RunID, event)
+
+	return nil
+}
+
+var (
+	errMetricInf       = errors.New("invalid metric value: inf")
+	errMetricNaN       = errors.New("invalid metric value: NaN")
+	errMetricNameEmpty = errors.New("missing metric name")
+)
+
+// RecordCustomMetric implements newrelic.Application's RecordCustomMetric.
+func (app *app) RecordCustomMetric(name string, value float64) error {
+	if math.IsNaN(value) {
+		return errMetricNaN
+	}
+	if math.IsInf(value, 0) {
+		return errMetricInf
+	}
+	if "" == name {
+		return errMetricNameEmpty
+	}
+	run, _ := app.getState()
+	app.Consume(run.RunID, internal.CustomMetric{
+		RawInputName: name,
+		Value:        value,
+	})
+	return nil
+}
+
+func (app *app) Consume(id internal.AgentRunID, data internal.Harvestable) {
+	if "" != debugLogging {
+		debug(data, app.config.Logger)
+	}
+
+	if nil != app.testHarvest {
+		data.MergeIntoHarvest(app.testHarvest)
+		return
+	}
+
+	if "" == id {
+		return
+	}
+
+	select {
+	case app.dataChan <- appData{id, data}:
+	case <-app.shutdownStarted:
+	}
+}
+
+func (app *app) ExpectCustomEvents(t internal.Validator, want []internal.WantEvent) {
+	internal.ExpectCustomEvents(internal.ExtendValidator(t, "custom events"), app.testHarvest.CustomEvents, want)
+}
+
+func (app *app) ExpectErrors(t internal.Validator, want []internal.WantError) {
+	t = internal.ExtendValidator(t, "traced errors")
+	internal.ExpectErrors(t, app.testHarvest.ErrorTraces, want)
+}
+
+func (app *app) ExpectErrorEvents(t internal.Validator, want []internal.WantEvent) {
+	t = internal.ExtendValidator(t, "error events")
+	internal.ExpectErrorEvents(t, app.testHarvest.ErrorEvents, want)
+}
+
+func (app *app) ExpectErrorEventsPresent(t internal.Validator, want []internal.WantEvent) {
+	t = internal.ExtendValidator(t, "error events")
+	internal.ExpectErrorEventsPresent(t, app.testHarvest.ErrorEvents, want)
+}
+
+func (app *app) ExpectErrorEventsAbsent(t internal.Validator, names []string) {
+	t = internal.ExtendValidator(t, "error events")
+	internal.ExpectErrorEventsAbsent(t, app.testHarvest.ErrorEvents, names)
+}
+
+func (app *app) ExpectSpanEvents(t internal.Validator, want []internal.WantEvent) {
+	t = internal.ExtendValidator(t, "txn events")
+	internal.ExpectSpanEvents(t, app.testHarvest.SpanEvents, want)
+}
+
+func (app *app) ExpectSpanEventsPresent(t internal.Validator, want []internal.WantEvent) {
+	t = internal.ExtendValidator(t, "span events")
+	internal.ExpectSpanEventsPresent(t, app.testHarvest.SpanEvents, want)
+}
+
+func (app *app) ExpectSpanEventsAbsent(t internal.Validator, names []string) {
+	t = internal.ExtendValidator(t, "span events")
+	internal.ExpectSpanEventsAbsent(t, app.testHarvest.SpanEvents, names)
+}
+
+func (app *app) ExpectSpanEventsCount(t internal.Validator, c int) {
+	t = internal.ExtendValidator(t, "span events")
+	internal.ExpectSpanEventsCount(t, app.testHarvest.SpanEvents, c)
+}
+
+func (app *app) ExpectTxnEvents(t internal.Validator, want []internal.WantEvent) {
+	t = internal.ExtendValidator(t, "txn events")
+	internal.ExpectTxnEvents(t, app.testHarvest.TxnEvents, want)
+}
+
+func (app *app) ExpectTxnEventsPresent(t internal.Validator, want []internal.WantEvent) {
+	t = internal.ExtendValidator(t, "txn events")
+	internal.ExpectTxnEventsPresent(t, app.testHarvest.TxnEvents, want)
+}
+
+func (app *app) ExpectTxnEventsAbsent(t internal.Validator, names []string) {
+	t = internal.ExtendValidator(t, "txn events")
+	internal.ExpectTxnEventsAbsent(t, app.testHarvest.TxnEvents, names)
+}
+
+func (app *app) ExpectMetrics(t internal.Validator, want []internal.WantMetric) {
+	t = internal.ExtendValidator(t, "metrics")
+	internal.ExpectMetrics(t, app.testHarvest.Metrics, want)
+}
+
+func (app *app) ExpectMetricsPresent(t internal.Validator, want []internal.WantMetric) {
+	t = internal.ExtendValidator(t, "metrics")
+	internal.ExpectMetricsPresent(t, app.testHarvest.Metrics, want)
+}
+
+func (app *app) ExpectTxnTraces(t internal.Validator, want []internal.WantTxnTrace) {
+	t = internal.ExtendValidator(t, "txn traces")
+	internal.ExpectTxnTraces(t, app.testHarvest.TxnTraces, want)
+}
+
+func (app *app) ExpectSlowQueries(t internal.Validator, want []internal.WantSlowQuery) {
+	t = internal.ExtendValidator(t, "slow queries")
+	internal.ExpectSlowQueries(t, app.testHarvest.SlowSQLs, want)
+}
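
For reference, a minimal sketch of the public surface these methods back (the license key is a 40-character placeholder; error handling trimmed):

package main

import (
	"time"

	newrelic "github.com/newrelic/go-agent"
)

func main() {
	cfg := newrelic.NewConfig("Example App", "__40_character_license_key_placeholder__")
	app, err := newrelic.NewApplication(cfg)
	if err != nil {
		panic(err)
	}

	// Optionally block (up to 5s) until the first connect completes.
	if err := app.WaitForConnection(5 * time.Second); err != nil {
		panic(err)
	}

	app.RecordCustomEvent("Checkout", map[string]interface{}{"total": 12.50})
	app.RecordCustomMetric("QueueDepth", 3) // reported as Custom/QueueDepth

	// Flush any pending data before exiting.
	app.Shutdown(10 * time.Second)
}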

+ 158 - 0
vendor/github.com/newrelic/go-agent/internal_config.go

@@ -0,0 +1,158 @@
+package newrelic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+
+	"github.com/newrelic/go-agent/internal"
+	"github.com/newrelic/go-agent/internal/logger"
+	"github.com/newrelic/go-agent/internal/utilization"
+)
+
+func copyDestConfig(c AttributeDestinationConfig) AttributeDestinationConfig {
+	cp := c
+	if nil != c.Include {
+		cp.Include = make([]string, len(c.Include))
+		copy(cp.Include, c.Include)
+	}
+	if nil != c.Exclude {
+		cp.Exclude = make([]string, len(c.Exclude))
+		copy(cp.Exclude, c.Exclude)
+	}
+	return cp
+}
+
+func copyConfigReferenceFields(cfg Config) Config {
+	cp := cfg
+	if nil != cfg.Labels {
+		cp.Labels = make(map[string]string, len(cfg.Labels))
+		for key, val := range cfg.Labels {
+			cp.Labels[key] = val
+		}
+	}
+	if nil != cfg.ErrorCollector.IgnoreStatusCodes {
+		ignored := make([]int, len(cfg.ErrorCollector.IgnoreStatusCodes))
+		copy(ignored, cfg.ErrorCollector.IgnoreStatusCodes)
+		cp.ErrorCollector.IgnoreStatusCodes = ignored
+	}
+
+	cp.Attributes = copyDestConfig(cfg.Attributes)
+	cp.ErrorCollector.Attributes = copyDestConfig(cfg.ErrorCollector.Attributes)
+	cp.TransactionEvents.Attributes = copyDestConfig(cfg.TransactionEvents.Attributes)
+	cp.TransactionTracer.Attributes = copyDestConfig(cfg.TransactionTracer.Attributes)
+
+	return cp
+}
+
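copyConfigReferenceFields exists because a shallow struct copy still shares slice and map backing storage with the original; without the deep copies above, a consumer mutating its Config after startup would silently change the agent's snapshot. A minimal, self-contained sketch of that hazard (all names here are illustrative, not part of the agent):

    package main

    import "fmt"

    type cfg struct{ ignore []int }

    func main() {
        orig := cfg{ignore: []int{404}}

        shallow := orig          // copies the struct, but both values share one backing array
        shallow.ignore[0] = 500  // also visible through orig
        fmt.Println(orig.ignore) // [500]

        deep := orig
        deep.ignore = append([]int(nil), orig.ignore...) // independent copy, as in copyConfigReferenceFields
        deep.ignore[0] = 404
        fmt.Println(orig.ignore) // still [500]: the deep copy is isolated
    }
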
+const (
+	agentLanguage = "go"
+)
+
+func transportSetting(t http.RoundTripper) interface{} {
+	if nil == t {
+		return nil
+	}
+	return fmt.Sprintf("%T", t)
+}
+
+func loggerSetting(lg Logger) interface{} {
+	if nil == lg {
+		return nil
+	}
+	if _, ok := lg.(logger.ShimLogger); ok {
+		return nil
+	}
+	return fmt.Sprintf("%T", lg)
+}
+
+const (
+	// https://source.datanerd.us/agents/agent-specs/blob/master/Custom-Host-Names.md
+	hostByteLimit = 255
+)
+
+type settings Config
+
+func (s settings) MarshalJSON() ([]byte, error) {
+	c := Config(s)
+	transport := c.Transport
+	c.Transport = nil
+	logger := c.Logger
+	c.Logger = nil
+
+	js, err := json.Marshal(c)
+	if nil != err {
+		return nil, err
+	}
+	fields := make(map[string]interface{})
+	err = json.Unmarshal(js, &fields)
+	if nil != err {
+		return nil, err
+	}
+	// The License field is deleted here rather than suppressed with a
+	// `json:"-"` tag because we want to allow consumers to populate
+	// Config from JSON.
+	delete(fields, `License`)
+	fields[`Transport`] = transportSetting(transport)
+	fields[`Logger`] = loggerSetting(logger)
+	return json.Marshal(fields)
+}
+
+func configConnectJSONInternal(c Config, pid int, util *utilization.Data, e internal.Environment, version string, securityPolicies *internal.SecurityPolicies) ([]byte, error) {
+	return json.Marshal([]interface{}{struct {
+		Pid              int                        `json:"pid"`
+		Language         string                     `json:"language"`
+		Version          string                     `json:"agent_version"`
+		Host             string                     `json:"host"`
+		HostDisplayName  string                     `json:"display_host,omitempty"`
+		Settings         interface{}                `json:"settings"`
+		AppName          []string                   `json:"app_name"`
+		HighSecurity     bool                       `json:"high_security"`
+		Labels           internal.Labels            `json:"labels,omitempty"`
+		Environment      internal.Environment       `json:"environment"`
+		Identifier       string                     `json:"identifier"`
+		Util             *utilization.Data          `json:"utilization"`
+		SecurityPolicies *internal.SecurityPolicies `json:"security_policies,omitempty"`
+	}{
+		Pid:             pid,
+		Language:        agentLanguage,
+		Version:         version,
+		Host:            internal.StringLengthByteLimit(util.Hostname, hostByteLimit),
+		HostDisplayName: internal.StringLengthByteLimit(c.HostDisplayName, hostByteLimit),
+		Settings:        (settings)(c),
+		AppName:         strings.Split(c.AppName, ";"),
+		HighSecurity:    c.HighSecurity,
+		Labels:          internal.Labels(c.Labels),
+		Environment:     e,
+		// This identifier field is provided to avoid:
+		// https://newrelic.atlassian.net/browse/DSCORE-778
+		//
+		// This identifier is used by the collector to look up the real
+		// agent. If an identifier isn't provided, the collector will
+		// create its own based on the first appname, which prevents a
+		// single daemon from connecting "a;b" and "a;c" at the same
+		// time.
+		//
+		// Providing the identifier below works around this issue and
+		// allows users more flexibility in using application rollups.
+		Identifier:       c.AppName,
+		Util:             util,
+		SecurityPolicies: securityPolicies,
+	}})
+}
+
+// config allows CreateConnectJSON to be a method on a non-public type.
+type config struct{ Config }
+
+func (c config) CreateConnectJSON(securityPolicies *internal.SecurityPolicies) ([]byte, error) {
+	env := internal.NewEnvironment()
+	util := utilization.Gather(utilization.Config{
+		DetectAWS:         c.Utilization.DetectAWS,
+		DetectDocker:      c.Utilization.DetectDocker,
+		LogicalProcessors: c.Utilization.LogicalProcessors,
+		TotalRAMMIB:       c.Utilization.TotalRAMMIB,
+		BillingHostname:   c.Utilization.BillingHostname,
+	}, c.Logger)
+	return configConnectJSONInternal(c.Config, os.Getpid(), util, env, Version, securityPolicies)
+}

+ 121 - 0
vendor/github.com/newrelic/go-agent/internal_response_writer.go

@@ -0,0 +1,121 @@
+package newrelic
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+)
+
+const (
+	hasC = 1 << iota // CloseNotifier
+	hasF             // Flusher
+	hasH             // Hijacker
+	hasR             // ReaderFrom
+)
+
+type wrap struct{ *txn }
+type wrapR struct{ *txn }
+type wrapH struct{ *txn }
+type wrapHR struct{ *txn }
+type wrapF struct{ *txn }
+type wrapFR struct{ *txn }
+type wrapFH struct{ *txn }
+type wrapFHR struct{ *txn }
+type wrapC struct{ *txn }
+type wrapCR struct{ *txn }
+type wrapCH struct{ *txn }
+type wrapCHR struct{ *txn }
+type wrapCF struct{ *txn }
+type wrapCFR struct{ *txn }
+type wrapCFH struct{ *txn }
+type wrapCFHR struct{ *txn }
+
+func (x wrapC) CloseNotify() <-chan bool    { return x.W.(http.CloseNotifier).CloseNotify() }
+func (x wrapCR) CloseNotify() <-chan bool   { return x.W.(http.CloseNotifier).CloseNotify() }
+func (x wrapCH) CloseNotify() <-chan bool   { return x.W.(http.CloseNotifier).CloseNotify() }
+func (x wrapCHR) CloseNotify() <-chan bool  { return x.W.(http.CloseNotifier).CloseNotify() }
+func (x wrapCF) CloseNotify() <-chan bool   { return x.W.(http.CloseNotifier).CloseNotify() }
+func (x wrapCFR) CloseNotify() <-chan bool  { return x.W.(http.CloseNotifier).CloseNotify() }
+func (x wrapCFH) CloseNotify() <-chan bool  { return x.W.(http.CloseNotifier).CloseNotify() }
+func (x wrapCFHR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
+
+func (x wrapF) Flush()    { x.W.(http.Flusher).Flush() }
+func (x wrapFR) Flush()   { x.W.(http.Flusher).Flush() }
+func (x wrapFH) Flush()   { x.W.(http.Flusher).Flush() }
+func (x wrapFHR) Flush()  { x.W.(http.Flusher).Flush() }
+func (x wrapCF) Flush()   { x.W.(http.Flusher).Flush() }
+func (x wrapCFR) Flush()  { x.W.(http.Flusher).Flush() }
+func (x wrapCFH) Flush()  { x.W.(http.Flusher).Flush() }
+func (x wrapCFHR) Flush() { x.W.(http.Flusher).Flush() }
+
+func (x wrapH) Hijack() (net.Conn, *bufio.ReadWriter, error)    { return x.W.(http.Hijacker).Hijack() }
+func (x wrapHR) Hijack() (net.Conn, *bufio.ReadWriter, error)   { return x.W.(http.Hijacker).Hijack() }
+func (x wrapFH) Hijack() (net.Conn, *bufio.ReadWriter, error)   { return x.W.(http.Hijacker).Hijack() }
+func (x wrapFHR) Hijack() (net.Conn, *bufio.ReadWriter, error)  { return x.W.(http.Hijacker).Hijack() }
+func (x wrapCH) Hijack() (net.Conn, *bufio.ReadWriter, error)   { return x.W.(http.Hijacker).Hijack() }
+func (x wrapCHR) Hijack() (net.Conn, *bufio.ReadWriter, error)  { return x.W.(http.Hijacker).Hijack() }
+func (x wrapCFH) Hijack() (net.Conn, *bufio.ReadWriter, error)  { return x.W.(http.Hijacker).Hijack() }
+func (x wrapCFHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
+
+func (x wrapR) ReadFrom(r io.Reader) (int64, error)    { return x.W.(io.ReaderFrom).ReadFrom(r) }
+func (x wrapHR) ReadFrom(r io.Reader) (int64, error)   { return x.W.(io.ReaderFrom).ReadFrom(r) }
+func (x wrapFR) ReadFrom(r io.Reader) (int64, error)   { return x.W.(io.ReaderFrom).ReadFrom(r) }
+func (x wrapFHR) ReadFrom(r io.Reader) (int64, error)  { return x.W.(io.ReaderFrom).ReadFrom(r) }
+func (x wrapCR) ReadFrom(r io.Reader) (int64, error)   { return x.W.(io.ReaderFrom).ReadFrom(r) }
+func (x wrapCHR) ReadFrom(r io.Reader) (int64, error)  { return x.W.(io.ReaderFrom).ReadFrom(r) }
+func (x wrapCFR) ReadFrom(r io.Reader) (int64, error)  { return x.W.(io.ReaderFrom).ReadFrom(r) }
+func (x wrapCFHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
+
+func upgradeTxn(txn *txn) Transaction {
+	x := 0
+	if _, ok := txn.W.(http.CloseNotifier); ok {
+		x |= hasC
+	}
+	if _, ok := txn.W.(http.Flusher); ok {
+		x |= hasF
+	}
+	if _, ok := txn.W.(http.Hijacker); ok {
+		x |= hasH
+	}
+	if _, ok := txn.W.(io.ReaderFrom); ok {
+		x |= hasR
+	}
+
+	switch x {
+	default:
+		// Wrap the transaction even when no extra methods are needed so
+		// that error stack trace depth stays consistent.
+		return wrap{txn}
+	case hasR:
+		return wrapR{txn}
+	case hasH:
+		return wrapH{txn}
+	case hasH | hasR:
+		return wrapHR{txn}
+	case hasF:
+		return wrapF{txn}
+	case hasF | hasR:
+		return wrapFR{txn}
+	case hasF | hasH:
+		return wrapFH{txn}
+	case hasF | hasH | hasR:
+		return wrapFHR{txn}
+	case hasC:
+		return wrapC{txn}
+	case hasC | hasR:
+		return wrapCR{txn}
+	case hasC | hasH:
+		return wrapCH{txn}
+	case hasC | hasH | hasR:
+		return wrapCHR{txn}
+	case hasC | hasF:
+		return wrapCF{txn}
+	case hasC | hasF | hasR:
+		return wrapCFR{txn}
+	case hasC | hasF | hasH:
+		return wrapCFH{txn}
+	case hasC | hasF | hasH | hasR:
+		return wrapCFHR{txn}
+	}
+}
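
The sixteen wrapper types above cover every subset of the four optional interfaces because Go interface satisfaction is static: a single wrapper struct would either always advertise Flush/Hijack/CloseNotify/ReadFrom (even when the underlying writer lacks them) or never do so. A small sketch of the failure mode a single naive wrapper would have (standard library only):

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    // naive forwards the core ResponseWriter methods but nothing else, so any
    // http.Flusher support on the wrapped writer becomes invisible to callers.
    type naive struct{ http.ResponseWriter }

    func main() {
        rec := httptest.NewRecorder() // *httptest.ResponseRecorder implements http.Flusher

        var direct http.ResponseWriter = rec
        _, ok := direct.(http.Flusher)
        fmt.Println("unwrapped flushes:", ok) // true

        var wrapped http.ResponseWriter = naive{rec}
        _, ok = wrapped.(http.Flusher)
        fmt.Println("wrapped flushes:", ok) // false: the wrapper hid the capability
    }
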

+ 854 - 0
vendor/github.com/newrelic/go-agent/internal_txn.go

@@ -0,0 +1,854 @@
+package newrelic
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/newrelic/go-agent/internal"
+)
+
+type txnInput struct {
+	W          http.ResponseWriter
+	Config     Config
+	Reply      *internal.ConnectReply
+	Consumer   dataConsumer
+	attrConfig *internal.AttributeConfig
+}
+
+type txn struct {
+	txnInput
+	// This mutex is required since the consumer may call the public API
+	// interface functions from different goroutines.
+	sync.Mutex
+	// finished indicates whether or not End() has been called.  After
+	// finished has been set to true, no recording should occur.
+	finished           bool
+	numPayloadsCreated uint32
+
+	ignore bool
+
+	// wroteHeader prevents capturing multiple response code errors if the
+	// user erroneously calls WriteHeader multiple times.
+	wroteHeader bool
+
+	internal.TxnData
+}
+
+func newTxn(input txnInput, req *http.Request, name string) *txn {
+	txn := &txn{
+		txnInput: input,
+	}
+	txn.Start = time.Now()
+	txn.Name = name
+	txn.IsWeb = nil != req
+	txn.Attrs = internal.NewAttributes(input.attrConfig)
+
+	if input.Config.DistributedTracer.Enabled {
+		txn.BetterCAT.Enabled = true
+		txn.BetterCAT.Priority = internal.NewPriority()
+		txn.BetterCAT.ID = internal.NewSpanID()
+
+		// Calculate sampled at the beginning of the transaction (rather
+		// than lazily at payload creation time) because it controls the
+		// creation of span events.
+		txn.BetterCAT.Sampled = txn.Reply.AdaptiveSampler.ComputeSampled(txn.BetterCAT.Priority.Float32(), txn.Start)
+		if txn.BetterCAT.Sampled {
+			txn.BetterCAT.Priority += 1.0
+		}
+		txn.SpanEventsEnabled = input.Config.SpanEvents.Enabled
+	}
+
+	if nil != req {
+		txn.Queuing = internal.QueueDuration(req.Header, txn.Start)
+		internal.RequestAgentAttributes(txn.Attrs, req)
+	}
+	txn.Attrs.Agent.HostDisplayName = txn.Config.HostDisplayName
+	txn.TxnTrace.Enabled = txn.txnTracesEnabled()
+	txn.TxnTrace.SegmentThreshold = txn.Config.TransactionTracer.SegmentThreshold
+	txn.StackTraceThreshold = txn.Config.TransactionTracer.StackTraceThreshold
+	txn.SlowQueriesEnabled = txn.slowQueriesEnabled()
+	txn.SlowQueryThreshold = txn.Config.DatastoreTracer.SlowQuery.Threshold
+	if nil != req && nil != req.URL {
+		txn.CleanURL = internal.SafeURL(req.URL)
+	}
+
+	// Synthetics support is tied up with a transaction's Old CAT field,
+	// CrossProcess. To support Synthetics with either BetterCAT or Old CAT,
+	// initialize the CrossProcess field of the transaction, passing in
+	// the top-level configuration.
+	doOldCAT := txn.Config.CrossApplicationTracer.Enabled
+	noGUID := txn.Config.DistributedTracer.Enabled
+	txn.CrossProcess.InitFromHTTPRequest(doOldCAT, noGUID, input.Reply, req)
+
+	return txn
+}
+
+func (txn *txn) slowQueriesEnabled() bool {
+	return txn.Config.DatastoreTracer.SlowQuery.Enabled &&
+		txn.Reply.CollectTraces
+}
+
+func (txn *txn) txnTracesEnabled() bool {
+	return txn.Config.TransactionTracer.Enabled &&
+		txn.Reply.CollectTraces
+}
+
+func (txn *txn) txnEventsEnabled() bool {
+	return txn.Config.TransactionEvents.Enabled &&
+		txn.Reply.CollectAnalyticsEvents
+}
+
+func (txn *txn) errorEventsEnabled() bool {
+	return txn.Config.ErrorCollector.CaptureEvents &&
+		txn.Reply.CollectErrorEvents
+}
+
+func (txn *txn) freezeName() {
+	if txn.ignore || ("" != txn.FinalName) {
+		return
+	}
+
+	txn.FinalName = internal.CreateFullTxnName(txn.Name, txn.Reply, txn.IsWeb)
+	if "" == txn.FinalName {
+		txn.ignore = true
+	}
+}
+
+func (txn *txn) getsApdex() bool {
+	return txn.IsWeb
+}
+
+func (txn *txn) txnTraceThreshold() time.Duration {
+	if txn.Config.TransactionTracer.Threshold.IsApdexFailing {
+		return internal.ApdexFailingThreshold(txn.ApdexThreshold)
+	}
+	return txn.Config.TransactionTracer.Threshold.Duration
+}
+
+func (txn *txn) shouldSaveTrace() bool {
+	return txn.CrossProcess.IsSynthetics() ||
+		(txn.txnTracesEnabled() && (txn.Duration >= txn.txnTraceThreshold()))
+}
+
+func (txn *txn) MergeIntoHarvest(h *internal.Harvest) {
+
+	var priority internal.Priority
+	if txn.BetterCAT.Enabled {
+		priority = txn.BetterCAT.Priority
+	} else {
+		priority = internal.NewPriority()
+	}
+
+	internal.CreateTxnMetrics(&txn.TxnData, h.Metrics)
+	internal.MergeBreakdownMetrics(&txn.TxnData, h.Metrics)
+
+	if txn.txnEventsEnabled() {
+		// Allocate a new TxnEvent to prevent a reference to the large transaction.
+		alloc := new(internal.TxnEvent)
+		*alloc = txn.TxnData.TxnEvent
+		h.TxnEvents.AddTxnEvent(alloc, priority)
+	}
+
+	internal.MergeTxnErrors(&h.ErrorTraces, txn.Errors, txn.TxnEvent)
+
+	if txn.errorEventsEnabled() {
+		for _, e := range txn.Errors {
+			errEvent := &internal.ErrorEvent{
+				ErrorData: *e,
+				TxnEvent:  txn.TxnEvent,
+			}
+			// Since the stack trace is not used in error events, remove the reference
+			// to minimize memory.
+			errEvent.Stack = nil
+			h.ErrorEvents.Add(errEvent, priority)
+		}
+	}
+
+	if txn.shouldSaveTrace() {
+		h.TxnTraces.Witness(internal.HarvestTrace{
+			TxnEvent: txn.TxnEvent,
+			Trace:    txn.TxnTrace,
+		})
+	}
+
+	if nil != txn.SlowQueries {
+		h.SlowSQLs.Merge(txn.SlowQueries, txn.TxnEvent)
+	}
+
+	if txn.BetterCAT.Sampled && txn.Config.SpanEvents.Enabled {
+		h.SpanEvents.MergeFromTransaction(&txn.TxnData)
+	}
+}
+
+// TransportType's name field is not mutable outside of its package;
+// however, it still periodically needs to be read and assigned within
+// this package.  For testing purposes only.
+func getTransport(transport string) string {
+	var retVal string
+
+	switch transport {
+	case TransportHTTP.name:
+		retVal = TransportHTTP.name
+	case TransportHTTPS.name:
+		retVal = TransportHTTPS.name
+	case TransportKafka.name:
+		retVal = TransportKafka.name
+	case TransportJMS.name:
+		retVal = TransportJMS.name
+	case TransportIronMQ.name:
+		retVal = TransportIronMQ.name
+	case TransportAMQP.name:
+		retVal = TransportAMQP.name
+	case TransportQueue.name:
+		retVal = TransportQueue.name
+	case TransportOther.name:
+		retVal = TransportOther.name
+	case TransportUnknown.name:
+	default:
+		retVal = TransportUnknown.name
+	}
+	return retVal
+}
+
+func responseCodeIsError(cfg *Config, code int) bool {
+	if code < http.StatusBadRequest { // 400
+		return false
+	}
+	for _, ignoreCode := range cfg.ErrorCollector.IgnoreStatusCodes {
+		if code == ignoreCode {
+			return false
+		}
+	}
+	return true
+}
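
Status codes below 400 are never treated as errors here, and anything at or above 400 is, unless it appears in ErrorCollector.IgnoreStatusCodes. A hedged sketch of tuning that list from application code (the NewConfig/NewApplication constructors and the default ignore entry for 404 are assumptions about the agent's public defaults):

    package main

    import (
        "net/http"

        newrelic "github.com/newrelic/go-agent"
    )

    func main() {
        cfg := newrelic.NewConfig("Example App", "__YOUR_LICENSE_KEY__")
        // Keep recording 5xx errors, but stop treating 418 responses as
        // traced errors, in addition to the assumed default entry for 404.
        cfg.ErrorCollector.IgnoreStatusCodes = append(
            cfg.ErrorCollector.IgnoreStatusCodes, http.StatusTeapot)
        _, _ = newrelic.NewApplication(cfg)
    }
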
+
+func headersJustWritten(txn *txn, code int) {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return
+	}
+	if txn.wroteHeader {
+		return
+	}
+	txn.wroteHeader = true
+
+	internal.ResponseHeaderAttributes(txn.Attrs, txn.W.Header())
+	internal.ResponseCodeAttribute(txn.Attrs, code)
+
+	if responseCodeIsError(&txn.Config, code) {
+		e := internal.TxnErrorFromResponseCode(time.Now(), code)
+		e.Stack = internal.GetStackTrace(1)
+		txn.noticeErrorInternal(e)
+	}
+}
+
+func (txn *txn) responseHeader() http.Header {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return nil
+	}
+	if txn.wroteHeader {
+		return nil
+	}
+	if !txn.CrossProcess.Enabled {
+		return nil
+	}
+	if !txn.CrossProcess.IsInbound() {
+		return nil
+	}
+	txn.freezeName()
+	contentLength := internal.GetContentLengthFromHeader(txn.W.Header())
+
+	appData, err := txn.CrossProcess.CreateAppData(txn.FinalName, txn.Queuing, time.Since(txn.Start), contentLength)
+	if err != nil {
+		txn.Config.Logger.Debug("error generating outbound response header", map[string]interface{}{
+			"error": err,
+		})
+		return nil
+	}
+	return internal.AppDataToHTTPHeader(appData)
+}
+
+func addCrossProcessHeaders(txn *txn) {
+	// responseHeader() checks the wroteHeader field and returns a nil map if the
+	// header has been written, so we don't need a check here.
+	for key, values := range txn.responseHeader() {
+		for _, value := range values {
+			txn.W.Header().Add(key, value)
+		}
+	}
+}
+
+func (txn *txn) Header() http.Header { return txn.W.Header() }
+
+func (txn *txn) Write(b []byte) (int, error) {
+	// This is safe to call unconditionally, even if Write() is called multiple
+	// times; see also the commentary in addCrossProcessHeaders().
+	addCrossProcessHeaders(txn)
+
+	n, err := txn.W.Write(b)
+
+	headersJustWritten(txn, http.StatusOK)
+
+	return n, err
+}
+
+func (txn *txn) WriteHeader(code int) {
+	addCrossProcessHeaders(txn)
+
+	txn.W.WriteHeader(code)
+
+	headersJustWritten(txn, code)
+}
+
+func (txn *txn) End() error {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return errAlreadyEnded
+	}
+
+	txn.finished = true
+
+	r := recover()
+	if nil != r {
+		e := internal.TxnErrorFromPanic(time.Now(), r)
+		e.Stack = internal.GetStackTrace(0)
+		txn.noticeErrorInternal(e)
+	}
+
+	txn.Stop = time.Now()
+	txn.Duration = txn.Stop.Sub(txn.Start)
+	if children := internal.TracerRootChildren(&txn.TxnData); txn.Duration > children {
+		txn.Exclusive = txn.Duration - children
+	}
+
+	txn.freezeName()
+
+	// Finalise the CAT state.
+	if err := txn.CrossProcess.Finalise(txn.Name, txn.Config.AppName); err != nil {
+		txn.Config.Logger.Debug("error finalising the cross process state", map[string]interface{}{
+			"error": err,
+		})
+	}
+
+	// Assign apdexThreshold regardless of whether or not the transaction
+	// gets apdex since it may be used to calculate the trace threshold.
+	txn.ApdexThreshold = internal.CalculateApdexThreshold(txn.Reply, txn.FinalName)
+
+	if txn.getsApdex() {
+		if txn.HasErrors() {
+			txn.Zone = internal.ApdexFailing
+		} else {
+			txn.Zone = internal.CalculateApdexZone(txn.ApdexThreshold, txn.Duration)
+		}
+	} else {
+		txn.Zone = internal.ApdexNone
+	}
+
+	if txn.Config.Logger.DebugEnabled() {
+		txn.Config.Logger.Debug("transaction ended", map[string]interface{}{
+			"name":        txn.FinalName,
+			"duration_ms": txn.Duration.Seconds() * 1000.0,
+			"ignored":     txn.ignore,
+			"run":         txn.Reply.RunID,
+		})
+	}
+
+	if !txn.ignore {
+		txn.Consumer.Consume(txn.Reply.RunID, txn)
+	}
+
+	// Note that if a consumer uses `panic(nil)`, the panic will not
+	// propagate.
+	if nil != r {
+		panic(r)
+	}
+
+	return nil
+}
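
Note that End calls recover() itself, so it only observes a panic when it executes on the deferred path of the panicking goroutine; the idiomatic call site is therefore `defer txn.End()`. A sketch of that pattern (wrapWithTxn is a hypothetical helper, not part of the agent):

    package example

    import (
        "net/http"

        newrelic "github.com/newrelic/go-agent"
    )

    // wrapWithTxn shows the deferred-End pattern: if h panics, End recovers,
    // records the panic as an error, harvests the transaction, and re-panics.
    func wrapWithTxn(app newrelic.Application, name string, h http.HandlerFunc) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            txn := app.StartTransaction(name, w, r)
            defer txn.End()
            h(txn, r) // pass txn in place of w so response codes are instrumented
        }
    }
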
+
+func (txn *txn) AddAttribute(name string, value interface{}) error {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.Config.HighSecurity {
+		return errHighSecurityEnabled
+	}
+
+	if !txn.Reply.SecurityPolicies.CustomParameters.Enabled() {
+		return errSecurityPolicy
+	}
+
+	if txn.finished {
+		return errAlreadyEnded
+	}
+
+	return internal.AddUserAttribute(txn.Attrs, name, value, internal.DestAll)
+}
+
+var (
+	errorsLocallyDisabled  = errors.New("errors locally disabled")
+	errorsRemotelyDisabled = errors.New("errors remotely disabled")
+	errNilError            = errors.New("nil error")
+	errAlreadyEnded        = errors.New("transaction has already ended")
+	errSecurityPolicy      = errors.New("disabled by security policy")
+)
+
+const (
+	highSecurityErrorMsg   = "message removed by high security setting"
+	securityPolicyErrorMsg = "message removed by security policy"
+)
+
+func (txn *txn) noticeErrorInternal(err internal.ErrorData) error {
+	if !txn.Config.ErrorCollector.Enabled {
+		return errorsLocallyDisabled
+	}
+
+	if !txn.Reply.CollectErrors {
+		return errorsRemotelyDisabled
+	}
+
+	if nil == txn.Errors {
+		txn.Errors = internal.NewTxnErrors(internal.MaxTxnErrors)
+	}
+
+	if txn.Config.HighSecurity {
+		err.Msg = highSecurityErrorMsg
+	}
+
+	if !txn.Reply.SecurityPolicies.AllowRawExceptionMessages.Enabled() {
+		err.Msg = securityPolicyErrorMsg
+	}
+
+	txn.Errors.Add(err)
+	txn.TxnData.TxnEvent.HasError = true //mark transaction as having an error
+	return nil
+}
+
+var (
+	errTooManyErrorAttributes = fmt.Errorf("too many extra attributes: limit is %d",
+		internal.AttributeErrorLimit)
+)
+
+func (txn *txn) NoticeError(err error) error {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return errAlreadyEnded
+	}
+
+	if nil == err {
+		return errNilError
+	}
+
+	e := internal.ErrorData{
+		When: time.Now(),
+		Msg:  err.Error(),
+	}
+	if ec, ok := err.(ErrorClasser); ok {
+		e.Klass = ec.ErrorClass()
+	}
+	if "" == e.Klass {
+		e.Klass = reflect.TypeOf(err).String()
+	}
+	if st, ok := err.(StackTracer); ok {
+		e.Stack = st.StackTrace()
+		// Note that if the provided stack trace is excessive in length,
+		// it will be truncated during JSON creation.
+	}
+	if nil == e.Stack {
+		e.Stack = internal.GetStackTrace(2)
+	}
+
+	if ea, ok := err.(ErrorAttributer); ok && !txn.Config.HighSecurity && txn.Reply.SecurityPolicies.CustomParameters.Enabled() {
+		unvetted := ea.ErrorAttributes()
+		if len(unvetted) > internal.AttributeErrorLimit {
+			return errTooManyErrorAttributes
+		}
+
+		e.ExtraAttributes = make(map[string]interface{})
+		for key, val := range unvetted {
+			val, errr := internal.ValidateUserAttribute(key, val)
+			if nil != errr {
+				return errr
+			}
+			e.ExtraAttributes[key] = val
+		}
+	}
+
+	return txn.noticeErrorInternal(e)
+}
+
+func (txn *txn) SetName(name string) error {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return errAlreadyEnded
+	}
+
+	txn.Name = name
+	return nil
+}
+
+func (txn *txn) Ignore() error {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return errAlreadyEnded
+	}
+	txn.ignore = true
+	return nil
+}
+
+func (txn *txn) StartSegmentNow() SegmentStartTime {
+	var s internal.SegmentStartTime
+	txn.Lock()
+	if !txn.finished {
+		s = internal.StartSegment(&txn.TxnData, time.Now())
+	}
+	txn.Unlock()
+	return SegmentStartTime{
+		segment: segment{
+			start: s,
+			txn:   txn,
+		},
+	}
+}
+
+type segment struct {
+	start internal.SegmentStartTime
+	txn   *txn
+}
+
+func endSegment(s *Segment) error {
+	txn := s.StartTime.txn
+	if nil == txn {
+		return nil
+	}
+	var err error
+	txn.Lock()
+	if txn.finished {
+		err = errAlreadyEnded
+	} else {
+		err = internal.EndBasicSegment(&txn.TxnData, s.StartTime.start, time.Now(), s.Name)
+	}
+	txn.Unlock()
+	return err
+}
+
+func endDatastore(s *DatastoreSegment) error {
+	txn := s.StartTime.txn
+	if nil == txn {
+		return nil
+	}
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return errAlreadyEnded
+	}
+	if txn.Config.HighSecurity {
+		s.QueryParameters = nil
+	}
+	if !txn.Config.DatastoreTracer.QueryParameters.Enabled {
+		s.QueryParameters = nil
+	}
+	if txn.Reply.SecurityPolicies.RecordSQL.IsSet() {
+		s.QueryParameters = nil
+		if !txn.Reply.SecurityPolicies.RecordSQL.Enabled() {
+			s.ParameterizedQuery = ""
+		}
+	}
+	if !txn.Config.DatastoreTracer.DatabaseNameReporting.Enabled {
+		s.DatabaseName = ""
+	}
+	if !txn.Config.DatastoreTracer.InstanceReporting.Enabled {
+		s.Host = ""
+		s.PortPathOrID = ""
+	}
+	return internal.EndDatastoreSegment(internal.EndDatastoreParams{
+		Tracer:             &txn.TxnData,
+		Start:              s.StartTime.start,
+		Now:                time.Now(),
+		Product:            string(s.Product),
+		Collection:         s.Collection,
+		Operation:          s.Operation,
+		ParameterizedQuery: s.ParameterizedQuery,
+		QueryParameters:    s.QueryParameters,
+		Host:               s.Host,
+		PortPathOrID:       s.PortPathOrID,
+		Database:           s.DatabaseName,
+	})
+}
+
+func externalSegmentMethod(s *ExternalSegment) string {
+	r := s.Request
+
+	// Is this a client request?
+	if nil != s.Response && nil != s.Response.Request {
+		r = s.Response.Request
+
+		// Golang's http package states that when a client's
+		// Request has an empty string for Method, the
+		// method is GET.
+		if "" == r.Method {
+			return "GET"
+		}
+	}
+	if nil == r {
+		return ""
+	}
+	return r.Method
+}
+
+func externalSegmentURL(s *ExternalSegment) (*url.URL, error) {
+	if "" != s.URL {
+		return url.Parse(s.URL)
+	}
+	r := s.Request
+	if nil != s.Response && nil != s.Response.Request {
+		r = s.Response.Request
+	}
+	if r != nil {
+		return r.URL, nil
+	}
+	return nil, nil
+}
+
+func endExternal(s *ExternalSegment) error {
+	txn := s.StartTime.txn
+	if nil == txn {
+		return nil
+	}
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return errAlreadyEnded
+	}
+	m := externalSegmentMethod(s)
+	u, err := externalSegmentURL(s)
+	if nil != err {
+		return err
+	}
+	return internal.EndExternalSegment(&txn.TxnData, s.StartTime.start, time.Now(), u, m, s.Response)
+}
+
+// oldCATOutboundHeaders generates the Old CAT and Synthetics headers, depending
+// on whether Old CAT is enabled or any Synthetics functionality has been
+// triggered in the agent.
+func oldCATOutboundHeaders(txn *txn) http.Header {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if txn.finished {
+		return http.Header{}
+	}
+
+	metadata, err := txn.CrossProcess.CreateCrossProcessMetadata(txn.Name, txn.Config.AppName)
+	if err != nil {
+		txn.Config.Logger.Debug("error generating outbound headers", map[string]interface{}{
+			"error": err,
+		})
+
+		// It's possible for CreateCrossProcessMetadata() to error and still have a
+		// Synthetics header, so we'll still fall through to returning headers
+		// based on whatever metadata was returned.
+	}
+
+	return internal.MetadataToHTTPHeader(metadata)
+}
+
+func outboundHeaders(s *ExternalSegment) http.Header {
+	txn := s.StartTime.txn
+
+	if nil == txn {
+		return http.Header{}
+	}
+
+	hdr := oldCATOutboundHeaders(txn)
+
+	// hdr may be empty, or it may contain headers.  If DistributedTracer
+	// is enabled, add the payload header to the existing hdr.
+	if p := txn.CreateDistributedTracePayload().HTTPSafe(); "" != p {
+		hdr.Add(DistributedTracePayloadHeader, p)
+		return hdr
+	}
+
+	return hdr
+}
+
+const (
+	maxSampledDistributedPayloads = 35
+)
+
+type shimPayload struct{}
+
+func (s shimPayload) Text() string     { return "" }
+func (s shimPayload) HTTPSafe() string { return "" }
+
+func (txn *txn) CreateDistributedTracePayload() (payload DistributedTracePayload) {
+	payload = shimPayload{}
+
+	txn.Lock()
+	defer txn.Unlock()
+
+	if !txn.BetterCAT.Enabled {
+		return
+	}
+
+	if txn.finished {
+		txn.CreatePayloadException = true
+		return
+	}
+
+	if "" == txn.Reply.PrimaryAppID {
+		// Return a shimPayload if the application is not yet connected.
+		return
+	}
+
+	txn.numPayloadsCreated++
+
+	var p internal.Payload
+	p.Type = internal.CallerType
+	p.Account = txn.Reply.AccountID
+
+	p.App = txn.Reply.PrimaryAppID
+	p.TracedID = txn.BetterCAT.TraceID()
+	p.Priority = txn.BetterCAT.Priority
+	p.Timestamp.Set(time.Now())
+	p.TransactionID = txn.BetterCAT.ID // Set the transaction ID to the transaction guid.
+
+	if txn.Reply.AccountID != txn.Reply.TrustedAccountKey {
+		p.TrustedAccountKey = txn.Reply.TrustedAccountKey
+	}
+
+	if txn.BetterCAT.Sampled && txn.Config.SpanEvents.Enabled {
+		p.ID = txn.CurrentSpanIdentifier()
+	}
+
+	// limit the number of outbound sampled=true payloads to prevent too
+	// many downstream sampled events.
+	p.SetSampled(false)
+	if txn.numPayloadsCreated < maxSampledDistributedPayloads {
+		p.SetSampled(txn.BetterCAT.Sampled)
+	}
+
+	txn.CreatePayloadSuccess = true
+
+	payload = p
+	return
+}
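
Because the payload embeds a timestamp, a fresh payload should be created for every outbound call; the HTTP-safe form travels in the DistributedTracePayloadHeader ("Newrelic") header defined in transaction.go. A sketch of manual propagation for transports that StartExternalSegment does not cover (attachPayload is a hypothetical helper):

    package example

    import (
        "net/http"

        newrelic "github.com/newrelic/go-agent"
    )

    // attachPayload adds a distributed tracing header to an arbitrary outbound
    // request.  For plain HTTP calls, StartExternalSegment already does this.
    func attachPayload(txn newrelic.Transaction, req *http.Request) {
        // HTTPSafe returns "" for the shim payload, e.g. before the app connects.
        if p := txn.CreateDistributedTracePayload().HTTPSafe(); p != "" {
            req.Header.Add(newrelic.DistributedTracePayloadHeader, p)
        }
    }
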
+
+var (
+	errOutboundPayloadCreated   = errors.New("outbound payload already created")
+	errAlreadyAccepted          = errors.New("AcceptDistributedTracePayload has already been called")
+	errInboundPayloadDTDisabled = errors.New("DistributedTracer must be enabled to accept an inbound payload")
+)
+
+func (txn *txn) AcceptDistributedTracePayload(t TransportType, p interface{}) error {
+	txn.Lock()
+	defer txn.Unlock()
+
+	if !txn.BetterCAT.Enabled {
+		return errInboundPayloadDTDisabled
+	}
+
+	if txn.finished {
+		txn.AcceptPayloadException = true
+		return errAlreadyEnded
+	}
+
+	if txn.numPayloadsCreated > 0 {
+		txn.AcceptPayloadCreateBeforeAccept = true
+		return errOutboundPayloadCreated
+	}
+
+	if txn.BetterCAT.Inbound != nil {
+		txn.AcceptPayloadIgnoredMultiple = true
+		return errAlreadyAccepted
+	}
+
+	if nil == p {
+		txn.AcceptPayloadNullPayload = true
+		return nil
+	}
+
+	payload, err := internal.AcceptPayload(p)
+	if nil != err {
+		if _, ok := err.(internal.ErrPayloadParse); ok {
+			txn.AcceptPayloadParseException = true
+		} else if _, ok := err.(internal.ErrUnsupportedPayloadVersion); ok {
+			txn.AcceptPayloadIgnoredVersion = true
+		} else if _, ok := err.(internal.ErrPayloadMissingField); ok {
+			txn.AcceptPayloadParseException = true
+		} else {
+			txn.AcceptPayloadException = true
+		}
+		return err
+	}
+
+	if nil == payload {
+		return nil
+	}
+
+	// Now that we have a parsed and allocated payload, make sure it
+	// contains the required fields.
+	if err := payload.IsValid(); nil != err {
+		txn.AcceptPayloadParseException = true
+		return err
+	}
+
+	// Also verify the trusted account key.
+	receivedTrustKey := payload.TrustedAccountKey
+	if "" == receivedTrustKey {
+		receivedTrustKey = payload.Account
+	}
+	if receivedTrustKey != txn.Reply.TrustedAccountKey {
+		txn.AcceptPayloadUntrustedAccount = true
+		return internal.ErrTrustedAccountKey{Message: "trusted account key missing or does not match"}
+	}
+
+	if 0 != payload.Priority {
+		txn.BetterCAT.Priority = payload.Priority
+	}
+
+	// A nil payload.Sampled means the field wasn't provided.
+	if nil != payload.Sampled {
+		txn.BetterCAT.Sampled = *payload.Sampled
+	} else {
+		txn.BetterCAT.Sampled = txn.Reply.AdaptiveSampler.ComputeSampled(txn.BetterCAT.Priority.Float32(), time.Now())
+	}
+
+	txn.BetterCAT.Inbound = payload
+
+	// TransportType's name field is not mutable outside of its package,
+	// so the only check needed is whether the caller passed an empty TransportType.
+	txn.BetterCAT.Inbound.TransportType = t.name
+	if t.name == "" {
+		txn.BetterCAT.Inbound.TransportType = TransportUnknown.name
+		txn.Config.Logger.Debug("Invalid transport type, defaulting to Unknown", map[string]interface{}{})
+	}
+
+	if tm := payload.Timestamp.Time(); txn.Start.After(tm) {
+		txn.BetterCAT.Inbound.TransportDuration = txn.Start.Sub(tm)
+	}
+
+	txn.AcceptPayloadSuccess = true
+
+	return nil
+}

+ 30 - 0
vendor/github.com/newrelic/go-agent/log.go

@@ -0,0 +1,30 @@
+package newrelic
+
+import (
+	"io"
+
+	"github.com/newrelic/go-agent/internal/logger"
+)
+
+// Logger is the interface that is used for logging in the go-agent.  Assign the
+// Config.Logger field to the Logger you wish to use.  Loggers must be safe for
+// use in multiple goroutines.
+//
+// For an example implementation, see: _integrations/nrlogrus/nrlogrus.go
+type Logger interface {
+	Error(msg string, context map[string]interface{})
+	Warn(msg string, context map[string]interface{})
+	Info(msg string, context map[string]interface{})
+	Debug(msg string, context map[string]interface{})
+	DebugEnabled() bool
+}
+
+// NewLogger creates a basic Logger at info level.
+func NewLogger(w io.Writer) Logger {
+	return logger.New(w, false)
+}
+
+// NewDebugLogger creates a basic Logger at debug level.
+func NewDebugLogger(w io.Writer) Logger {
+	return logger.New(w, true)
+}
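
Any type with these five methods can be assigned to Config.Logger. A minimal adapter backed by the standard library log package (a sketch; stdLogger and the constructor names used in main are illustrative assumptions):

    package main

    import (
        "log"
        "os"

        newrelic "github.com/newrelic/go-agent"
    )

    // stdLogger adapts the standard library logger to the agent's Logger interface.
    type stdLogger struct{ l *log.Logger }

    func (s stdLogger) Error(msg string, ctx map[string]interface{}) { s.l.Println("ERROR", msg, ctx) }
    func (s stdLogger) Warn(msg string, ctx map[string]interface{})  { s.l.Println("WARN", msg, ctx) }
    func (s stdLogger) Info(msg string, ctx map[string]interface{})  { s.l.Println("INFO", msg, ctx) }
    func (s stdLogger) Debug(msg string, ctx map[string]interface{}) { s.l.Println("DEBUG", msg, ctx) }
    func (s stdLogger) DebugEnabled() bool                           { return false }

    var _ newrelic.Logger = stdLogger{} // compile-time interface check

    func main() {
        cfg := newrelic.NewConfig("Example App", "__YOUR_LICENSE_KEY__")
        cfg.Logger = stdLogger{log.New(os.Stderr, "newrelic ", log.LstdFlags)}
        _, _ = newrelic.NewApplication(cfg)
    }
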

+ 130 - 0
vendor/github.com/newrelic/go-agent/segments.go

@@ -0,0 +1,130 @@
+package newrelic
+
+import "net/http"
+
+// SegmentStartTime is created by Transaction.StartSegmentNow and marks the
+// beginning of a segment.  A segment with a zero-valued SegmentStartTime may
+// safely be ended.
+type SegmentStartTime struct{ segment }
+
+// Segment is used to instrument functions, methods, and blocks of code.  The
+// easiest way to use Segment is the StartSegment function.
+type Segment struct {
+	StartTime SegmentStartTime
+	Name      string
+}
+
+// DatastoreSegment is used to instrument calls to databases and object stores.
+// Here is an example:
+//
+// 	ds := &newrelic.DatastoreSegment{
+// 		StartTime:  newrelic.StartSegmentNow(txn),
+// 		Product:    newrelic.DatastoreMySQL,
+// 		Collection: "my_table",
+// 		Operation:  "SELECT",
+// 	}
+// 	defer ds.End()
+//
+type DatastoreSegment struct {
+	StartTime SegmentStartTime
+	// Product is the datastore type.  See the constants in datastore.go.
+	Product DatastoreProduct
+	// Collection is the table or group.
+	Collection string
+	// Operation is the relevant action, e.g. "SELECT" or "GET".
+	Operation string
+	// ParameterizedQuery may be set to the query being performed.  It must
+	// not contain any raw parameters, only placeholders.
+	ParameterizedQuery string
+	// QueryParameters may be used to provide query parameters.  Care should
+	// be taken to only provide parameters which are not sensitive.
+	// QueryParameters are ignored in high security mode.
+	QueryParameters map[string]interface{}
+	// Host is the name of the server hosting the datastore.
+	Host string
+	// PortPathOrID can represent either the port, path, or id of the
+	// datastore being connected to.
+	PortPathOrID string
+	// DatabaseName is the name of the database where the current query is
+	// being executed.
+	DatabaseName string
+}
+
+// ExternalSegment is used to instrument external calls.  StartExternalSegment
+// is recommended when you have access to an http.Request.
+type ExternalSegment struct {
+	StartTime SegmentStartTime
+	Request   *http.Request
+	Response  *http.Response
+	// If you do not have access to the request, this URL field should be
+	// used to indicate the endpoint.  NOTE: If non-empty, this field
+	// is parsed using url.Parse and therefore it MUST include the protocol
+	// (e.g. "http://").
+	URL string
+}
+
+// End finishes the segment.
+func (s *Segment) End() error { return endSegment(s) }
+
+// End finishes the datastore segment.
+func (s *DatastoreSegment) End() error { return endDatastore(s) }
+
+// End finishes the external segment.
+func (s *ExternalSegment) End() error { return endExternal(s) }
+
+// OutboundHeaders returns the headers that should be attached to the external
+// request.
+func (s *ExternalSegment) OutboundHeaders() http.Header {
+	return outboundHeaders(s)
+}
+
+// StartSegmentNow helps avoid Transaction nil checks.
+func StartSegmentNow(txn Transaction) SegmentStartTime {
+	if nil != txn {
+		return txn.StartSegmentNow()
+	}
+	return SegmentStartTime{}
+}
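
Together with the zero-value guarantee documented on SegmentStartTime, this nil check means instrumentation can stay in place on code paths that sometimes run without a transaction. A small illustration (standard usage, no assumed helpers):

    package main

    import (
        "fmt"

        newrelic "github.com/newrelic/go-agent"
    )

    func main() {
        var txn newrelic.Transaction // nil: this code path has no active transaction

        s := newrelic.Segment{StartTime: newrelic.StartSegmentNow(txn), Name: "noop"}
        fmt.Println(s.End()) // <nil>: a zero-valued SegmentStartTime ends safely
    }
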
+
+// StartSegment makes it easy to instrument segments.  To time a function, do
+// the following:
+//
+//	func timeMe(txn newrelic.Transaction) {
+//		defer newrelic.StartSegment(txn, "timeMe").End()
+//		// ... function code here ...
+//	}
+//
+// To time a block of code, do the following:
+//
+//	segment := StartSegment(txn, "myBlock")
+//	// ... code you want to time here ...
+//	segment.End()
+//
+func StartSegment(txn Transaction, name string) *Segment {
+	return &Segment{
+		StartTime: StartSegmentNow(txn),
+		Name:      name,
+	}
+}
+
+// StartExternalSegment makes it easier to instrument external calls.
+//
+//    segment := newrelic.StartExternalSegment(txn, request)
+//    resp, err := client.Do(request)
+//    segment.Response = resp
+//    segment.End()
+//
+func StartExternalSegment(txn Transaction, request *http.Request) *ExternalSegment {
+	s := &ExternalSegment{
+		StartTime: StartSegmentNow(txn),
+		Request:   request,
+	}
+
+	for key, values := range s.OutboundHeaders() {
+		for _, value := range values {
+			request.Header.Add(key, value)
+		}
+	}
+
+	return s
+}

+ 107 - 0
vendor/github.com/newrelic/go-agent/transaction.go

@@ -0,0 +1,107 @@
+package newrelic
+
+import (
+	"net/http"
+)
+
+// Transaction represents a request or a background task.
+// Each Transaction should only be used in a single goroutine.
+type Transaction interface {
+	// If StartTransaction is called with a non-nil http.ResponseWriter then
+	// the Transaction may be used in its place.  This allows
+	// instrumentation of the response code and response headers.
+	http.ResponseWriter
+
+	// End finishes the current transaction, stopping all further
+	// instrumentation.  Subsequent calls to End will have no effect.
+	End() error
+
+	// Ignore ensures that this transaction's data will not be recorded.
+	Ignore() error
+
+	// SetName names the transaction.  Transactions will not be grouped
+	// usefully if too many unique names are used.
+	SetName(name string) error
+
+	// NoticeError records an error.  The first five errors per transaction
+	// are recorded (this behavior is subject to potential change in the
+	// future).
+	NoticeError(err error) error
+
+	// AddAttribute adds a key value pair to the current transaction.  This
+	// information is attached to errors, transaction events, and error
+	// events.  The key must contain fewer than 255 bytes.  The value
+	// must be a number, string, or boolean.  Attribute configuration is
+	// applied (see config.go).
+	//
+	// For more information, see:
+	// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/collect-custom-attributes
+	AddAttribute(key string, value interface{}) error
+
+	// StartSegmentNow allows the timing of functions, external calls, and
+	// datastore calls.  The segments of each transaction MUST be used in a
+	// single goroutine.  Consumers are encouraged to use the
+	// `StartSegmentNow` function, which checks whether the Transaction is
+	// nil.  See segments.go.
+	StartSegmentNow() SegmentStartTime
+
+	// CreateDistributedTracePayload creates a payload to link the calls
+	// between transactions. This method never returns nil. Instead, it may
+	// return a shim implementation whose methods return empty strings.
+	// CreateDistributedTracePayload should be called every time an outbound
+	// call is made since the payload contains a timestamp.
+	//
+	// StartExternalSegment calls CreateDistributedTracePayload, so you
+	// should not need to use this method for typical outbound HTTP calls.
+	// Just use StartExternalSegment!
+	CreateDistributedTracePayload() DistributedTracePayload
+
+	// AcceptDistributedTracePayload is used at the beginning of a
+	// transaction to identify the caller.
+	//
+	// Application.StartTransaction calls this method automatically if a
+	// payload is present in the request headers (under the key
+	// DistributedTracePayloadHeader).  Therefore, this method does not need
+	// to be used for typical HTTP transactions.
+	//
+	// AcceptDistributedTracePayload should be used as early in the
+	// transaction as possible. It may not be called after a call to
+	// CreateDistributedTracePayload.
+	//
+	// The payload parameter may be a DistributedTracePayload or a string.
+	AcceptDistributedTracePayload(t TransportType, payload interface{}) error
+}
+
+// DistributedTracePayload is used to instrument connections between
+// transactions and applications.
+type DistributedTracePayload interface {
+	// HTTPSafe serializes the payload into a string containing http safe
+	// characters.
+	HTTPSafe() string
+	// Text serializes the payload into a string.  The format is slightly
+	// more compact than HTTPSafe.
+	Text() string
+}
+
+const (
+	// DistributedTracePayloadHeader is the header used by New Relic agents
+	// for automatic trace payload instrumentation.
+	DistributedTracePayloadHeader = "Newrelic"
+)
+
+// TransportType represents the type of connection that the trace payload was
+// transported over.
+type TransportType struct{ name string }
+
+// TransportType names used across New Relic agents:
+var (
+	TransportUnknown = TransportType{name: "Unknown"}
+	TransportHTTP    = TransportType{name: "HTTP"}
+	TransportHTTPS   = TransportType{name: "HTTPS"}
+	TransportKafka   = TransportType{name: "Kafka"}
+	TransportJMS     = TransportType{name: "JMS"}
+	TransportIronMQ  = TransportType{name: "IronMQ"}
+	TransportAMQP    = TransportType{name: "AMQP"}
+	TransportQueue   = TransportType{name: "Queue"}
+	TransportOther   = TransportType{name: "Other"}
+)
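
Tying the pieces together, a hedged end-to-end sketch of the Transaction lifecycle (NewConfig, NewApplication, and StartTransaction are assumed from the agent's public constructors; the license key is a placeholder):

    package main

    import (
        "errors"
        "net/http"

        newrelic "github.com/newrelic/go-agent"
    )

    func doReport() error { return errors.New("not implemented") }

    func main() {
        cfg := newrelic.NewConfig("Example App", "__YOUR_LICENSE_KEY__")
        app, err := newrelic.NewApplication(cfg)
        if err != nil {
            panic(err)
        }

        http.HandleFunc("/report", func(w http.ResponseWriter, r *http.Request) {
            txn := app.StartTransaction("report", w, r)
            defer txn.End()

            txn.AddAttribute("tenant", r.URL.Query().Get("tenant")) // attached to events and errors
            if err := doReport(); err != nil {
                txn.NoticeError(err) // the first five errors per transaction are kept
                // Write through txn, not w, so the response code is instrumented.
                http.Error(txn, "failed", http.StatusInternalServerError)
            }
        })
        _ = http.ListenAndServe(":8080", nil)
    }
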

Some files were not shown because too many files changed in this diff