diff --git a/Dockerfile b/Dockerfile
index d548fc6..dcf1154 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,7 @@
-FROM busybox:latest
+# Use the Prometheus busybox image with glibc for compatibility with dynamically linked Go binaries
+FROM prom/busybox:glibc
+
+MAINTAINER John Doe
COPY exporter /bin/exporter
diff --git a/Makefile b/Makefile
index 9af6731..4f8f3b5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,11 +1,10 @@
GO := GO15VENDOREXPERIMENT=1 go
pkgs = .
-DOCKER_IMAGE_NAME ?= exporter
+DOCKER_IMAGE_NAME ?= challenge/exporter
DOCKER_IMAGE_TAG ?= latest
-
-all: get format test build
+all: format build test
get:
@echo ">> get dependencies"
@@ -25,10 +24,12 @@ format:
build:
@echo ">> building binaries"
- @$(GO) build -tags netgo -a -o exporter
+ @$(GO) build -o exporter
+# Static binaries are not needed since the base image now ships glibc:
+# @$(GO) build -tags netgo -a -o exporter
docker:
@echo ">> building docker image"
@docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
-.PHONY: all get style format test build docker
+.PHONY: all get style format build test docker
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..500c53b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,51 @@
+Challenge: Exporter for Prometheus
+==================================
+This is a Prometheus metrics exporter for the SoundCloud challenge.
+
+Requirements
+------------
+
+* Docker, of course ;-)
+* A working Go 1.8 installation (older versions should work as well)
+
+Build
+-----
+Build binary `exporter`:
+```bash
+make
+```
+
+Build docker image with tag `challenge/exporter`:
+```bash
+make docker
+```
+
+Run
+---
+Start locally in docker:
+
+```bash
+docker run -d -p 8080:8080 --name challenge beorn7/syseng-challenge
+docker run -d -p 8081:8080 --link challenge --name challenge_exporter challenge/exporter --endpoint http://challenge:8080/stats
+
+docker stop challenge_exporter
+docker rm challenge_exporter
+docker stop challenge
+docker rm challenge
+```
+or with docker-compose
+```bash
+docker-compose up
+```
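+
+The exporter binary can also be run directly without Docker (a minimal sketch, assuming it was built with `make` and the challenge app is listening on localhost:8080):
+```bash
+./exporter -endpoint http://localhost:8080/stats -listen :8081 -timeout 200
+```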
+
+Metrics should now be available at `http://localhost:8081/metrics`.
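+
+To verify that the exporter is serving metrics, a quick check (assuming `curl` is available):
+```bash
+curl -s http://localhost:8081/metrics | head
+```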
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..e76de7e
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,11 @@
+exporter:
+ build: ./
+ command: -endpoint=http://app:8080/stats -listen :8081 --timeout 200
+ links:
+ - app
+ ports:
+ - 8081:8081
+app:
+ image: beorn7/syseng-challenge
+ ports:
+ - 8080:8080
\ No newline at end of file
diff --git a/exporter.go b/exporter.go
index fc87967..f821684 100644
--- a/exporter.go
+++ b/exporter.go
@@ -25,6 +25,7 @@ const (
var (
listenAddress = flag.String("listen", ":8080", "The address to listen on for HTTP requests.")
endpointApp = flag.String("endpoint", "http://localhost:8050/stats", "HTTP API address of the application")
+ timeoutApp = flag.Int("timeout", 500, "Connection timeout in ms")
hostname = flag.String("hostname", "", "Optional hostname which will be added to the exported metrics (defaults to $HOSTNAME)")
prometheusConstLabel = parseConstLabel()
)
@@ -219,7 +220,7 @@ func NewExporter(endpoint string) (*Exporter, error) {
// use custom http client with specific timeout
client := &http.Client{
- Timeout: time.Duration(100 * time.Millisecond),
+ Timeout: time.Duration(*timeoutApp) * time.Millisecond,
}
// create api client
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..4c9418f
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,90 @@
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..640cf61
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,476 @@
+# Logrus
+[Build Status](https://travis-ci.org/Sirupsen/logrus) [GoDoc](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stdout instead of the default stderr
+ // Can be any io.Writer, see below for File example
+ log.SetOutput(os.Stdout)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stdout
+
+ // You could set this to any `io.Writer` such as a file
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: the syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `AddFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`. For Windows, see
+ [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hooks and formatter from a config file, and the logger will be generated with a different config for each environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It uses some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate) to simplify Logrus configuration. [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default the Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hooks calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+ 1) logger.Out is protected by locks.
+
+ 2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+ (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka .
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000..dddd5f8
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..4edbe7a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+ // When formatter is called in entry.log(), a Buffer may be set on the entry
+ Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+ serialized, err := entry.Logger.Formatter.Format(&entry)
+ entry.Buffer = nil
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ } else {
+ entry.Logger.mu.Lock()
+ _, err = entry.Logger.Out.Write(serialized)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+ entry.Logger.mu.Unlock()
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of how
+// fmt.Sprintln where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..9a0120a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b5fbe93
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
+ }
+
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
+ }
+
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers, you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..266554e
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // FieldMap allows users to customize the names of keys for various fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message",
+ // },
+ // }
+ FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b769f3d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stderr`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in
+ Level Level
+ // Used to sync writing to the log. Locking is enabled by Default
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+}
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infof(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalf(format, args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debug(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Error(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatal(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panic(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infoln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalln(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+// When a file is opened in append mode, it's safe to
+// write to it concurrently (for messages within 4k on Linux).
+// In these cases the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..e596691
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
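
The Level constants above are ordered from PanicLevel (0, most severe) to DebugLevel (5, most verbose), and ParseLevel is the usual bridge from a command-line flag to these constants. A hedged sketch, not taken from the exporter itself (the `-log.level` flag is made up for illustration):

```go
package main

import (
	"flag"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Hypothetical flag; the exporter in this repository does not define it.
	levelFlag := flag.String("log.level", "info", "one of: panic, fatal, error, warn, info, debug")
	flag.Parse()

	logger := log.New()

	level, err := log.ParseLevel(*levelFlag)
	if err != nil {
		logger.Fatalf("invalid -log.level %q: %v", *levelFlag, err)
	}
	logger.Level = level

	logger.Infof("logging at %s", level) // Level.String() yields "info", "debug", ...
}
```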
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000..e011a86
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,10 @@
+// +build appengine
+
+package logrus
+
+import "io"
+
+// IsTerminal always returns true on App Engine, where the underlying writer cannot be inspected.
+func IsTerminal(f io.Writer) bool {
+ return true
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..5f6be4d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..308160c
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..190297a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,28 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if the given writer's file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ var termios Termios
+ switch v := f.(type) {
+ case *os.File:
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000..3c86b1a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,21 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+ "io"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ switch v := f.(type) {
+ case *os.File:
+ _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
+ return err == nil
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..05d2f91
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,33 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if the given writer's file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ switch v := f.(type) {
+ case *os.File:
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..ba88854
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,189 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+)
+
+func init() {
+ baseTimestamp = time.Now()
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+
+ // QuoteEmptyFields will wrap empty fields in quotes if true
+ QuoteEmptyFields bool
+
+ // QuoteCharacter can be set to override the default quoting character "
+ // with something else, for example ' or `.
+ QuoteCharacter string
+
+ // Whether the logger's out is to a terminal
+ isTerminal bool
+
+ sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+ if len(f.QuoteCharacter) == 0 {
+ f.QuoteCharacter = "\""
+ }
+ if entry.Logger != nil {
+ f.isTerminal = IsTerminal(entry.Logger.Out)
+ }
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ var b *bytes.Buffer
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ prefixFieldClashes(entry.Data)
+
+ f.Do(func() { f.init(entry) })
+
+ isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if f.DisableTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+ } else if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.QuoteEmptyFields && len(text) == 0 {
+ return true
+ }
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+ b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ switch value := value.(type) {
+ case string:
+ if !f.needsQuoting(value) {
+ b.WriteString(value)
+ } else {
+ fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
+ }
+ case error:
+ errmsg := value.Error()
+ if !f.needsQuoting(errmsg) {
+ b.WriteString(errmsg)
+ } else {
+ fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+}
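
TextFormatter emits logfmt-style key=value lines and switches to the colored layout only when the output is a TTY (or when ForceColors is set). A small illustrative configuration against this vendored version; none of it comes from the exporter code itself:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.Formatter = &log.TextFormatter{
		DisableColors:    true,                  // always use key=value output, even on a TTY
		FullTimestamp:    true,                  // full timestamp instead of seconds since start
		TimestampFormat:  "2006-01-02 15:04:05", // overrides DefaultTimestampFormat
		QuoteEmptyFields: true,                  // render empty values as "" instead of nothing
	}

	logger.WithField("component", "collector").Info("starting scrape loop")
}
```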
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..7bdebed
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,62 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+ return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+
+ switch level {
+ case DebugLevel:
+ printFunc = entry.Debug
+ case InfoLevel:
+ printFunc = entry.Info
+ case WarnLevel:
+ printFunc = entry.Warn
+ case ErrorLevel:
+ printFunc = entry.Error
+ case FatalLevel:
+ printFunc = entry.Fatal
+ case PanicLevel:
+ printFunc = entry.Panic
+ default:
+ printFunc = entry.Print
+ }
+
+ go entry.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ entry.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
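
WriterLevel returns an *io.PipeWriter whose writes are split into lines by a background goroutine and logged at the chosen level, which makes it a convenient bridge for code that only accepts an io.Writer or a standard-library *log.Logger. A hedged example (the HTTP server setup is hypothetical, not the exporter's):

```go
package main

import (
	stdlog "log"
	"net/http"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Each line written to w becomes a logrus entry at Warn level.
	w := logger.WriterLevel(log.WarnLevel)
	defer w.Close()

	srv := &http.Server{
		Addr:     ":8081",
		ErrorLog: stdlog.New(w, "", 0), // stdlib server errors are funneled into logrus
	}
	logger.Fatal(srv.ListenAndServe())
}
```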
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 0000000..339177b
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000..1602287
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000..f4cabd6
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed value of the qth percentile. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
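
This quantile package is what the Prometheus Go client library builds summary metrics on. A self-contained sketch of the targeted-quantile API defined above (the objectives map mirrors the commonly used defaults for summaries but is otherwise arbitrary):

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the 0.5, 0.9 and 0.99 quantiles with the given absolute errors.
	q := quantile.NewTargeted(map[float64]float64{
		0.5:  0.05,
		0.9:  0.01,
		0.99: 0.001,
	})

	// Feed in observations, e.g. request durations in milliseconds.
	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i))
	}

	fmt.Printf("count=%d p50=%g p99=%g\n", q.Count(), q.Query(0.5), q.Query(0.99))
}
```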
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000..e2e0651
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+ make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..e392575
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := extendable(in.Addr().Interface()); ok {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..aa20729
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,970 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // (See below.)
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ extmap := e.extensionsWrite()
+ ext := extmap[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ extmap[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..2b30f84
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1362 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ p.buf = append(p.buf, data...)
+ return err
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Encode++ // Parens are to work around a goimports bug.
+ }
+
+ if len(p.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Size++ // Parens are to work around a goimports bug.
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ exts := structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionsMap(*exts); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+ exts := structPointer_Extensions(base, p.field)
+
+ v, mu := exts.extensionsRead()
+ if v == nil {
+ return nil
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+ if err := encodeExtensionsMap(v); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns a new reflect.Value matching the map's value type,
+// and a structPointer suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ if len(o.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(o.buf)+len(v) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..2ed1cf5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..eaad218
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,587 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+ if ep, ok := p.(extendableProto); ok {
+ return ep, ok
+ }
+ if ep, ok := p.(extendableProtoV1); ok {
+ return extensionAdapter{ep}, ok
+ }
+ return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, ok := extendable(base)
+ if !ok {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return nil // fast path
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return encodeExtensionsMap(m)
+}
+
+// encodeExtensionsMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionsMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, ok := extendable(pb)
+ if !ok {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok = extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non int32 reflect.value directly
+ // set it as a int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, ok := extendable(pb)
+ if !ok {
+ return errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
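+
+// Illustrative sketch, not part of the upstream file: typical use of the
+// exported extension accessors from application code. pb.MyMessage and
+// pb.E_Nickname are hypothetical generated artifacts assumed for the example.
+//
+//	msg := &pb.MyMessage{}
+//	if err := proto.SetExtension(msg, pb.E_Nickname, proto.String("gopher")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_Nickname) {
+//		v, err := proto.GetExtension(msg, pb.E_Nickname) // v is an interface{} holding a *string
+//		_, _ = v, err
+//	}
+//	proto.ClearExtension(msg, pb.E_Nickname)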
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
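+
+// Illustrative sketch, not part of the upstream file: roughly the shape of the
+// descriptor and registration that generated code emits for an extension. The
+// pb.Base type, field number 100 and names are hypothetical.
+//
+//	var E_Nickname = &proto.ExtensionDesc{
+//		ExtendedType:  (*pb.Base)(nil),
+//		ExtensionType: (*string)(nil),
+//		Field:         100,
+//		Name:          "example.nickname",
+//		Tag:           "bytes,100,opt,name=nickname",
+//	}
+//
+//	func init() { proto.RegisterExtension(E_Nickname) }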
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..ac4ddbc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,898 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+    method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
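+
+// Illustrative sketch, not part of the upstream file: reusing one Buffer
+// across calls to avoid per-call allocations (the Marshal and Unmarshal
+// methods on Buffer are defined in encode.go and decode.go). msg stands for
+// any generated Message value, assumed for the example.
+//
+//	buf := proto.NewBuffer(nil)
+//	if err := buf.Marshal(msg); err != nil {
+//		log.Fatal(err)
+//	}
+//	wire := append([]byte(nil), buf.Bytes()...) // copy: Bytes aliases the internal buffer
+//	buf.Reset()                                 // reuse the same Buffer for the next message
+//	_ = wire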
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
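+
+// Illustrative sketch, not part of the upstream file: both JSON encodings of
+// an enum decode to the same value. FOO_value is the hypothetical generated
+// map from the package documentation above (map[string]int32{"X": 17}).
+//
+//	v, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // v == 17, symbolic form
+//	v, _ = proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")   // v == 17, numeric form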
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
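+
+// Illustrative sketch, not part of the upstream file: wire is any encoded
+// protocol buffer (assumed for the example).
+//
+//	new(proto.Buffer).DebugPrint("incoming request", wire)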
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
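+
+// Illustrative sketch, not part of the upstream file: pb.Test is the
+// hypothetical generated type from the package documentation in lib.go,
+// whose "type" field declares [default=77].
+//
+//	t := &pb.Test{Label: proto.String("hello")} // Type left unset
+//	proto.SetDefaults(t)
+//	fmt.Println(*t.Type) // 77: the declared default has been filled in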
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a slice of the fields,
+ // with its scalar fields set to their proto-declared non-zero default values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
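+
+// Illustrative sketch, not part of the upstream file: how the encoders in
+// this package are assumed to obtain a deterministic order for map fields
+// (mv is a reflect.Value of kind Map).
+//
+//	keys := mv.MapKeys()
+//	sort.Sort(mapKeys(keys)) // numeric keys sorted numerically, others textually
+//	for _, k := range keys {
+//		_ = mv.MapIndex(k)
+//	}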
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..fd982de
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ if ms.find(pb) != nil {
+ return true
+ }
+ return false
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
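+
+// For example (illustrative, not part of the upstream file):
+// skipVarint([]byte{0x96, 0x01, 0x0a}) returns []byte{0x0a}, because 0x96 has
+// the continuation bit set and 0x01 does not, so the two-byte varint 150 is
+// dropped.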
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ if err := encodeExtensions(exts); err != nil {
+ return nil, err
+ }
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ if err := encodeExtensionsMap(exts); err != nil {
+ return nil, err
+ }
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..fb512e2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
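+
+// Illustrative sketch, not part of the upstream file: for a hypothetical
+// struct type
+//
+//	type T struct {
+//		A int32
+//		B *string
+//	}
+//
+// the reflect.StructField for B has Index []int{1}, so toField yields
+// field{1} and structPointer_field resolves it with FieldByIndex, returning
+// the reflect.Value of the B field inside the pointed-to struct.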
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(int64(x)))
+ case reflect.Uint64:
+ elem.SetUint(uint64(x))
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return uint64(elem.Uint())
+ case reflect.Float64:
+ return math.Float64bits(float64(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
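The reflect-based accessors above all reduce to one pattern: take the reflect.Value of a pointer to the message, walk to the field with FieldByIndex, and hand back its address as a typed pointer. A minimal standalone sketch of that pattern follows; the struct and field names are made up for illustration and nothing here is part of the vendored file.

```go
package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Name *string
	Tags []string
}

func main() {
	m := &msg{}
	v := reflect.ValueOf(m) // what structPointer{v} wraps

	// A "field" in this implementation is the index path handed to FieldByIndex.
	idx := []int{1} // the Tags field

	// Mirrors structPointer_ifield: Elem -> FieldByIndex -> Addr -> Interface.
	addr := v.Elem().FieldByIndex(idx).Addr().Interface().(*[]string)
	*addr = append(*addr, "hello")

	fmt.Println(m.Tags) // [hello]
}
```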
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..6b5567d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
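+// ExtMap returns the address of an extension map field in the struct.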
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *v is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *v to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *v.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
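The unsafe variant replaces the index path with a plain byte offset: toField records reflect.StructField.Offset, and every accessor adds that offset to the base pointer and casts the result to the right pointer type. A minimal standalone sketch of the same pattern, again with made-up names and not part of the vendored file:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	ID   int32
	Name string
}

func main() {
	m := &msg{ID: 7}

	// toField: a field is just its byte offset within the struct.
	f, _ := reflect.TypeOf(*m).FieldByName("Name")

	// Mirrors structPointer_StringVal: base pointer + offset, cast to *string.
	p := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + f.Offset))
	*p = "set via offset"

	fmt.Println(m.Name) // set via offset
}
```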
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..ec2289c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
+
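Parse is driven entirely by the struct tags that protoc-gen-go emits, in the "bytes,49,opt,name=foo,def=hello!" form shown above. As a hypothetical usage example (not part of this diff), feeding the exported Properties type such a tag string yields:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	var p proto.Properties
	// Tag string as emitted by protoc-gen-go for an optional field with a default.
	p.Parse("bytes,49,opt,name=foo,def=hello!")

	fmt.Println(p.Wire)       // bytes
	fmt.Println(p.Tag)        // 49
	fmt.Println(p.Optional)   // true
	fmt.Println(p.OrigName)   // foo
	fmt.Println(p.HasDefault) // true
	fmt.Println(p.Default)    // hello!
}
```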
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.dec = (*Buffer).dec_slice_byte
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_slice_byte
+ p.size = size_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+ reflect.PtrTo(t).Implements(extendableProtoV1Type)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_InternalExtensions" { // special case
+ p.enc = (*Buffer).enc_exts
+ p.dec = nil // not needed
+ p.size = size_exts
+ } else if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ } else if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
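GetProperties ties these pieces together: it walks a message struct once, parses each protobuf tag into a Properties entry, and caches the result per type. A hedged usage sketch, with a hand-written struct standing in for a generated message (illustration only, not part of this diff):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// demo imitates the shape of a generated message; real callers pass generated types.
type demo struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
	Id   *int32  `protobuf:"varint,2,opt,name=id"`
}

func main() {
	// GetProperties expects the struct type itself, not a pointer to it.
	sp := proto.GetProperties(reflect.TypeOf(demo{}))
	for _, p := range sp.Prop {
		fmt.Printf("%s: tag=%d wire=%s\n", p.OrigName, p.Tag, p.Wire)
	}
	// Output:
	// name: tag=1 wire=bytes
	// id: tag=2 wire=varint
}
```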
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..965876b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+// When type URL contains any characters except [0-9A-Za-z./_\-]*, it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, ok := extendable(pv.Interface()); ok {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
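The vendored text marshaler above is reached through `proto.MarshalTextString` and `proto.CompactTextString`. A minimal sketch of rendering a message in both text forms for debugging, assuming the vendored `github.com/prometheus/client_model/go` package (imported as `dto`) is also present in the vendor tree:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go" // assumed vendored generated types
)

func main() {
	// A small protobuf message; MetricFamily is an assumption for this sketch.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
	}

	// Multi-line, indented text format.
	fmt.Println(proto.MarshalTextString(mf))

	// Single-line, compact text format.
	fmt.Println(proto.CompactTextString(mf))
}
```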
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..61f83c1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,895 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
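The parser above is the inverse of the marshaler: `proto.UnmarshalText` resets a message and fills it from its text representation. A hedged sketch, again assuming the vendored `dto` package for a concrete generated type:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go" // assumed vendored generated types
)

func main() {
	in := `name: "http_requests_total" help: "Total number of HTTP requests." type: COUNTER`

	mf := &dto.MetricFamily{}
	// UnmarshalText resets mf, then parses the text-format input into it.
	if err := proto.UnmarshalText(in, mf); err != nil {
		log.Fatalf("parse failed: %v", err)
	}
	fmt.Println(mf.GetName(), mf.GetType())
}
```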
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
new file mode 100644
index 0000000..5d8cb5b
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
@@ -0,0 +1 @@
+Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
new file mode 100644
index 0000000..81be214
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
@@ -0,0 +1,7 @@
+all:
+
+cover:
+ go test -cover -v -coverprofile=cover.dat ./...
+ go tool cover -func cover.dat
+
+.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 0000000..258c063
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ var headerBuf [binary.MaxVarintLen32]byte
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
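`ReadDelimited` is what makes the length-delimited protobuf exposition format consumable: each record is a 32-bit varint length followed by one encoded message, and a clean end of stream surfaces as `io.EOF`. A sketch of draining such a stream into messages, with `dto.MetricFamily` assumed as the concrete type:

```go
package metricsio

import (
	"io"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go" // assumed vendored generated types
)

// readFamilies decodes every length-delimited MetricFamily record from r,
// stopping cleanly when the stream ends.
func readFamilies(r io.Reader) ([]*dto.MetricFamily, error) {
	var out []*dto.MetricFamily
	for {
		mf := &dto.MetricFamily{}
		if _, err := pbutil.ReadDelimited(r, mf); err != nil {
			if err == io.EOF {
				return out, nil // end of stream after complete records
			}
			return nil, err
		}
		out = append(out, mf)
	}
}
```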
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 0000000..c318385
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 0000000..8fb59ad
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type together in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ var buf [binary.MaxVarintLen32]byte
+ encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
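`WriteDelimited` is the complementary encoder; paired with `ReadDelimited` it yields a simple record stream. A minimal round-trip sketch under the same `dto.MetricFamily` assumption:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go" // assumed vendored generated types
)

func main() {
	var buf bytes.Buffer

	in := &dto.MetricFamily{Name: proto.String("up"), Help: proto.String("1 if the target is up.")}
	// Writes a varint length prefix followed by the encoded message.
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		log.Fatal(err)
	}

	out := &dto.MetricFamily{}
	// Reads back exactly one record from the buffer.
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName()) // "up"
}
```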
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 0000000..dd878a3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,23 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000..44986bf
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [the package godoc](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000..623d3d8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by
+ // Describe. Returned metrics that share the same descriptor must differ
+ // in their variable label values. This method may be called
+ // concurrently and must therefore be implemented in a concurrency safe
+ // way. Blocking occurs at the expense of total performance of rendering
+ // all registered metrics. Ideally, Collector implementations support
+ // concurrent readers.
+ Collect(chan<- Metric)
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+ self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
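For orientation, here is a minimal sketch of a custom Collector built against the interface above. The `queueCollector` type, its metric name, and the length callback are hypothetical stand-ins; only the `Describe`/`Collect` contract and `MustNewConstMetric` come from the vendored package.

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

// queueCollector is a hypothetical Collector that reports the current length
// of an application queue as a constant gauge metric at collect time.
type queueCollector struct {
	desc   *prometheus.Desc
	length func() int // assumed callback returning the current queue length
}

// Describe sends the only descriptor this collector uses.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect creates a throw-away metric from the current queue length.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(c.length()))
}

func main() {
	qc := &queueCollector{
		desc:   prometheus.NewDesc("app_queue_length", "Current length of the work queue.", nil, nil),
		length: func() int { return 7 }, // stand-in for a real queue
	}
	if err := prometheus.Register(qc); err != nil {
		log.Fatal(err)
	}
}
```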
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000..72d5256
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,164 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ *MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
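As a usage sketch of the counter types documented above (metric and label names are invented for the example; `MustRegister` comes from the package's registry API):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A counter vector partitioned by status code and method, as described in
	// the CounterVec comment above.
	httpRequests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Number of HTTP requests, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpRequests)

	// Shortcut form; panics on a label mismatch instead of returning an error.
	httpRequests.WithLabelValues("404", "GET").Inc()
	httpRequests.With(prometheus.Labels{"code": "200", "method": "POST"}).Add(3)
}
```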
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..1835b16
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,200 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported on
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ vh := hashNew()
+ for _, val := range labelValues {
+ vh = hashAdd(vh, val)
+ vh = hashAddByte(vh, separatorByte)
+ }
+ d.id = vh
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ lh := hashNew()
+ lh = hashAdd(lh, help)
+ lh = hashAddByte(lh, separatorByte)
+ for _, labelName := range labelNames {
+ lh = hashAdd(lh, labelName)
+ lh = hashAddByte(lh, separatorByte)
+ }
+ d.dimHash = lh
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
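A small sketch of how a Desc is typically built and then turned into a constant metric inside a custom Collector; the metric and label names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One variable label ("queue") plus one constant label ("shard").
	desc := prometheus.NewDesc(
		"worker_queue_length",
		"Current length of a worker queue.",
		[]string{"queue"},
		prometheus.Labels{"shard": "a"},
	)

	// Descs like this are usually used in a Collector's Collect method to
	// create metrics on the fly; one label value per variable label.
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 42, "high_priority")
	fmt.Println(m.Desc())
}
```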
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000..618c4de
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,181 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow
+// exposing the registered metrics via HTTP (package promhttp) or pushing them to a
+// Pushgateway (package push).
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// )
+// )
+//
+// func init() {
+// // Metrics have to be registered to be exposed:
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // The Handler function provides a default handler to expose metrics
+// // via an HTTP server. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.Handler())
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
+// HistogramOpts, or UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Your own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created
+// later. NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might
+// cause. As suggested by the name, MustRegister panics if an error occurs. With
+// the Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data
+// model. Inconsistencies are ideally detected at registration time, not at
+// collect time. The former will usually be detected at start-up time of a
+// program, while the latter will only happen at scrape time, possibly not even
+// on the first scrape if the inconsistency only becomes relevant later. That is
+// the main reason why a Collector and a Metric have to describe themselves to
+// the registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in
+// the same way on a custom registry as the global functions Register and
+// Unregister on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries
+// with special properties, see NewPedanticRegistry. You can avoid global state,
+// as it is imposed by the DefaultRegistry. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegistry comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp
+// sub-package. (The top-level functions in the prometheus package are
+// deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Function for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added. Sending metrics to
+// Graphite would be an example that will soon be implemented.
+package prometheus
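To make the "Advanced Uses of the Registry" section above concrete, here is a minimal sketch using a custom registry, error-returning registration, and the Gatherer interface; the gauge reuses the hypothetical CPU temperature from the package example:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A custom registry instead of the global default registry; nothing is
	// pre-registered here, not even the Go or process collectors.
	reg := prometheus.NewRegistry()

	cpuTemp := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "cpu_temperature_celsius",
		Help: "Current temperature of the CPU.",
	})

	// Register instead of MustRegister so the error can be handled.
	if err := reg.Register(cpuTemp); err != nil {
		log.Fatalln("registration failed:", err)
	}
	cpuTemp.Set(65.3)

	// The registry implements Gatherer; Gather returns the metric families
	// that an HTTP handler (e.g. from the promhttp sub-package) would expose.
	mfs, err := reg.Gather()
	if err != nil {
		log.Fatalln("gathering failed:", err)
	}
	log.Printf("gathered %d metric families", len(mfs))
}
```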
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 0000000..18a99d5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+type expvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototying, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+ return &expvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
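A sketch of the exports map described above, mapping one expvar key to a Desc with a single variable label (the expvar key and metric names are made up):

```go
package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// An expvar map published by the application; its keys become values of
	// the "cause" label below.
	crashes := expvar.NewMap("crash_count")
	crashes.Add("oom", 1)

	collector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"crash_count": prometheus.NewDesc(
			"crash_count_total",
			"Number of crashes, partitioned by cause.",
			[]string{"cause"}, // one variable label matches one level of map nesting
			nil,
		),
	})
	prometheus.MustRegister(collector)
}
```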
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 0000000..e3b67df
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000..9ab5a3d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
+ Inc()
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ *MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
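A short sketch of NewGaugeFunc as documented above; the metric name is hypothetical, and the callback must be concurrency-safe:

```go
package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The value is read by calling the function at collect time.
	goroutines := prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "app_goroutines_current",
			Help: "Number of goroutines that currently exist.",
		},
		func() float64 { return float64(runtime.NumGoroutine()) },
	)
	prometheus.MustRegister(goroutines)
}
```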
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000..6dea674
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,263 @@
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// go process.
+func NewGoCollector() Collector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Namespace: "go",
+ Name: "goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000..9719e8f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,444 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+	// name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
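To illustrate the bucket helpers and the Buckets field described above, a small sketch with invented metric names; the +Inf bucket is added implicitly in both cases:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Linear buckets: upper bounds 0.05, 0.10, ..., 0.50 (10 buckets).
	reqDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request duration in seconds.",
		Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
	})

	// Exponential buckets: upper bounds 100, 200, 400, ..., doubling each step.
	respSize := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "response_size_bytes",
		Help:    "Response size in bytes.",
		Buckets: prometheus.ExponentialBuckets(100, 2, 8),
	})

	prometheus.MustRegister(reqDuration, respSize)
	reqDuration.Observe(0.42)
	respSize.Observe(512)
}
```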
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ selfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ *MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
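+
+// Illustrative sketch (not part of the upstream file): partitioning the same
+// histogram by labels with a HistogramVec. The metric name, label names, and
+// label values are made up.
+//
+//	reqDur := NewHistogramVec(
+//		HistogramOpts{
+//			Name: "example_request_duration_seconds",
+//			Help: "Example request duration, partitioned by code and method.",
+//		},
+//		[]string{"code", "method"},
+//	)
+//	MustRegister(reqDur)
+//	reqDur.WithLabelValues("404", "GET").Observe(0.042)
+//	reqDur.With(Labels{"code": "200", "method": "POST"}).Observe(0.007)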
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
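+
+// Illustrative sketch (not part of the upstream file): emitting a const
+// histogram from a custom Collector's Collect method. myCollector, c.desc,
+// and the numbers are made up; c.desc is assumed to have no variable labels.
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstHistogram(
+//			c.desc,
+//			4711,   // observation count
+//			403.42, // sum of all observations
+//			map[float64]uint64{0.05: 1000, 0.1: 3000, 0.5: 4500}, // cumulative bucket counts
+//		)
+//	}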
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 0000000..a397277
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,523 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead
+// (which is not instrumented).
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := DefaultGatherer.Gather()
+ if err != nil {
+ http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ // err from Gather is nil here (handled above); lastErr carries the encoding error.
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ })
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues:
+//
+// - It uses Summaries rather than Histograms. Summaries are not useful if
+// aggregation across multiple instances is required.
+//
+// - It uses microseconds as unit, which is deprecated and should be replaced by
+// seconds.
+//
+// - The size of the request is calculated in a separate goroutine. Since this
+// calculator requires access to the request header, it creates a race with
+// any writes to the header performed during request handling.
+// httputil.ReverseProxy is a prominent example for a handler
+// performing such writes.
+//
+// Upcoming versions of this package will provide ways of instrumenting HTTP
+// handlers that are more flexible and have fewer issues. Please prefer direct
+// instrumentation in the meantime.
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
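+
+// Illustrative sketch (not part of the upstream file): wrapping an existing
+// handler with this (deprecated, see above) helper. The path, the handler
+// name "query", and queryHandler are made up.
+//
+//	http.Handle("/query", InstrumentHandler("query", queryHandler))
+//
+// where queryHandler is some existing http.Handler.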
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler (and shares the same
+// issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
+// issues) but provides more flexibility (at the cost of a more complex call
+// syntax). As InstrumentHandler, this function registers four metric
+// collectors, but it uses the provided SummaryOpts to create them. However, the
+// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
+// by "requests_total", "request_duration_microseconds", "request_size_bytes",
+// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
+// the same issues) but provides more flexibility (at the cost of a more complex
+// call syntax). See InstrumentHandlerWithOpts for details how the provided
+// SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+ if err := Register(reqCnt); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+ if err := Register(reqDur); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+ if err := Register(reqSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+ if err := Register(resSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ resSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := computeApproximateRequestSize(r)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ reqCnt.WithLabelValues(method, code).Inc()
+ reqDur.Observe(elapsed)
+ resSz.Observe(float64(delegate.written))
+ reqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+ // Get the URL length in the current goroutine to avoid a race condition:
+ // the HandlerFunc that runs in parallel may modify the URL.
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ out := make(chan int, 1)
+
+ go func() {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+ close(out)
+ }()
+
+ return out
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000..d4063d9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
+ //
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. (Implementers may find
+ // LabelPairSorter useful for that.) Callers of Write should still make
+ // sure of sorting if they depend on it.
+ Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
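+
+// Illustrative note (not part of the upstream file): with all three components
+// set, BuildFQName("namespace", "subsystem", "name") returns
+// "namespace_subsystem_name"; with an empty subsystem it returns
+// "namespace_name", and with an empty name it returns "".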
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
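+
+// Illustrative sketch (not part of the upstream file): sorting the label pairs
+// of a metric before writing it out, as suggested above, where m is a
+// *dto.Metric being populated in a custom Write method.
+//
+//	sort.Sort(LabelPairSorter(m.Label))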
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000..94b2553
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,140 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, rss *Desc
+ startTime *Desc
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics, including CPU, memory, and file descriptor usage, as well as
+// the process start time for the given process id under the given namespace.
+func NewProcessCollector(pid int, namespace string) Collector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
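+
+// Illustrative sketch (not part of the upstream file): a process collector for
+// the current process is registered by default (see registry.go). An
+// additional one under a custom namespace could be registered like this,
+// where "example" is a made-up namespace:
+//
+//	MustRegister(NewProcessCollector(os.Getpid(), "example"))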
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) Collector {
+ ns := ""
+ if len(namespace) > 0 {
+ ns = namespace + "_"
+ }
+
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.rss
+ ch <- c.startTime
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+ if startTime, err := stat.StartTime(); err == nil {
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 0000000..b6dd5a2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,201 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package promhttp contains functions to create http.Handler instances to
+// expose Prometheus metrics via HTTP. In later versions of this package, it
+// will also contain tooling to instrument instances of http.Handler and
+// http.RoundTripper.
+//
+// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
+// you can create a handler for a custom registry or anything that implements
+// the Gatherer interface. It also allows creating handlers that act
+// differently on errors or that log errors.
+package promhttp
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
+// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
+// error, no error logging, and compression if requested by the client.
+//
+// If you want to create a Handler for the DefaultGatherer with different
+// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
+// your desired HandlerOpts.
+func Handler() http.Handler {
+ return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+}
+
+// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
+// of the Handler is defined by the provided HandlerOpts.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding metric family:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ // lastErr (from encoding) is the relevant error here; err from Gather may be nil.
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ // TODO(beorn7): Consider streaming serving of metrics.
+ })
+}
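+
+// Illustrative sketch (not part of the upstream file): exposing a custom
+// registry with non-default options. myCollector and the listen address are
+// made up.
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(myCollector)
+//	http.Handle("/metrics", HandlerFor(reg, HandlerOpts{
+//		ErrorHandling: ContinueOnError,
+//	}))
+//	log.Fatal(http.ListenAndServe(":8081", nil))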
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. It is recommended to at least
+ // log errors (by providing an ErrorLog in HandlerOpts) to not mask
+ // errors completely.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy for custom loggers to implement, if they don't do so already.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling, provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+ if compressionDisabled {
+ return writer, ""
+ }
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000..78d5f19
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,755 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const (
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector) and a Go collector (see
+// NewGoCollector) already registered. This approach to keep default instances
+// as global state mirrors the approach of other packages in the Go standard
+// library. Note that there are caveats. Change the variables with caution and
+// only if you understand the consequences. Users who want to avoid global state
+// altogether should not use the convenience functions and act on custom
+// instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
+)
+
+func init() {
+ MustRegister(NewProcessCollector(os.Getpid(), ""))
+ MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as the type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementations
+// (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // It is in general not safe to register the same Collector multiple
+ // times concurrently.
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered.
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of MetricFamily protobufs. Even if an error occurs, Gather attempts
+ // to gather as many metrics as possible. Hence, if a non-nil error is
+ // returned, the returned MetricFamily slice could be nil (in case of a
+ // fatal error that prevented any meaningful metric collection) or
+ // contain a number of MetricFamily protobufs, some of which might be
+ // incomplete, and some might be missing altogether. The returned error
+ // (which might be a MultiError) explains the details. In scenarios
+ // where complete collection is critical, the returned MetricFamily
+ // protobufs should be disregarded if the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+ return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
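+
+// Illustrative sketch (not part of the upstream file): the register-or-reuse
+// pattern described above, here for a *CounterVec named reqCnt:
+//
+//	if err := Register(reqCnt); err != nil {
+//		if are, ok := err.(AlreadyRegisteredError); ok {
+//			reqCnt = are.ExistingCollector.(*CounterVec)
+//		} else {
+//			panic(err)
+//		}
+//	}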
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as an error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return AlreadyRegisteredError{
+ ExistingCollector: existing,
+ NewCollector: c,
+ }
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // Just a sum of the desc IDs.
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+ for range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+ // This could be done concurrently, too, but it would require locking
+ // of metricFamiliesByName (and of metricHashes if checks are
+ // enabled). Most likely not worth it.
+ desc := metric.Desc()
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "error collecting metric %v: %s", desc, err,
+ ))
+ continue
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok {
+ if metricFamily.GetHelp() != desc.help {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ ))
+ continue
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else {
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ errs = append(errs, fmt.Errorf(
+ "empty metric collected: %s", dtoMetric,
+ ))
+ continue
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ if r.pedanticChecksEnabled {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ ))
+ continue
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
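+
+// A minimal usage sketch, assuming a hypothetical second *Registry called
+// otherReg (DefaultGatherer is the package-level Gatherer wrapping the
+// default registry):
+//
+// g := Gatherers{DefaultGatherer, otherReg}
+// mfs, err := g.Gather()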
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+		// inconsistent. However, we have to deal with it, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// does not yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal to
+// the calculated dimHash.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+ dimHashes map[string]uint64,
+) error {
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew()
+ h = hashAdd(h, metricFamily.GetName())
+ h = hashAddByte(h, separatorByte)
+ dh := hashNew()
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ h = hashAdd(h, lp.GetValue())
+ h = hashAddByte(h, separatorByte)
+ dh = hashAdd(dh, lp.GetName())
+ dh = hashAddByte(dh, separatorByte)
+ }
+ if _, exists := metricHashes[h]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
+ if dimHash != dh {
+ return fmt.Errorf(
+ "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ } else {
+ dimHashes[metricFamily.GetName()] = dh
+ }
+ metricHashes[h] = struct{}{}
+ return nil
+}
+
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000..82b8850
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,543 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
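+
+// A minimal usage sketch (the metric name and objectives below are purely
+// illustrative values, not defaults mandated by this package):
+//
+// requestDuration := NewSummary(SummaryOpts{
+//     Name:       "http_request_duration_seconds",
+//     Help:       "Duration of HTTP requests in seconds.",
+//     Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+// })
+// MustRegister(requestDuration)
+// requestDuration.Observe(0.42)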
+
+// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
+var (
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is DefObjectives. It is used if Objectives is left at
+ // its zero value (i.e. nil). To create a Summary without Objectives,
+ // set it to an empty map (i.e. map[float64]float64{}).
+ //
+ // Deprecated: Note that the current value of DefObjectives is
+ // deprecated. It will be replaced by an empty map in v0.10 of the
+ // library. Please explicitly set Objectives to the desired value.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+ BufCap uint32
+}
+
+// A caveat with the sliding-window decay algorithm: the Merge method of
+// perks/quantile is not working as advertised - and it might be unfixable, as
+// the underlying algorithm is apparently not capable of merging summaries in
+// the first place. To avoid using Merge, we are currently adding observations
+// to _each_ age bucket, i.e. the effort to add a sample is essentially
+// multiplied by the number of age buckets. When rotating age buckets, we empty
+// the previous head stream. At scrape time, we simply take the quantiles from
+// the head stream (no merging required). Result: more effort on observation
+// time, less effort on scrape time, which is exactly the opposite of what we
+// try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// at scrape time (see the code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if opts.Objectives == nil {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ selfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ *MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ }),
+ }
+}
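+
+// A minimal usage sketch for a SummaryVec, assuming hypothetical "method" and
+// "code" labels:
+//
+// latencies := NewSummaryVec(
+//     SummaryOpts{Name: "request_latency_seconds", Help: "Request latency in seconds."},
+//     []string{"method", "code"},
+// )
+// latencies.WithLabelValues("GET", "200").Observe(0.021)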
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
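+
+// A minimal sketch of using MustNewConstSummary from a custom Collector's
+// Collect method (desc and ch stand for the Collector's *Desc and the channel
+// passed to Collect; the numbers are purely illustrative):
+//
+// ch <- MustNewConstSummary(
+//     desc, 4711, 403.34,
+//     map[float64]float64{0.5: 0.23, 0.99: 0.56},
+// )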
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 0000000..f4cac5a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
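+
+// A minimal sketch of the Gauge use case mentioned above: wrapping a Gauge's
+// Set method in an ObserverFunc lets a Timer record the last observed
+// duration into that (hypothetical) gauge:
+//
+// timer := NewTimer(ObserverFunc(lastRunDuration.Set))
+// defer timer.ObserveDuration()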
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. ObserveDuration is
+// usually called with a defer statement.
+func (t *Timer) ObserveDuration() {
+ if t.observer != nil {
+ t.observer.Observe(time.Since(t.begin).Seconds())
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000..065501d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,143 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that
+// no type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+//
+// Deprecated: The Untyped type is deprecated because it doesn't make sense in
+// direct instrumentation. If you need to mirror an external metric of unknown
+// type (usually while writing exporters), use MustNewConstMetric to create an
+// untyped metric instance on the fly.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
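+
+// A minimal sketch of the alternative recommended above: mirroring an external
+// metric of unknown type from a custom Collector's Collect method (desc and ch
+// stand for the Collector's *Desc and the channel passed to Collect):
+//
+// ch <- MustNewConstMetric(desc, UntypedValue, 42)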
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ *MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000..7d3e810
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,239 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+	// valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) SetToCurrentTime() {
+ v.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
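+
+// A minimal sketch of a fixed gauge-typed sample with one variable label (the
+// metric name, help string, and label are purely illustrative):
+//
+// desc := NewDesc("disk_usage_bytes", "Disk usage in bytes.", []string{"device"}, nil)
+// m, err := NewConstMetric(desc, GaugeValue, 1024*1024, "sda1")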
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000..7f3eef9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,404 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/common/model"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects the children.
+ children map[uint64][]metricWithLabelValues
+ desc *Desc
+
+ newMetric func(labelValues ...string) Metric
+ hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized MetricVec. The concrete value is
+// returned for embedding into another struct.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+ return &MetricVec{
+ children: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
+
+// Describe implements Collector. The length of the returned slice
+// is always one.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.children {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabelValues(h, lvs), nil
+}
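+
+// A minimal sketch with explicit error handling, assuming a hypothetical
+// MetricVec-based vector of Counters named httpReqs:
+//
+// if m, err := httpReqs.GetMetricWithLabelValues("GET", "200"); err == nil {
+//     m.(Counter).Inc()
+// }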
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabels(h, labels), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such an inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ return m.deleteByHashWithLabelValues(h, lvs)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+
+ return m.deleteByHashWithLabels(h, labels)
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+
+ i := m.findMetricWithLabelValues(metrics, lvs)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+ i := m.findMetricWithLabels(metrics, labels)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, val := range vals {
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabelValues(hash, lvs)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabelValues(hash, lvs)
+ if !ok {
+		// Copy to avoid allocation in case we don't go down this code path.
+ copiedLVs := make([]string, len(lvs))
+ copy(copiedLVs, lvs)
+ metric = m.newMetric(copiedLVs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+ }
+ return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabels(hash, labels)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabels(hash, labels)
+ if !ok {
+ lvs := m.extractLabelValues(labels)
+ metric = m.newMetric(lvs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+ }
+ return metric
+}
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+ for i, metric := range metrics {
+ if m.matchLabelValues(metric.values, lvs) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+ for i, metric := range metrics {
+ if m.matchLabels(metric.values, labels) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
+ if len(values) != len(lvs) {
+ return false
+ }
+ for i, v := range values {
+ if v != lvs[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
+ if len(labels) != len(values) {
+ return false
+ }
+ for i, k := range m.desc.variableLabels {
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) extractLabelValues(labels Labels) []string {
+ labelValues := make([]string, len(labels))
+ for i, k := range m.desc.variableLabels {
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 0000000..20110e4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000..b065f86
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
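
The generated `Get*` accessors above are deliberately nil-safe, which is why callers can chain them without nil checks. A small sketch of that property (purely illustrative, not part of the vendored file):

```go
package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Get* returns the zero value when the message or field is unset,
	// even on a nil receiver.
	var m *dto.Metric
	fmt.Println(m.GetCounter().GetValue()) // 0, although m is nil
	fmt.Println(m.GetGauge().GetValue())   // 0

	m = &dto.Metric{Counter: &dto.Counter{}}
	fmt.Println(m.GetCounter().GetValue()) // still 0: the Value pointer is unset
}
```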
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 0000000..636a2c1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000..487fdc6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,412 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
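
A minimal sketch of how the decoding API above fits together, assuming the vendored expfmt and model packages; the URL and port are illustrative (they mirror the docker-compose setup rather than anything defined in this file):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Fetch a metrics endpoint and turn every exposed metric into model samples.
	resp, err := http.Get("http://localhost:8081/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	dec := &expfmt.SampleDecoder{
		Dec:  expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header)),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	for {
		var samples model.Vector
		if err := dec.Decode(&samples); err != nil {
			break // io.EOF once the stream is exhausted
		}
		for _, s := range samples {
			fmt.Printf("%s = %v\n", s.Metric, s.Value)
		}
	}
}
```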
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000..11839ed
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
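
A minimal sketch of content negotiation with the encoder API above; the handler, path, and port are hypothetical:

```go
package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// serveFamilies writes the given metric families in whatever format the
// client's Accept header negotiates (text format by default).
func serveFamilies(fams []*dto.MetricFamily) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		format := expfmt.Negotiate(r.Header)
		w.Header().Set("Content-Type", string(format))

		enc := expfmt.NewEncoder(w, format)
		for _, mf := range fams {
			if err := enc.Encode(mf); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
	})
}

func main() {
	// Hypothetical wiring; a real exporter would pass its gathered families.
	http.Handle("/metrics", serveFamilies(nil))
	http.ListenAndServe(":9999", nil)
}
```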
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000..fae10f6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = ``
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000..dc2eede
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..f11321c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input,
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+var (
+ escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ if includeDoubleQuote {
+ return escapeWithDoubleQuote.Replace(v)
+ }
+
+ return escape.Replace(v)
+}
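
A minimal usage sketch for MetricFamilyToText, building a hand-rolled gauge family (the metric name and label are made up for illustration):

```go
package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// One gauge family, written to stdout in the text exposition format:
	//   # HELP app_queue_length Current queue length.
	//   # TYPE app_queue_length gauge
	//   app_queue_length{queue="default"} 7
	mf := &dto.MetricFamily{
		Name: proto.String("app_queue_length"),
		Help: proto.String("Current queue length."),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{
				{Name: proto.String("queue"), Value: proto.String("default")},
			},
			Gauge: &dto.Gauge{Value: proto.Float64(7)},
		}},
	}
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
```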
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000..ef9a150
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,753 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends on '_count' or
+ // '_sum' respectively and belongs to a summary/histogram, representing the sample
+ // count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Try out if this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
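
For orientation, here is a minimal sketch of how the parser added above is typically driven: `TextToMetricFamilies` reads the text exposition format from any `io.Reader` and returns a map keyed by metric name. The sample input and metric names are illustrative only.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := `# HELP http_requests_total Total HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1027
http_requests_total{code="500"} 3
`
	var parser expfmt.TextParser // zero value is ready to use
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name, mf := range families {
		fmt.Printf("%s: type=%s, %d metric(s)\n", name, mf.GetType(), len(mf.GetMetric()))
	}
}
```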
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 0000000..7723656
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 0000000..648b38c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
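
A small sketch of how this goautoneg package is used for content negotiation; the Accept header and the list of alternatives are made up. The import uses the upstream path documented in the package README, since the copy vendored here lives under prometheus/common's `internal` directory and is only importable by that module itself.

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg" // upstream import path from the README above
)

func main() {
	accept := "application/json;q=0.5, text/plain;q=0.9, */*;q=0.1"
	alternatives := []string{"text/plain", "application/json"}

	// Negotiate picks the alternative that best matches the Accept header.
	fmt.Println(goautoneg.Negotiate(accept, alternatives)) // text/plain

	// ParseAccept returns the clauses sorted by preference.
	for _, clause := range goautoneg.ParseAccept(accept) {
		fmt.Printf("%s/%s q=%g\n", clause.Type, clause.SubType, clause.Q)
	}
}
```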
diff --git a/vendor/github.com/prometheus/common/log/eventlog_formatter.go b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
new file mode 100644
index 0000000..6d41284
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package log
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/windows/svc/eventlog"
+
+ "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ setEventlogFormatter = func(name string, debugAsInfo bool) error {
+ if name == "" {
+ return fmt.Errorf("missing name parameter")
+ }
+
+ fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
+ origLogger.Errorf("can't connect logger to eventlog: %v", err)
+ return err
+ }
+ origLogger.Formatter = fmter
+ return nil
+ }
+}
+
+type eventlogger struct {
+ log *eventlog.Log
+ debugAsInfo bool
+ wrap logrus.Formatter
+}
+
+func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
+ logHandle, err := eventlog.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
+}
+
+func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
+ data, err := s.wrap.Format(e)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
+ return data, err
+ }
+
+ switch e.Level {
+ case logrus.PanicLevel:
+ fallthrough
+ case logrus.FatalLevel:
+ fallthrough
+ case logrus.ErrorLevel:
+ err = s.log.Error(102, e.Message)
+ case logrus.WarnLevel:
+ err = s.log.Warning(101, e.Message)
+ case logrus.InfoLevel:
+ err = s.log.Info(100, e.Message)
+ case logrus.DebugLevel:
+ if s.debugAsInfo {
+ err = s.log.Info(100, e.Message)
+ }
+ default:
+ err = s.log.Info(100, e.Message)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
+ }
+
+ return data, err
+}
diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go
new file mode 100644
index 0000000..efad484
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/log.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+)
+
+type levelFlag string
+
+// String implements flag.Value.
+func (f levelFlag) String() string {
+ return fmt.Sprintf("%q", string(f))
+}
+
+// Set implements flag.Value.
+func (f levelFlag) Set(level string) error {
+ l, err := logrus.ParseLevel(level)
+ if err != nil {
+ return err
+ }
+ origLogger.Level = l
+ return nil
+}
+
+// setSyslogFormatter is nil if the target architecture does not support syslog.
+var setSyslogFormatter func(string, string) error
+
+// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
+var setEventlogFormatter func(string, bool) error
+
+func setJSONFormatter() {
+ origLogger.Formatter = &logrus.JSONFormatter{}
+}
+
+type logFormatFlag url.URL
+
+// String implements flag.Value.
+func (f logFormatFlag) String() string {
+ u := url.URL(f)
+ return fmt.Sprintf("%q", u.String())
+}
+
+// Set implements flag.Value.
+func (f logFormatFlag) Set(format string) error {
+ u, err := url.Parse(format)
+ if err != nil {
+ return err
+ }
+ if u.Scheme != "logger" {
+ return fmt.Errorf("invalid scheme %s", u.Scheme)
+ }
+ jsonq := u.Query().Get("json")
+ if jsonq == "true" {
+ setJSONFormatter()
+ }
+
+ switch u.Opaque {
+ case "syslog":
+ if setSyslogFormatter == nil {
+ return fmt.Errorf("system does not support syslog")
+ }
+ appname := u.Query().Get("appname")
+ facility := u.Query().Get("local")
+ return setSyslogFormatter(appname, facility)
+ case "eventlog":
+ if setEventlogFormatter == nil {
+ return fmt.Errorf("system does not support eventlog")
+ }
+ name := u.Query().Get("name")
+ debugAsInfo := false
+ debugAsInfoRaw := u.Query().Get("debugAsInfo")
+ if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
+ debugAsInfo = parsedDebugAsInfo
+ }
+ return setEventlogFormatter(name, debugAsInfo)
+ case "stdout":
+ origLogger.Out = os.Stdout
+ case "stderr":
+ origLogger.Out = os.Stderr
+ default:
+ return fmt.Errorf("unsupported logger %q", u.Opaque)
+ }
+ return nil
+}
+
+func init() {
+ AddFlags(flag.CommandLine)
+}
+
+// AddFlags adds the flags used by this package to the given FlagSet. That's
+// useful if working with a custom FlagSet. The init function of this package
+// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
+// flag.Parse() to make the logging flags take effect.
+func AddFlags(fs *flag.FlagSet) {
+ fs.Var(
+ levelFlag(origLogger.Level.String()),
+ "log.level",
+ "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
+ )
+ fs.Var(
+ logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
+ "log.format",
+ `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
+ )
+}
+
+// Logger is the interface for loggers used in the Prometheus components.
+type Logger interface {
+ Debug(...interface{})
+ Debugln(...interface{})
+ Debugf(string, ...interface{})
+
+ Info(...interface{})
+ Infoln(...interface{})
+ Infof(string, ...interface{})
+
+ Warn(...interface{})
+ Warnln(...interface{})
+ Warnf(string, ...interface{})
+
+ Error(...interface{})
+ Errorln(...interface{})
+ Errorf(string, ...interface{})
+
+ Fatal(...interface{})
+ Fatalln(...interface{})
+ Fatalf(string, ...interface{})
+
+ With(key string, value interface{}) Logger
+}
+
+type logger struct {
+ entry *logrus.Entry
+}
+
+func (l logger) With(key string, value interface{}) Logger {
+ return logger{l.entry.WithField(key, value)}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l logger) Debug(args ...interface{}) {
+ l.sourced().Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func (l logger) Debugln(args ...interface{}) {
+ l.sourced().Debugln(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func (l logger) Debugf(format string, args ...interface{}) {
+ l.sourced().Debugf(format, args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l logger) Info(args ...interface{}) {
+ l.sourced().Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func (l logger) Infoln(args ...interface{}) {
+ l.sourced().Infoln(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l logger) Infof(format string, args ...interface{}) {
+ l.sourced().Infof(format, args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l logger) Warn(args ...interface{}) {
+ l.sourced().Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func (l logger) Warnln(args ...interface{}) {
+ l.sourced().Warnln(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l logger) Warnf(format string, args ...interface{}) {
+ l.sourced().Warnf(format, args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l logger) Error(args ...interface{}) {
+ l.sourced().Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func (l logger) Errorln(args ...interface{}) {
+ l.sourced().Errorln(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l logger) Errorf(format string, args ...interface{}) {
+ l.sourced().Errorf(format, args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l logger) Fatal(args ...interface{}) {
+ l.sourced().Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func (l logger) Fatalln(args ...interface{}) {
+ l.sourced().Fatalln(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l logger) Fatalf(format string, args ...interface{}) {
+ l.sourced().Fatalf(format, args...)
+}
+
+// sourced adds a source field to the logger that contains
+// the file name and line where the logging happened.
+func (l logger) sourced() *logrus.Entry {
+ _, file, line, ok := runtime.Caller(2)
+ if !ok {
+ file = "<???>"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ file = file[slash+1:]
+ }
+ return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line))
+}
+
+var origLogger = logrus.New()
+var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
+
+// Base returns the default Logger logging to
+func Base() Logger {
+ return baseLogger
+}
+
+// NewLogger returns a new Logger logging to out.
+func NewLogger(w io.Writer) Logger {
+ l := logrus.New()
+ l.Out = w
+ return logger{entry: logrus.NewEntry(l)}
+}
+
+// NewNopLogger returns a logger that discards all log messages.
+func NewNopLogger() Logger {
+ l := logrus.New()
+ l.Out = ioutil.Discard
+ return logger{entry: logrus.NewEntry(l)}
+}
+
+// With adds a field to the logger.
+func With(key string, value interface{}) Logger {
+ return baseLogger.With(key, value)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ baseLogger.sourced().Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ baseLogger.sourced().Debugln(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ baseLogger.sourced().Debugf(format, args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ baseLogger.sourced().Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ baseLogger.sourced().Infoln(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ baseLogger.sourced().Infof(format, args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ baseLogger.sourced().Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ baseLogger.sourced().Warnln(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ baseLogger.sourced().Warnf(format, args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ baseLogger.sourced().Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ baseLogger.sourced().Errorln(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ baseLogger.sourced().Errorf(format, args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ baseLogger.sourced().Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ baseLogger.sourced().Fatalln(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ baseLogger.sourced().Fatalf(format, args...)
+}
+
+type errorLogWriter struct{}
+
+func (errorLogWriter) Write(b []byte) (int, error) {
+ baseLogger.sourced().Error(string(b))
+ return len(b), nil
+}
+
+// NewErrorLogger returns a log.Logger that is meant to be used
+// in the ErrorLog field of an http.Server to log HTTP server errors.
+func NewErrorLogger() *log.Logger {
+ return log.New(&errorLogWriter{}, "", 0)
+}
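
For context, a minimal sketch of how an application such as the exporter in this challenge might use this vendored log package: the `-log.level`/`-log.format` flags are registered by the package's `init()`, structured fields are attached with `With`, and `NewErrorLogger` plugs into `http.Server`. The `listen` flag and the `component` field are illustrative.

```go
package main

import (
	"flag"
	"net/http"

	"github.com/prometheus/common/log"
)

func main() {
	// -log.level and -log.format are registered by the package's init().
	listen := flag.String("listen", ":8081", "listen address (example flag)")
	flag.Parse()

	logger := log.With("component", "exporter")
	logger.Infof("starting, listening on %s", *listen)

	srv := &http.Server{
		Addr:     *listen,
		ErrorLog: log.NewErrorLogger(), // HTTP server errors go through the same logger
	}
	if err := srv.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}
```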
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go
new file mode 100644
index 0000000..fd8c6fb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go
@@ -0,0 +1,119 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!nacl,!plan9
+
+package log
+
+import (
+ "fmt"
+ "log/syslog"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ setSyslogFormatter = func(appname, local string) error {
+ if appname == "" {
+ return fmt.Errorf("missing appname parameter")
+ }
+ if local == "" {
+ return fmt.Errorf("missing local parameter")
+ }
+
+ fmter, err := newSyslogger(appname, local, origLogger.Formatter)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
+ origLogger.Errorf("can't connect logger to syslog: %v", err)
+ return err
+ }
+ origLogger.Formatter = fmter
+ return nil
+ }
+}
+
+var ceeTag = []byte("@cee:")
+
+type syslogger struct {
+ wrap logrus.Formatter
+ out *syslog.Writer
+}
+
+func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) {
+ priority, err := getFacility(facility)
+ if err != nil {
+ return nil, err
+ }
+ out, err := syslog.New(priority, appname)
+ return &syslogger{
+ out: out,
+ wrap: fmter,
+ }, err
+}
+
+func getFacility(facility string) (syslog.Priority, error) {
+ switch facility {
+ case "0":
+ return syslog.LOG_LOCAL0, nil
+ case "1":
+ return syslog.LOG_LOCAL1, nil
+ case "2":
+ return syslog.LOG_LOCAL2, nil
+ case "3":
+ return syslog.LOG_LOCAL3, nil
+ case "4":
+ return syslog.LOG_LOCAL4, nil
+ case "5":
+ return syslog.LOG_LOCAL5, nil
+ case "6":
+ return syslog.LOG_LOCAL6, nil
+ case "7":
+ return syslog.LOG_LOCAL7, nil
+ }
+ return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility)
+}
+
+func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
+ data, err := s.wrap.Format(e)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err)
+ return data, err
+ }
+ // only append tag to data sent to syslog (line), not to what
+ // is returned
+ line := string(append(ceeTag, data...))
+
+ switch e.Level {
+ case logrus.PanicLevel:
+ err = s.out.Crit(line)
+ case logrus.FatalLevel:
+ err = s.out.Crit(line)
+ case logrus.ErrorLevel:
+ err = s.out.Err(line)
+ case logrus.WarnLevel:
+ err = s.out.Warning(line)
+ case logrus.InfoLevel:
+ err = s.out.Info(line)
+ case logrus.DebugLevel:
+ err = s.out.Debug(line)
+ default:
+ err = s.out.Notice(line)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", err)
+ }
+
+ return data, err
+}
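
The syslog target above is selected through the `log.format` flag defined in log.go. As a rough sketch, an application could switch to the local7 facility programmatically with the standard library's `flag.Set`; the appname and facility values are made up.

```go
package main

import (
	"flag"

	"github.com/prometheus/common/log"
)

func main() {
	// Equivalent to passing -log.format "logger:syslog?appname=exporter&local=7"
	// on the command line; "local" maps to syslog.LOG_LOCAL7 via getFacility.
	if err := flag.Set("log.format", "logger:syslog?appname=exporter&local=7"); err != nil {
		log.Fatalf("cannot configure syslog logging: %v", err)
	}
	log.Info("now logging to syslog, facility local7")
}
```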
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000..35e739c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate returns an error if the alert data is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
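
A short sketch of the Alert API added above: build an alert, validate it, and inspect its status. The label and annotation values are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "ExporterDown",
			"instance":           "challenge:8080",
		},
		Annotations: model.LabelSet{"summary": "scrape target unreachable"},
		StartsAt:    time.Now().Add(-5 * time.Minute),
		// EndsAt left zero: the alert is still firing.
	}

	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	fmt.Println(a.Name(), a.Status(), a.Fingerprint())
}
```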
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
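
A quick sketch of the fingerprint helpers above: sorting a `Fingerprints` slice and intersecting two `FingerprintSet`s. The literal values are arbitrary.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	fps := model.Fingerprints{3, 1, 2}
	sort.Sort(fps)   // Fingerprints implements sort.Interface
	fmt.Println(fps) // [0000000000000001 0000000000000002 0000000000000003]

	a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
	b := model.FingerprintSet{2: {}, 3: {}, 4: {}}
	fmt.Println(len(a.Intersection(b))) // 2
}
```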
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 0000000..038fc1c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
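
The constants above are the standard FNV-1a 64-bit parameters; `hashNew`/`hashAdd` are an allocation-free inline of `hash/fnv`. Since those helpers are unexported, the standalone sketch below repeats the same fold (under the made-up name `inlineFNV1a`, with an arbitrary metric name) and checks that it matches the standard library.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// inlineFNV1a mirrors hashNew/hashAdd from the vendored model package.
func inlineFNV1a(s string) uint64 {
	h := uint64(offset64)
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	std := fnv.New64a()
	std.Write([]byte("http_requests_total"))
	fmt.Println(inlineFNV1a("http_requests_total") == std.Sum64()) // true
}
```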
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..41051a0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
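+
+// Illustrative sketch of how a consuming package might use the types above
+// (all names below are arbitrary):
+//
+//	model.LabelName("job").IsValid()  // true
+//	model.LabelName("0job").IsValid() // false: must not start with a digit
+//
+//	names := model.LabelNames{"job", "instance", "__name__"}
+//	sort.Sort(names) // "__name__", "instance", "job"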
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000..6eda08a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the metrics, using the following criteria:
+//
+// If m has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in m, then m is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If m and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !ln.IsValid() {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
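+
+// A rough sketch of LabelSet comparison and merging as specified above
+// (values are arbitrary):
+//
+//	a := model.LabelSet{"job": "exporter"}
+//	b := model.LabelSet{"job": "exporter", "instance": "localhost:8081"}
+//	a.Before(b)                                                    // true: a has fewer labels
+//	a.Merge(model.LabelSet{"instance": "localhost:8081"}).Equal(b) // true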
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000..9dff899
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,103 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ // MetricNameRE is a regular expression matching valid metric
+ // names. Note that the IsValidMetricName function performs the same
+ // check but faster than a match with this regular expression.
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
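+
+// Sketch of the String format produced above (metric and labels are
+// arbitrary):
+//
+//	m := model.Metric{
+//		model.MetricNameLabel: "http_requests_total",
+//		"method":              "GET",
+//		"code":                "200",
+//	}
+//	m.String() // http_requests_total{code="200", method="GET"}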
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000..a7b9691
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000..8762b13
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more prone to hash
+// collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
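+
+// For illustration: LabelsToSignature and LabelSet.Fingerprint apply the same
+// algorithm (sorted names, name/value pairs separated by SeparatorByte), so
+// for equal content they yield the same value:
+//
+//	sig := model.LabelsToSignature(map[string]string{"job": "exporter"})
+//	fp := model.LabelSet{"job": "exporter"}.Fingerprint()
+//	uint64(fp) == sig // true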
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000..7538e29
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes a match condition for the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns an error if any field of the matcher holds an invalid value.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus ecosystem.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns an error if any field of the silence holds an invalid value.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
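+
+// Sketch of a Silence that would pass Validate (all values are arbitrary):
+//
+//	s := model.Silence{
+//		Matchers:  []*model.Matcher{{Name: "job", Value: "exporter"}},
+//		StartsAt:  time.Now(),
+//		EndsAt:    time.Now().Add(2 * time.Hour),
+//		CreatedAt: time.Now(),
+//		CreatedBy: "ops@example.org",
+//		Comment:   "maintenance window",
+//	}
+//	err := s.Validate() // nil: every required field is set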
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000..548968a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,249 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. It must not be
+	// coarser than time.Second for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
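+
+// Sketch of the single-unit duration format handled above:
+//
+//	d, _ := model.ParseDuration("90m")
+//	time.Duration(d)                       // 1h30m0s
+//	model.Duration(2 * time.Hour).String() // "2h"
+//	_, err := model.ParseDuration("1h30m") // error: only one unit is accepted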
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000..7728aba
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,419 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which can appear in a real SamplePair and is thus not
+	// suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+ // ZeroSample is the pseudo zero-value of Sample used to signal a
+ // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+ // and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which can appear in a real Sample and is thus not suitable
+ // to signal a non-existing Sample.
+ ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of value equality are defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality are defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+	// Values must be equal as well (NaN-aware via SampleValue.Equal).
+	if !s.Value.Equal(o.Value) {
+		return false
+	}
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
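+
+// For illustration: sample values encode as quoted strings and a pair as a
+// [timestamp, value] array, so the JSON form looks roughly like this:
+//
+//	sp := model.SamplePair{Timestamp: model.TimeFromUnix(1500000000), Value: 0.5}
+//	b, _ := json.Marshal(sp) // [1500000000,"0.5"]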
diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go
new file mode 100644
index 0000000..84489a5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/version/info.go
@@ -0,0 +1,89 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "strings"
+ "text/template"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Build information. Populated at build-time.
+var (
+ Version string
+ Revision string
+ Branch string
+ BuildUser string
+ BuildDate string
+ GoVersion = runtime.Version()
+)
+
+// NewCollector returns a collector which exports metrics about current version information.
+func NewCollector(program string) *prometheus.GaugeVec {
+ buildInfo := prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: program,
+ Name: "build_info",
+ Help: fmt.Sprintf(
+ "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.",
+ program,
+ ),
+ },
+ []string{"version", "revision", "branch", "goversion"},
+ )
+ buildInfo.WithLabelValues(Version, Revision, Branch, GoVersion).Set(1)
+ return buildInfo
+}
+
+// versionInfoTmpl contains the template used by Info.
+var versionInfoTmpl = `
+{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
+ build user: {{.buildUser}}
+ build date: {{.buildDate}}
+ go version: {{.goVersion}}
+`
+
+// Print returns version information.
+func Print(program string) string {
+ m := map[string]string{
+ "program": program,
+ "version": Version,
+ "revision": Revision,
+ "branch": Branch,
+ "buildUser": BuildUser,
+ "buildDate": BuildDate,
+ "goVersion": GoVersion,
+ }
+ t := template.Must(template.New("version").Parse(versionInfoTmpl))
+
+ var buf bytes.Buffer
+ if err := t.ExecuteTemplate(&buf, "version", m); err != nil {
+ panic(err)
+ }
+ return strings.TrimSpace(buf.String())
+}
+
+// Info returns version, branch and revision information.
+func Info() string {
+ return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision)
+}
+
+// BuildContext returns goVersion, buildUser and buildDate information.
+func BuildContext() string {
+ return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate)
+}
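+
+// Sketch of how an exporter might expose its build information (the program
+// name is arbitrary):
+//
+//	prometheus.MustRegister(version.NewCollector("exporter"))
+//	// exposes exporter_build_info{version="...",revision="...",branch="...",goversion="..."} 1
+//	fmt.Println(version.Print("exporter"))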
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 0000000..0c802dd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,20 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Armen Baghumian
+* Bjoern Rabenstein
+* David Cournapeau
+* Ji-Hoon, Seol
+* Jonas Große Sundrup
+* Julius Volz
+* Matthias Rampke
+* Nicky Gerritsen
+* Rémi Audebert
+* Tobias Schmidt
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000..5705f0f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 0000000..c264a49
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,6 @@
+ci:
+ ! gofmt -l *.go | read nothing
+ go vet
+ go test -v ./...
+ go get github.com/golang/lint/golint
+ golint *.go
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000..53c5e9a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000..6e7ee6b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,10 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[](https://godoc.org/github.com/prometheus/procfs)
+[](https://travis-ci.org/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000..e2acd6d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000..49aaab0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,33 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+// Path returns the path of the given subsystem relative to the procfs root.
+func (fs FS) Path(p ...string) string {
+ return path.Join(append([]string{string(fs)}, p...)...)
+}
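For orientation, here is a minimal usage sketch of the `FS` type added above (assuming the package is vendored as `github.com/prometheus/procfs` and `/proc` is readable); it only uses the exported API shown in this file:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Wrap the proc pseudo-filesystem at its default mount point.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatalf("could not open /proc: %s", err)
	}

	// Path joins subsystem components onto the mount point.
	fmt.Println(fs.Path("net", "ip_vs_stats")) // prints /proc/net/ip_vs_stats
}
```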
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 0000000..e7012f7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,224 @@
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+ // Total incoming packets processed.
+ IncomingPackets uint64
+ // Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The remote (real) port.
+ RemotePort uint16
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := os.Open(fs.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(string(scanner.Text()))
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+ tmp := strings.SplitN(s, ":", 2)
+
+ if len(tmp) != 2 {
+ return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
+ }
+
+ if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
+ return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ }
+
+ ip, err := hex.DecodeString(tmp[0])
+ if err != nil {
+ return nil, 0, err
+ }
+
+ port, err := strconv.ParseUint(tmp[1], 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
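As background for `parseIPPort` above: the kernel writes addresses in `/proc/net/ip_vs` as hexadecimal in network byte order, with the port in hex as well. A small standalone sketch of the same decoding, using an invented sample value:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical address field from /proc/net/ip_vs:
	// C0A80001 is 192.168.0.1 in hex, 0050 is port 80 in hex.
	s := "C0A80001:0050"

	parts := strings.SplitN(s, ":", 2)
	ipBytes, err := hex.DecodeString(parts[0]) // 4 bytes for IPv4, 16 for IPv6
	if err != nil {
		panic(err)
	}
	port, err := strconv.ParseUint(parts[1], 16, 16)
	if err != nil {
		panic(err)
	}

	fmt.Println(net.IP(ipBytes), uint16(port)) // 192.168.0.1 80
}
```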
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 0000000..d7a248c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,138 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+ // activity-state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// ParseMDStat parses an mdstat file and returns a slice of structs with the relevant info.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+ mdStatusFilePath := fs.Path("mdstat")
+ content, err := ioutil.ReadFile(mdStatusFilePath)
+ if err != nil {
+ return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ mdStates := []MDStat{}
+ lines := strings.Split(string(content), "\n")
+ for i, l := range lines {
+ if l == "" {
+ continue
+ }
+ if l[0] == ' ' {
+ continue
+ }
+ if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ continue
+ }
+
+ mainLine := strings.Split(l, " ")
+ if len(mainLine) < 3 {
+ return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ }
+ mdName := mainLine[0]
+ activityState := mainLine[2]
+
+ if len(lines) <= i+3 {
+ return mdStates, fmt.Errorf(
+ "error parsing %s: too few lines for md device %s",
+ mdStatusFilePath,
+ mdName,
+ )
+ }
+
+ active, total, size, err := evalStatusline(lines[i+1])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ // j is the line number of the syncing-line.
+ j := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ j = i + 3
+ }
+
+ // If device is syncing at the moment, get the number of currently
+ // synced bytes, otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+ syncedBlocks, err = evalBuildline(lines[j])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+ }
+
+ mdStates = append(mdStates, MDStat{
+ Name: mdName,
+ ActivityState: activityState,
+ DisksActive: active,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: syncedBlocks,
+ })
+ }
+
+ return mdStates, nil
+}
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ return active, total, size, nil
+}
+
+func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+ if len(matches) != 2 {
+ return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ }
+
+ syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedBlocks, nil
+}
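To illustrate `statuslineRE` above, this sketch shows how it decomposes a typical (invented) mdstat status line; note the capture groups are blocks, total disks, and active disks, in that order, matching `evalStatusline`:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	statuslineRE := regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)

	// Invented second line of an md entry in /proc/mdstat.
	line := "      3906887744 blocks super 1.2 [2/2] [UU]"

	m := statuslineRE.FindStringSubmatch(line)
	fmt.Println(m[1], m[2], m[3]) // 3906887744 2 2  (blocks, disks total, disks active)
}
```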
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000..0d0a6a9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,212 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := os.Open(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ f, err := os.Open(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 0000000..b4e31d7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := os.Open(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+ if err != nil {
+ return pio, err
+ }
+
+ return pio, nil
+}
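A rough sketch of the `fmt.Sscanf` parsing used in `NewIO` above, applied to an invented `/proc/<pid>/io` snippet with the same layout:

```go
package main

import "fmt"

func main() {
	// Invented /proc/<pid>/io content in the layout parsed above.
	data := "rchar: 750339\nwchar: 818609\nsyscr: 7405\nsyscw: 5245\n" +
		"read_bytes: 1024\nwrite_bytes: 2048\ncancelled_write_bytes: 0\n"

	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
		"read_bytes: %d\nwrite_bytes: %d\n" +
		"cancelled_write_bytes: %d\n"

	var rchar, wchar, syscr, syscw, readBytes, writeBytes uint64
	var cancelledWriteBytes int64

	n, err := fmt.Sscanf(data, ioFormat, &rchar, &wchar, &syscr, &syscw,
		&readBytes, &writeBytes, &cancelledWriteBytes)
	fmt.Println(n, err)                // 7 <nil>
	fmt.Println(readBytes, writeBytes) // 1024 2048
}
```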
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..2df997c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+ // CPU time limit in seconds.
+ CPUTime int
+ // Maximum size of files that the process may create.
+ FileSize int
+ // Maximum size of the process's data segment (initialized data,
+ // uninitialized data, and heap).
+ DataSize int
+ // Maximum size of the process stack in bytes.
+ StackSize int
+ // Maximum size of a core file.
+ CoreFileSize int
+ // Limit of the process's resident set in pages.
+ ResidentSet int
+ // Maximum number of processes that can be created for the real user ID of
+ // the calling process.
+ Processes int
+ // Value one greater than the maximum file descriptor number that can be
+ // opened by this process.
+ OpenFiles int
+ // Maximum number of bytes of memory that may be locked into RAM.
+ LockedMemory int
+ // Maximum size of the process's virtual memory address space in bytes.
+ AddressSpace int
+ // Limit on the combined number of flock(2) locks and fcntl(2) leases that
+ // this process may establish.
+ FileLocks int
+ // Limit of signals that may be queued for the real user ID of the calling
+ // process.
+ PendingSignals int
+ // Limit on the number of bytes that can be allocated for POSIX message
+ // queues for the real user ID of the calling process.
+ MsqqueueSize int
+ // Limit of the nice priority set using setpriority(2) or nice(2).
+ NicePriority int
+ // Limit of the real-time priority set using sched_setscheduler(2) or
+ // sched_setparam(2).
+ RealtimePriority int
+ // Limit (in microseconds) on the amount of CPU time that a process
+ // scheduled under a real-time scheduling policy may consume without making
+ // a blocking system call.
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := os.Open(p.path("limits"))
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+ l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
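A brief usage sketch of `NewLimits` above, via the `Self` helper from proc.go; note that unlimited values come back as -1 from `parseInt`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatalf("could not get process: %s", err)
	}

	limits, err := p.NewLimits()
	if err != nil {
		log.Fatalf("could not get limits: %s", err)
	}

	// A value of -1 means the corresponding soft limit is "unlimited".
	fmt.Println("max open files:", limits.OpenFiles)
}
```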
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..724e271
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, the value is expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := os.Open(p.path("stat"))
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
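To make the `StartTime` and `CPUTime` arithmetic above concrete, here is a tiny sketch with invented numbers (boot time as read from /proc/stat, tick counts as read from /proc/[pid]/stat):

```go
package main

import "fmt"

const userHZ = 100 // clock ticks per second, hardcoded as discussed above

func main() {
	var (
		bootTime  int64  = 1500000000 // invented btime, seconds since the epoch
		starttime uint64 = 12345      // invented process start time, in ticks since boot
		utime     uint   = 250        // invented user-mode ticks
		stime     uint   = 50         // invented kernel-mode ticks
	)

	start := float64(bootTime) + float64(starttime)/userHZ
	cpu := float64(utime+stime) / userHZ

	fmt.Printf("start time: %.2f s since the epoch\n", start) // 1500000123.45
	fmt.Printf("cpu time: %.2f s\n", cpu)                     // 3.00
}
```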
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000..1ca217e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,56 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns information about the current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := os.Open(fs.Path("stat"))
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/sys/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/sys/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sys/unix/asm.s b/vendor/golang.org/x/sys/unix/asm.s
new file mode 100644
index 0000000..8ed2fdb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm.s
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+TEXT ·use(SB),NOSPLIT,$0
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s
new file mode 100644
index 0000000..8a72783
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
new file mode 100644
index 0000000..6321421
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s
new file mode 100644
index 0000000..333242d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s
@@ -0,0 +1,30 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+// +build arm,darwin
+
+#include "textflag.h"
+
+//
+// System call support for ARM, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
new file mode 100644
index 0000000..97e0174
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s
@@ -0,0 +1,30 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+// +build arm64,darwin
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, Darwin
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
new file mode 100644
index 0000000..d5ed672
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, DragonFly
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-64
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-112
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-64
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-88
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s
new file mode 100644
index 0000000..c9a0a26
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
new file mode 100644
index 0000000..3517247
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
new file mode 100644
index 0000000..9227c87
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s
new file mode 100644
index 0000000..4db2909
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -0,0 +1,35 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for 386, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·socketcall(SB),NOSPLIT,$0-36
+ JMP syscall·socketcall(SB)
+
+TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
+ JMP syscall·rawsocketcall(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ JMP syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
new file mode 100644
index 0000000..44e25c6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for AMD64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+ JMP syscall·gettimeofday(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
new file mode 100644
index 0000000..cf0b574
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for arm, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-32
+ B syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
new file mode 100644
index 0000000..4be9bfe
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build arm64
+// +build !gccgo
+
+#include "textflag.h"
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
new file mode 100644
index 0000000..724e580
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips64 mips64le
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for mips64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
new file mode 100644
index 0000000..8d231fe
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -0,0 +1,28 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build ppc64 ppc64le
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ BR syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ BR syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ BR syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ BR syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
new file mode 100644
index 0000000..1188985
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
@@ -0,0 +1,28 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x
+// +build linux
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for s390x, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ BR syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ BR syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ BR syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ BR syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s
new file mode 100644
index 0000000..48bdcd7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
new file mode 100644
index 0000000..2ede05c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
new file mode 100644
index 0000000..e892857
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s
new file mode 100644
index 0000000..00576f3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for 386, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
new file mode 100644
index 0000000..790ef77
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for AMD64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
new file mode 100644
index 0000000..43ed17a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
+//
+
+TEXT ·sysvicall6(SB),NOSPLIT,$0-64
+ JMP syscall·sysvicall6(SB)
+
+TEXT ·rawSysvicall6(SB),NOSPLIT,$0-64
+ JMP syscall·rawSysvicall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
new file mode 100644
index 0000000..6e32296
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Bluetooth sockets and messages
+
+package unix
+
+// Bluetooth Protocols
+const (
+ BTPROTO_L2CAP = 0
+ BTPROTO_HCI = 1
+ BTPROTO_SCO = 2
+ BTPROTO_RFCOMM = 3
+ BTPROTO_BNEP = 4
+ BTPROTO_CMTP = 5
+ BTPROTO_HIDP = 6
+ BTPROTO_AVDTP = 7
+)
+
+const (
+ HCI_CHANNEL_RAW = 0
+ HCI_CHANNEL_USER = 1
+ HCI_CHANNEL_MONITOR = 2
+ HCI_CHANNEL_CONTROL = 3
+)
+
+// Socketoption Level
+const (
+ SOL_BLUETOOTH = 0x112
+ SOL_HCI = 0x0
+ SOL_L2CAP = 0x6
+ SOL_RFCOMM = 0x12
+ SOL_SCO = 0x11
+)
diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go
new file mode 100644
index 0000000..a96f0eb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/constants.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+const (
+ R_OK = 0x4
+ W_OK = 0x2
+ X_OK = 0x1
+)
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
new file mode 100644
index 0000000..45e281a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/env_unix.go
@@ -0,0 +1,27 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+// Unix environment variables.
+
+package unix
+
+import "syscall"
+
+func Getenv(key string) (value string, found bool) {
+ return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+ return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+ syscall.Clearenv()
+}
+
+func Environ() []string {
+ return syscall.Environ()
+}
diff --git a/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/golang.org/x/sys/unix/env_unset.go
new file mode 100644
index 0000000..9222262
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/env_unset.go
@@ -0,0 +1,14 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.4
+
+package unix
+
+import "syscall"
+
+func Unsetenv(key string) error {
+ // This was added in Go 1.4.
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/flock.go
new file mode 100644
index 0000000..ce67a59
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/flock.go
@@ -0,0 +1,24 @@
+// +build linux darwin freebsd openbsd netbsd dragonfly
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package unix
+
+import "unsafe"
+
+// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
+// systems by flock_linux_32bit.go to be SYS_FCNTL64.
+var fcntl64Syscall uintptr = SYS_FCNTL
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
+ if errno == 0 {
+ return nil
+ }
+ return errno
+}
diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
new file mode 100644
index 0000000..362831c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
@@ -0,0 +1,13 @@
+// +build linux,386 linux,arm
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+func init() {
+ // On 32-bit Linux systems, the fcntl syscall that matches Go's
+ // Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
+ fcntl64Syscall = SYS_FCNTL64
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
new file mode 100644
index 0000000..94c8232
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+package unix
+
+import "syscall"
+
+// We can't use the gc-syntax .s files for gccgo. On the plus side
+// much of the functionality can be written directly in Go.
+
+//extern gccgoRealSyscall
+func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
new file mode 100644
index 0000000..07f6be0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -0,0 +1,41 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _STRINGIFY2_(x) #x
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
+
+// Call syscall from C code because the gccgo support for calling from
+// Go to C does not support varargs functions.
+
+struct ret {
+ uintptr_t r;
+ uintptr_t err;
+};
+
+struct ret
+gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ struct ret r;
+
+ errno = 0;
+ r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ r.err = errno;
+ return r;
+}
+
+// Define the use function in C so that it is not inlined.
+
+extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline));
+
+void
+use(void *p __attribute__ ((unused)))
+{
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
new file mode 100644
index 0000000..bffe1a7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo,linux,amd64
+
+package unix
+
+import "syscall"
+
+//extern gettimeofday
+func realGettimeofday(*Timeval, *byte) int32
+
+func gettimeofday(tv *Timeval) (err syscall.Errno) {
+ r := realGettimeofday(tv, nil)
+ if r < 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go
new file mode 100644
index 0000000..5633269
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo,linux,sparc64
+
+package unix
+
+import "syscall"
+
+//extern sysconf
+func realSysconf(name int) int64
+
+func sysconf(name int) (n int64, err syscall.Errno) {
+ r := realSysconf(name)
+ if r < 0 {
+ return 0, syscall.GetErrno()
+ }
+ return r, 0
+}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
new file mode 100755
index 0000000..2a1473f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -0,0 +1,292 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# The unix package provides access to the raw system call
+# interface of the underlying operating system. Porting Go to
+# a new architecture/operating system combination requires
+# some manual effort, though there are tools that automate
+# much of the process. The auto-generated files have names
+# beginning with z.
+#
+# This script runs or (given -n) prints suggested commands to generate z files
+# for the current system. Running those commands is not automatic.
+# This script is documentation more than anything else.
+#
+# * asm_${GOOS}_${GOARCH}.s
+#
+# This hand-written assembly file implements system call dispatch.
+# There are three entry points:
+#
+# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
+# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+#
+# The first and second are the standard ones; they differ only in
+# how many arguments can be passed to the kernel.
+# The third is for low-level use by the ForkExec wrapper;
+# unlike the first two, it does not call into the scheduler to
+# let it know that a system call is running.
+#
+# * syscall_${GOOS}.go
+#
+# This hand-written Go file implements system calls that need
+# special handling and lists "//sys" comments giving prototypes
+# for ones that can be auto-generated. Mksyscall reads those
+# comments to generate the stubs.
+#
+# * syscall_${GOOS}_${GOARCH}.go
+#
+# Same as syscall_${GOOS}.go except that it contains code specific
+# to ${GOOS} on one particular architecture.
+#
+# * types_${GOOS}.c
+#
+# This hand-written C file includes standard C headers and then
+# creates typedef or enum names beginning with a dollar sign
+# (use of $ in variable names is a gcc extension). The hardest
+# part about preparing this file is figuring out which headers to
+# include and which symbols need to be #defined to get the
+# actual data structures that pass through to the kernel system calls.
+# Some C libraries present alternate versions for binary compatibility
+# and translate them on the way in and out of system calls, but
+# there is almost always a #define that can get the real ones.
+# See types_darwin.c and types_linux.c for examples.
+#
+# * zerror_${GOOS}_${GOARCH}.go
+#
+# This machine-generated file defines the system's error numbers,
+# error strings, and signal numbers. The generator is "mkerrors.sh".
+# Usually no arguments are needed, but mkerrors.sh will pass its
+# arguments on to godefs.
+#
+# * zsyscall_${GOOS}_${GOARCH}.go
+#
+# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
+#
+# * zsysnum_${GOOS}_${GOARCH}.go
+#
+# Generated by mksysnum_${GOOS}.
+#
+# * ztypes_${GOOS}_${GOARCH}.go
+#
+# Generated by godefs; see types_${GOOS}.c above.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="./mksyscall.pl"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+run="sh"
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+GOOSARCH_in=syscall_$GOOSARCH.go
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+darwin_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_arm)
+ mkerrors="$mkerrors"
+ mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+dragonfly_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -dragonfly"
+ mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+dragonfly_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -dragonfly"
+ mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -arm"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across over platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+linux_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_amd64)
+ unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1)
+ if [ "$unistd_h" = "" ]; then
+ echo >&2 cannot find unistd_64.h
+ exit 1
+ fi
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -arm"
+ mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_arm64)
+ unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1)
+ if [ "$unistd_h" = "" ]; then
+ echo >&2 cannot find unistd_64.h
+ exit 1
+ fi
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across over platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+linux_ppc64)
+ GOOSARCH_in=syscall_linux_ppc64x.go
+ unistd_h=/usr/include/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_ppc64le)
+ GOOSARCH_in=syscall_linux_ppc64x.go
+ unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_s390x)
+ GOOSARCH_in=syscall_linux_s390x.go
+ unistd_h=/usr/include/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ # Let the type of C char be signed to make the bare sys
+ # API more consistent between platforms.
+ # This is a deliberate departure from the way the syscall
+ # package generates its version of the types file.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+linux_sparc64)
+ GOOSARCH_in=syscall_linux_sparc64.go
+ unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -netbsd"
+ mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -netbsd"
+ mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -openbsd"
+ mksysctl="./mksysctl_openbsd.pl"
+ zsysctl="zsysctl_openbsd.go"
+ mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -openbsd"
+ mksysctl="./mksysctl_openbsd.pl"
+ zsysctl="zsysctl_openbsd.go"
+ mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+solaris_amd64)
+ mksyscall="./mksyscall_solaris.pl"
+ mkerrors="$mkerrors -m64"
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ *)
+ syscall_goos="syscall_$GOOS.go"
+ case "$GOOS" in
+ darwin | dragonfly | freebsd | netbsd | openbsd)
+ syscall_goos="syscall_bsd.go $syscall_goos"
+ ;;
+ esac
+ if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
+ ;;
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then
+ echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go";
+ echo "$mktypes types_$GOOS.go | go run mkpost.go >>ztypes_$GOOSARCH.go";
+ fi
+) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
new file mode 100755
index 0000000..7e6276b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -0,0 +1,485 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+if test -z "$GOARCH" -o -z "$GOOS"; then
+ echo 1>&2 "GOARCH or GOOS not defined in environment"
+ exit 1
+fi
+
+CC=${CC:-cc}
+
+if [[ "$GOOS" -eq "solaris" ]]; then
+ # Assumes GNU versions of utilities in PATH.
+ export PATH=/usr/gnu/bin:$PATH
+fi
+
+uname=$(uname)
+
+includes_Darwin='
+#define _DARWIN_C_SOURCE
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_DragonFly='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+includes_FreeBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if __FreeBSD__ >= 10
+#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
+#undef SIOCAIFADDR
+#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
+#undef SIOCSIFPHYADDR
+#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
+#endif
+'
+
+includes_Linux='
+#define _LARGEFILE_SOURCE
+#define _LARGEFILE64_SOURCE
+#ifndef __LP64__
+#define _FILE_OFFSET_BITS 64
+#endif
+#define _GNU_SOURCE
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef MSG_FASTOPEN
+#define MSG_FASTOPEN 0x20000000
+#endif
+
+#ifndef PTRACE_GETREGS
+#define PTRACE_GETREGS 0xc
+#endif
+
+#ifndef PTRACE_SETREGS
+#define PTRACE_SETREGS 0xd
+#endif
+
+#ifdef SOL_BLUETOOTH
+// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
+// but it is already in bluetooth_linux.go
+#undef SOL_BLUETOOTH
+#endif
+'
+
+includes_NetBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// Needed since <sys/param.h> refers to it...
+#define schedppq 1
+'
+
+includes_OpenBSD='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// We keep some constants not supported in OpenBSD 5.5 and beyond for
+// the promise of compatibility.
+#define EMUL_ENABLED 0x1
+#define EMUL_NATIVE 0x2
+#define IPV6_FAITH 0x1d
+#define IPV6_OPTIONS 0x1
+#define IPV6_RTHDR_STRICT 0x1
+#define IPV6_SOCKOPT_RESERVED1 0x3
+#define SIOCGIFGENERIC 0xc020693a
+#define SIOCSIFGENERIC 0x80206939
+#define WALTSIG 0x4
+'
+
+includes_SunOS='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+
+
+includes='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+'
+ccflags="$@"
+
+# Write go tool cgo -godefs input.
+(
+ echo package unix
+ echo
+ echo '/*'
+ indirect="includes_$(uname)"
+ echo "${!indirect} $includes"
+ echo '*/'
+ echo 'import "C"'
+ echo 'import "syscall"'
+ echo
+ echo 'const ('
+
+ # The gcc command line prints all the #defines
+ # it encounters while processing the input
+ echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
+ awk '
+ $1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
+
+ $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
+ $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
+ $2 ~ /^(SCM_SRCRT)$/ {next}
+ $2 ~ /^(MAP_FAILED)$/ {next}
+ $2 ~ /^ELF_.*$/ {next}# contains ELF_ARCH, etc.
+
+ $2 ~ /^EXTATTR_NAMESPACE_NAMES/ ||
+ $2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next}
+
+ $2 !~ /^ETH_/ &&
+ $2 !~ /^EPROC_/ &&
+ $2 !~ /^EQUIV_/ &&
+ $2 !~ /^EXPR_/ &&
+ $2 ~ /^E[A-Z0-9_]+$/ ||
+ $2 ~ /^B[0-9_]+$/ ||
+ $2 == "BOTHER" ||
+ $2 ~ /^CI?BAUD(EX)?$/ ||
+ $2 == "IBSHIFT" ||
+ $2 ~ /^V[A-Z0-9]+$/ ||
+ $2 ~ /^CS[A-Z0-9]/ ||
+ $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
+ $2 ~ /^IGN/ ||
+ $2 ~ /^IX(ON|ANY|OFF)$/ ||
+ $2 ~ /^IN(LCR|PCK)$/ ||
+ $2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
+ $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ ||
+ $2 == "BRKINT" ||
+ $2 == "HUPCL" ||
+ $2 == "PENDIN" ||
+ $2 == "TOSTOP" ||
+ $2 == "XCASE" ||
+ $2 == "ALTWERASE" ||
+ $2 == "NOKERNINFO" ||
+ $2 ~ /^PAR/ ||
+ $2 ~ /^SIG[^_]/ ||
+ $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
+ $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
+ $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
+ $2 ~ /^O?XTABS$/ ||
+ $2 ~ /^TC[IO](ON|OFF)$/ ||
+ $2 ~ /^IN_/ ||
+ $2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
+ $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
+ $2 == "ICMPV6_FILTER" ||
+ $2 == "SOMAXCONN" ||
+ $2 == "NAME_MAX" ||
+ $2 == "IFNAMSIZ" ||
+ $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ ||
+ $2 ~ /^SYSCTL_VERS/ ||
+ $2 ~ /^(MS|MNT)_/ ||
+ $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
+ $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ ||
+ $2 ~ /^LINUX_REBOOT_CMD_/ ||
+ $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
+ $2 !~ "NLA_TYPE_MASK" &&
+ $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ ||
+ $2 ~ /^SIOC/ ||
+ $2 ~ /^TIOC/ ||
+ $2 ~ /^TCGET/ ||
+ $2 ~ /^TCSET/ ||
+ $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ ||
+ $2 !~ "RTF_BITS" &&
+ $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
+ $2 ~ /^BIOC/ ||
+ $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
+ $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ ||
+ $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
+ $2 ~ /^CLONE_[A-Z_]+/ ||
+ $2 !~ /^(BPF_TIMEVAL)$/ &&
+ $2 ~ /^(BPF|DLT)_/ ||
+ $2 ~ /^CLOCK_/ ||
+ $2 ~ /^CAN_/ ||
+ $2 !~ "WMESGLEN" &&
+ $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
+ $2 ~ /^__WCOREFLAG$/ {next}
+ $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
+
+ {next}
+ ' | sort
+
+ echo ')'
+) >_const.go
+
+# Pull out the error names for later.
+errors=$(
+ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
+ sort
+)
+
+# Pull out the signal names for later.
+signals=$(
+ echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
+ sort
+)
+
+# Again, writing regexps to a file.
+echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ sort >_error.grep
+echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
+ sort >_signal.grep
+
+echo '// mkerrors.sh' "$@"
+echo '// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT'
+echo
+echo "// +build ${GOARCH},${GOOS}"
+echo
+go tool cgo -godefs -- "$@" _const.go >_error.out
+cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
+echo
+echo '// Errors'
+echo 'const ('
+cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/'
+echo ')'
+
+echo
+echo '// Signals'
+echo 'const ('
+cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/'
+echo ')'
+
+# Run C program to print error and syscall strings.
+(
+ echo -E "
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <ctype.h>
+#include <string.h>
+#include <signal.h>
+
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
+
+enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
+
+int errors[] = {
+"
+ for i in $errors
+ do
+ echo -E ' '$i,
+ done
+
+ echo -E "
+};
+
+int signals[] = {
+"
+ for i in $signals
+ do
+ echo -E ' '$i,
+ done
+
+ # Use -E because on some systems bash builtin interprets \n itself.
+ echo -E '
+};
+
+static int
+intcmp(const void *a, const void *b)
+{
+ return *(int*)a - *(int*)b;
+}
+
+int
+main(void)
+{
+ int i, j, e;
+ char buf[1024], *p;
+
+ printf("\n\n// Error table\n");
+ printf("var errors = [...]string {\n");
+ qsort(errors, nelem(errors), sizeof errors[0], intcmp);
+ for(i=0; i<nelem(errors); i++) {
+ e = errors[i];
+ if(i > 0 && errors[i-1] == e)
+ continue;
+ strcpy(buf, strerror(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ printf("\t%d: \"%s\",\n", e, buf);
+ }
+ printf("}\n\n");
+
+ printf("\n\n// Signal table\n");
+ printf("var signals = [...]string {\n");
+ qsort(signals, nelem(signals), sizeof signals[0], intcmp);
+ for(i=0; i<nelem(signals); i++) {
+ e = signals[i];
+ if(i > 0 && signals[i-1] == e)
+ continue;
+ strcpy(buf, strsignal(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ // cut trailing : number.
+ p = strrchr(buf, ":"[0]);
+ if(p)
+ *p = '\0';
+ printf("\t%d: \"%s\",\n", e, buf);
+ }
+ printf("}\n\n");
+
+ return 0;
+}
+
+'
+) >_errors.c
+
+$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out
diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go
new file mode 100644
index 0000000..ed50d90
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkpost.go
@@ -0,0 +1,62 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// mkpost processes the output of cgo -godefs to
+// modify the generated types. It is used to clean up
+// the sys API in an architecture specific manner.
+//
+// mkpost is run after cgo -godefs by mkall.sh.
+package main
+
+import (
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "os"
+ "regexp"
+)
+
+func main() {
+ b, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ log.Fatal(err)
+ }
+ s := string(b)
+
+ goarch := os.Getenv("GOARCH")
+ goos := os.Getenv("GOOS")
+ if goarch == "s390x" && goos == "linux" {
+ // Export the types of PtraceRegs fields.
+ re := regexp.MustCompile("ptrace(Psw|Fpregs|Per)")
+ s = re.ReplaceAllString(s, "Ptrace$1")
+
+ // Replace padding fields inserted by cgo with blank identifiers.
+ re = regexp.MustCompile("Pad_cgo[A-Za-z0-9_]*")
+ s = re.ReplaceAllString(s, "_")
+
+ // Replace other unwanted fields with blank identifiers.
+ re = regexp.MustCompile("X_[A-Za-z0-9_]*")
+ s = re.ReplaceAllString(s, "_")
+
+ // Replace the control_regs union with a blank identifier for now.
+ re = regexp.MustCompile("(Control_regs)\\s+\\[0\\]uint64")
+ s = re.ReplaceAllString(s, "_ [0]uint64")
+ }
+
+ // gofmt
+ b, err = format.Source([]byte(s))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Append this command to the header to show where the new file
+ // came from.
+ re := regexp.MustCompile("(cgo -godefs [a-zA-Z0-9_]+\\.go.*)")
+ b = re.ReplaceAll(b, []byte("$1 | go run mkpost.go"))
+
+ fmt.Printf("%s", b)
+}
diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl
new file mode 100755
index 0000000..b1e7766
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mksyscall.pl
@@ -0,0 +1,323 @@
+#!/usr/bin/env perl
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This program reads a file containing function prototypes
+# (like syscall_darwin.go) and generates system call bodies.
+# The prototypes are marked by lines beginning with "//sys"
+# and read like func declarations if //sys is replaced by func, but:
+# * The parameter lists must give a name for each argument.
+# This includes return parameters.
+# * The parameter lists must give a type for each argument:
+# the (x, y, z int) shorthand is not allowed.
+# * If the return parameter is an error number, it must be named errno.
+
+# A line beginning with //sysnb is like //sys, except that the
+# goroutine will not be suspended during the execution of the system
+# call. This must only be used for system calls which can never
+# block, as otherwise the system call could cause all goroutines to
+# hang.
+
+use strict;
+
+my $cmdline = "mksyscall.pl " . join(' ', @ARGV);
+my $errors = 0;
+my $_32bit = "";
+my $plan9 = 0;
+my $openbsd = 0;
+my $netbsd = 0;
+my $dragonfly = 0;
+my $arm = 0; # 64-bit value should use (even, odd)-pair
+
+if($ARGV[0] eq "-b32") {
+ $_32bit = "big-endian";
+ shift;
+} elsif($ARGV[0] eq "-l32") {
+ $_32bit = "little-endian";
+ shift;
+}
+if($ARGV[0] eq "-plan9") {
+ $plan9 = 1;
+ shift;
+}
+if($ARGV[0] eq "-openbsd") {
+ $openbsd = 1;
+ shift;
+}
+if($ARGV[0] eq "-netbsd") {
+ $netbsd = 1;
+ shift;
+}
+if($ARGV[0] eq "-dragonfly") {
+ $dragonfly = 1;
+ shift;
+}
+if($ARGV[0] eq "-arm") {
+ $arm = 1;
+ shift;
+}
+
+if($ARGV[0] =~ /^-/) {
+ print STDERR "usage: mksyscall.pl [-b32 | -l32] [file ...]\n";
+ exit 1;
+}
+
+if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
+ print STDERR "GOARCH or GOOS not defined in environment\n";
+ exit 1;
+}
+
+sub parseparamlist($) {
+ my ($list) = @_;
+ $list =~ s/^\s*//;
+ $list =~ s/\s*$//;
+ if($list eq "") {
+ return ();
+ }
+ return split(/\s*,\s*/, $list);
+}
+
+sub parseparam($) {
+ my ($p) = @_;
+ if($p !~ /^(\S*) (\S*)$/) {
+ print STDERR "$ARGV:$.: malformed parameter: $p\n";
+ $errors = 1;
+ return ("xx", "int");
+ }
+ return ($1, $2);
+}
+
+my $text = "";
+while(<>) {
+ chomp;
+ s/\s+/ /g;
+ s/^\s+//;
+ s/\s+$//;
+ my $nonblock = /^\/\/sysnb /;
+ next if !/^\/\/sys / && !$nonblock;
+
+ # Line must be of the form
+ # func Open(path string, mode int, perm int) (fd int, errno error)
+ # Split into name, in params, out params.
+ if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) {
+ print STDERR "$ARGV:$.: malformed //sys declaration\n";
+ $errors = 1;
+ next;
+ }
+ my ($func, $in, $out, $sysname) = ($2, $3, $4, $5);
+
+ # Split argument lists on comma.
+ my @in = parseparamlist($in);
+ my @out = parseparamlist($out);
+
+ # Try in vain to keep people from editing this file.
+ # The theory is that they jump into the middle of the file
+ # without reading the header.
+ $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
+
+ # Go function header.
+ my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : "";
+ $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl;
+
+ # Check if err return available
+ my $errvar = "";
+ foreach my $p (@out) {
+ my ($name, $type) = parseparam($p);
+ if($type eq "error") {
+ $errvar = $name;
+ last;
+ }
+ }
+
+ # Prepare arguments to Syscall.
+ my @args = ();
+ my @uses = ();
+ my $n = 0;
+ foreach my $p (@in) {
+ my ($name, $type) = parseparam($p);
+ if($type =~ /^\*/) {
+ push @args, "uintptr(unsafe.Pointer($name))";
+ } elsif($type eq "string" && $errvar ne "") {
+ $text .= "\tvar _p$n *byte\n";
+ $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n";
+ $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type eq "string") {
+ print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
+ $text .= "\tvar _p$n *byte\n";
+ $text .= "\t_p$n, _ = BytePtrFromString($name)\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type =~ /^\[\](.*)/) {
+ # Convert slice into pointer, length.
+ # Have to be careful not to take address of &a[0] if len == 0:
+ # pass dummy pointer in that case.
+ # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
+ $text .= "\tvar _p$n unsafe.Pointer\n";
+ $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}";
+ $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}";
+ $text .= "\n";
+ push @args, "uintptr(_p$n)", "uintptr(len($name))";
+ $n++;
+ } elsif($type eq "int64" && ($openbsd || $netbsd)) {
+ push @args, "0";
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } elsif($_32bit eq "little-endian") {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ } else {
+ push @args, "uintptr($name)";
+ }
+ } elsif($type eq "int64" && $dragonfly) {
+ if ($func !~ /^extp(read|write)/i) {
+ push @args, "0";
+ }
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } elsif($_32bit eq "little-endian") {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ } else {
+ push @args, "uintptr($name)";
+ }
+ } elsif($type eq "int64" && $_32bit ne "") {
+ if(@args % 2 && $arm) {
+ # arm abi specifies 64-bit argument uses
+ # (even, odd) pair
+ push @args, "0"
+ }
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } else {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ }
+ } else {
+ push @args, "uintptr($name)";
+ }
+ }
+
+ # Determine which form to use; pad args with zeros.
+ my $asm = "Syscall";
+ if ($nonblock) {
+ $asm = "RawSyscall";
+ }
+ if(@args <= 3) {
+ while(@args < 3) {
+ push @args, "0";
+ }
+ } elsif(@args <= 6) {
+ $asm .= "6";
+ while(@args < 6) {
+ push @args, "0";
+ }
+ } elsif(@args <= 9) {
+ $asm .= "9";
+ while(@args < 9) {
+ push @args, "0";
+ }
+ } else {
+ print STDERR "$ARGV:$.: too many arguments to system call\n";
+ }
+
+ # System call number.
+ if($sysname eq "") {
+ $sysname = "SYS_$func";
+ $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar
+ $sysname =~ y/a-z/A-Z/;
+ }
+
+ # Actual call.
+ my $args = join(', ', @args);
+ my $call = "$asm($sysname, $args)";
+
+ # Assign return values.
+ my $body = "";
+ my @ret = ("_", "_", "_");
+ my $do_errno = 0;
+ for(my $i=0; $i<@out; $i++) {
+ my $p = $out[$i];
+ my ($name, $type) = parseparam($p);
+ my $reg = "";
+ if($name eq "err" && !$plan9) {
+ $reg = "e1";
+ $ret[2] = $reg;
+ $do_errno = 1;
+ } elsif($name eq "err" && $plan9) {
+ $ret[0] = "r0";
+ $ret[2] = "e1";
+ next;
+ } else {
+ $reg = sprintf("r%d", $i);
+ $ret[$i] = $reg;
+ }
+ if($type eq "bool") {
+ $reg = "$reg != 0";
+ }
+ if($type eq "int64" && $_32bit ne "") {
+ # 64-bit number in r1:r0 or r0:r1.
+ if($i+2 > @out) {
+ print STDERR "$ARGV:$.: not enough registers for int64 return\n";
+ }
+ if($_32bit eq "big-endian") {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
+ } else {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
+ }
+ $ret[$i] = sprintf("r%d", $i);
+ $ret[$i+1] = sprintf("r%d", $i+1);
+ }
+ if($reg ne "e1" || $plan9) {
+ $body .= "\t$name = $type($reg)\n";
+ }
+ }
+ if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
+ $text .= "\t$call\n";
+ } else {
+ $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
+ }
+ foreach my $use (@uses) {
+ $text .= "\t$use\n";
+ }
+ $text .= $body;
+
+ if ($plan9 && $ret[2] eq "e1") {
+ $text .= "\tif int32(r0) == -1 {\n";
+ $text .= "\t\terr = e1\n";
+ $text .= "\t}\n";
+ } elsif ($do_errno) {
+ $text .= "\tif e1 != 0 {\n";
+ $text .= "\t\terr = errnoErr(e1)\n";
+ $text .= "\t}\n";
+ }
+ $text .= "\treturn\n";
+ $text .= "}\n\n";
+}
+
+chomp $text;
+chomp $text;
+
+if($errors) {
+ exit 1;
+}
+
+print <) {
+ chomp;
+ s/\s+/ /g;
+ s/^\s+//;
+ s/\s+$//;
+ $package = $1 if !$package && /^package (\S+)$/;
+ my $nonblock = /^\/\/sysnb /;
+ next if !/^\/\/sys / && !$nonblock;
+
+ # Line must be of the form
+ # func Open(path string, mode int, perm int) (fd int, err error)
+ # Split into name, in params, out params.
+ if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) {
+ print STDERR "$ARGV:$.: malformed //sys declaration\n";
+ $errors = 1;
+ next;
+ }
+ my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6);
+
+ # Split argument lists on comma.
+ my @in = parseparamlist($in);
+ my @out = parseparamlist($out);
+
+ # So file name.
+ if($modname eq "") {
+ $modname = "libc";
+ }
+
+ # System call name.
+ if($sysname eq "") {
+ $sysname = "$func";
+ }
+
+ # System call pointer variable name.
+ my $sysvarname = "proc$sysname";
+
+ my $strconvfunc = "BytePtrFromString";
+ my $strconvtype = "*byte";
+
+ $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase.
+
+ # Runtime import of function to allow cross-platform builds.
+ $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n";
+ # Link symbol to proc address variable.
+ $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n";
+ # Library proc address variable.
+ push @vars, $sysvarname;
+
+ # Go function header.
+ $out = join(', ', @out);
+ if($out ne "") {
+ $out = " ($out)";
+ }
+ if($text ne "") {
+ $text .= "\n"
+ }
+ $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out;
+
+ # Check if err return available
+ my $errvar = "";
+ foreach my $p (@out) {
+ my ($name, $type) = parseparam($p);
+ if($type eq "error") {
+ $errvar = $name;
+ last;
+ }
+ }
+
+ # Prepare arguments to Syscall.
+ my @args = ();
+ my @uses = ();
+ my $n = 0;
+ foreach my $p (@in) {
+ my ($name, $type) = parseparam($p);
+ if($type =~ /^\*/) {
+ push @args, "uintptr(unsafe.Pointer($name))";
+ } elsif($type eq "string" && $errvar ne "") {
+ $text .= "\tvar _p$n $strconvtype\n";
+ $text .= "\t_p$n, $errvar = $strconvfunc($name)\n";
+ $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type eq "string") {
+ print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
+ $text .= "\tvar _p$n $strconvtype\n";
+ $text .= "\t_p$n, _ = $strconvfunc($name)\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type =~ /^\[\](.*)/) {
+ # Convert slice into pointer, length.
+ # Have to be careful not to take address of &a[0] if len == 0:
+ # pass nil in that case.
+ $text .= "\tvar _p$n *$1\n";
+ $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))";
+ $n++;
+ } elsif($type eq "int64" && $_32bit ne "") {
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name >> 32)", "uintptr($name)";
+ } else {
+ push @args, "uintptr($name)", "uintptr($name >> 32)";
+ }
+ } elsif($type eq "bool") {
+ $text .= "\tvar _p$n uint32\n";
+ $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n";
+ push @args, "uintptr(_p$n)";
+ $n++;
+ } else {
+ push @args, "uintptr($name)";
+ }
+ }
+ my $nargs = @args;
+
+ # Determine which form to use; pad args with zeros.
+ my $asm = "sysvicall6";
+ if ($nonblock) {
+ $asm = "rawSysvicall6";
+ }
+ if(@args <= 6) {
+ while(@args < 6) {
+ push @args, "0";
+ }
+ } else {
+ print STDERR "$ARGV:$.: too many arguments to system call\n";
+ }
+
+ # Actual call.
+ my $args = join(', ', @args);
+ my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)";
+
+ # Assign return values.
+ my $body = "";
+ my $failexpr = "";
+ my @ret = ("_", "_", "_");
+ my @pout= ();
+ my $do_errno = 0;
+ for(my $i=0; $i<@out; $i++) {
+ my $p = $out[$i];
+ my ($name, $type) = parseparam($p);
+ my $reg = "";
+ if($name eq "err") {
+ $reg = "e1";
+ $ret[2] = $reg;
+ $do_errno = 1;
+ } else {
+ $reg = sprintf("r%d", $i);
+ $ret[$i] = $reg;
+ }
+ if($type eq "bool") {
+ $reg = "$reg != 0";
+ }
+ if($type eq "int64" && $_32bit ne "") {
+ # 64-bit number in r1:r0 or r0:r1.
+ if($i+2 > @out) {
+ print STDERR "$ARGV:$.: not enough registers for int64 return\n";
+ }
+ if($_32bit eq "big-endian") {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
+ } else {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
+ }
+ $ret[$i] = sprintf("r%d", $i);
+ $ret[$i+1] = sprintf("r%d", $i+1);
+ }
+ if($reg ne "e1") {
+ $body .= "\t$name = $type($reg)\n";
+ }
+ }
+ if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
+ $text .= "\t$call\n";
+ } else {
+ $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
+ }
+ foreach my $use (@uses) {
+ $text .= "\t$use\n";
+ }
+ $text .= $body;
+
+ if ($do_errno) {
+ $text .= "\tif e1 != 0 {\n";
+ $text .= "\t\terr = e1\n";
+ $text .= "\t}\n";
+ }
+ $text .= "\treturn\n";
+ $text .= "}\n";
+}
+
+if($errors) {
+ exit 1;
+}
+
+print < "net.inet",
+ "net.inet.ipproto" => "net.inet",
+ "net.inet6.ipv6proto" => "net.inet6",
+ "net.inet6.ipv6" => "net.inet6.ip6",
+ "net.inet.icmpv6" => "net.inet6.icmp6",
+ "net.inet6.divert6" => "net.inet6.divert",
+ "net.inet6.tcp6" => "net.inet.tcp",
+ "net.inet6.udp6" => "net.inet.udp",
+ "mpls" => "net.mpls",
+ "swpenc" => "vm.swapencrypt"
+);
+
+# Node mappings
+my %node_map = (
+ "net.inet.ip.ifq" => "net.ifq",
+ "net.inet.pfsync" => "net.pfsync",
+ "net.mpls.ifq" => "net.ifq"
+);
+
+my $ctlname;
+my %mib = ();
+my %sysctl = ();
+my $node;
+
+sub debug() {
+ print STDERR "$_[0]\n" if $debug;
+}
+
+# Walk the MIB and build a sysctl name to OID mapping.
+sub build_sysctl() {
+ my ($node, $name, $oid) = @_;
+ my %node = %{$node};
+ my @oid = @{$oid};
+
+ foreach my $key (sort keys %node) {
+ my @node = @{$node{$key}};
+ my $nodename = $name.($name ne '' ? '.' : '').$key;
+ my @nodeoid = (@oid, $node[0]);
+ if ($node[1] eq 'CTLTYPE_NODE') {
+ if (exists $node_map{$nodename}) {
+ $node = \%mib;
+ $ctlname = $node_map{$nodename};
+ foreach my $part (split /\./, $ctlname) {
+ $node = \%{@{$$node{$part}}[2]};
+ }
+ } else {
+ $node = $node[2];
+ }
+ &build_sysctl($node, $nodename, \@nodeoid);
+ } elsif ($node[1] ne '') {
+ $sysctl{$nodename} = \@nodeoid;
+ }
+ }
+}
+
+foreach my $ctl (@ctls) {
+ $ctls{$ctl} = $ctl;
+}
+
+# Build MIB
+foreach my $header (@headers) {
+ &debug("Processing $header...");
+ open HEADER, "/usr/include/$header" ||
+ print STDERR "Failed to open $header\n";
+ while (<HEADER>) {
+ if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ ||
+ $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ ||
+ $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) {
+ if ($1 eq 'CTL_NAMES') {
+ # Top level.
+ $node = \%mib;
+ } else {
+ # Node.
+ my $nodename = lc($2);
+ if ($header =~ /^netinet\//) {
+ $ctlname = "net.inet.$nodename";
+ } elsif ($header =~ /^netinet6\//) {
+ $ctlname = "net.inet6.$nodename";
+ } elsif ($header =~ /^net\//) {
+ $ctlname = "net.$nodename";
+ } else {
+ $ctlname = "$nodename";
+ $ctlname =~ s/^(fs|net|kern)_/$1\./;
+ }
+ if (exists $ctl_map{$ctlname}) {
+ $ctlname = $ctl_map{$ctlname};
+ }
+ if (not exists $ctls{$ctlname}) {
+ &debug("Ignoring $ctlname...");
+ next;
+ }
+
+ # Walk down from the top of the MIB.
+ $node = \%mib;
+ foreach my $part (split /\./, $ctlname) {
+ if (not exists $$node{$part}) {
+ &debug("Missing node $part");
+ $$node{$part} = [ 0, '', {} ];
+ }
+ $node = \%{@{$$node{$part}}[2]};
+ }
+ }
+
+ # Populate current node with entries.
+ my $i = -1;
+ while (defined($_) && $_ !~ /^}/) {
+ $_ = <HEADER>;
+ $i++ if $_ =~ /{.*}/;
+ next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/;
+ $$node{$1} = [ $i, $2, {} ];
+ }
+ }
+ }
+ close HEADER;
+}
+
+&build_sysctl(\%mib, "", []);
+
+print <){
+ if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){
+ my $name = $1;
+ my $num = $2;
+ $name =~ y/a-z/A-Z/;
+ print " SYS_$name = $num;"
+ }
+}
+
+print <){
+ if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){
+ my $num = $1;
+ my $proto = $2;
+ my $name = "SYS_$3";
+ $name =~ y/a-z/A-Z/;
+
+ # There are multiple entries for enosys and nosys, so comment them out.
+ if($name =~ /^SYS_E?NOSYS$/){
+ $name = "// $name";
+ }
+ if($name eq 'SYS_SYS_EXIT'){
+ $name = 'SYS_EXIT';
+ }
+
+ print " $name = $num; // $proto\n";
+ }
+}
+
+print <){
+ if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){
+ my $num = $1;
+ my $proto = $2;
+ my $name = "SYS_$3";
+ $name =~ y/a-z/A-Z/;
+
+ # There are multiple entries for enosys and nosys, so comment them out.
+ if($name =~ /^SYS_E?NOSYS$/){
+ $name = "// $name";
+ }
+ if($name eq 'SYS_SYS_EXIT'){
+ $name = 'SYS_EXIT';
+ }
+ if($name =~ /^SYS_CAP_+/ || $name =~ /^SYS___CAP_+/){
+ next
+ }
+
+ print " $name = $num; // $proto\n";
+
+ # We keep Capsicum syscall numbers for FreeBSD
+ # 9-STABLE here because we are not sure whether they
+ # are mature and stable.
+ if($num == 513){
+ print " SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }\n";
+ print " SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \\\n";
+ print " SYS_CAP_ENTER = 516 // { int cap_enter(void); }\n";
+ print " SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }\n";
+ }
+ }
+}
+
+print < 999){
+ # ignore deprecated syscalls that are no longer implemented
+ # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/asm-generic/unistd.h?id=refs/heads/master#n716
+ return;
+ }
+ $name =~ y/a-z/A-Z/;
+ print " SYS_$name = $num;\n";
+}
+
+my $prev;
+open(GCC, "gcc -E -dD $ARGV[0] |") || die "can't run gcc";
+while(<GCC>){
+ if(/^#define __NR_syscalls\s+/) {
+ # ignore redefinitions of __NR_syscalls
+ }
+ elsif(/^#define __NR_(\w+)\s+([0-9]+)/){
+ $prev = $2;
+ fmt($1, $2);
+ }
+ elsif(/^#define __NR3264_(\w+)\s+([0-9]+)/){
+ $prev = $2;
+ fmt($1, $2);
+ }
+ elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){
+ fmt($1, $prev+$2)
+ }
+}
+
+print <){
+ if($line =~ /^(.*)\\$/) {
+ # Handle continuation
+ $line = $1;
+ $_ =~ s/^\s+//;
+ $line .= $_;
+ } else {
+ # New line
+ $line = $_;
+ }
+ next if $line =~ /\\$/;
+ if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) {
+ my $num = $1;
+ my $proto = $6;
+ my $compat = $8;
+ my $name = "$7_$9";
+
+ $name = "$7_$11" if $11 ne '';
+ $name =~ y/a-z/A-Z/;
+
+ if($compat eq '' || $compat eq '30' || $compat eq '50') {
+ print " $name = $num; // $proto\n";
+ }
+ }
+}
+
+print <){
+ if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){
+ my $num = $1;
+ my $proto = $3;
+ my $name = $4;
+ $name =~ y/a-z/A-Z/;
+
+ # There are multiple entries for enosys and nosys, so comment them out.
+ if($name =~ /^SYS_E?NOSYS$/){
+ $name = "// $name";
+ }
+ if($name eq 'SYS_SYS_EXIT'){
+ $name = 'SYS_EXIT';
+ }
+
+ print " $name = $num; // $proto\n";
+ }
+}
+
+print < uint64(len(b)) {
+ return nil, nil, EINVAL
+ }
+ return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil
+}
+
+// UnixRights encodes a set of open file descriptors into a socket
+// control message for sending to another process.
+func UnixRights(fds ...int) []byte {
+ datalen := len(fds) * 4
+ b := make([]byte, CmsgSpace(datalen))
+ h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = SOL_SOCKET
+ h.Type = SCM_RIGHTS
+ h.SetLen(CmsgLen(datalen))
+ data := cmsgData(h)
+ for _, fd := range fds {
+ *(*int32)(data) = int32(fd)
+ data = unsafe.Pointer(uintptr(data) + 4)
+ }
+ return b
+}
+
+// ParseUnixRights decodes a socket control message that contains an
+// integer array of open file descriptors from another process.
+func ParseUnixRights(m *SocketControlMessage) ([]int, error) {
+ if m.Header.Level != SOL_SOCKET {
+ return nil, EINVAL
+ }
+ if m.Header.Type != SCM_RIGHTS {
+ return nil, EINVAL
+ }
+ fds := make([]int, len(m.Data)>>2)
+ for i, j := 0, 0; i < len(m.Data); i += 4 {
+ fds[j] = int(*(*int32)(unsafe.Pointer(&m.Data[i])))
+ j++
+ }
+ return fds, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go
new file mode 100644
index 0000000..35ed664
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/str.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+func itoa(val int) string { // do it here rather than with fmt to avoid dependency
+ if val < 0 {
+ return "-" + uitoa(uint(-val))
+ }
+ return uitoa(uint(val))
+}
+
+func uitoa(val uint) string {
+ var buf [32]byte // big enough for int64
+ i := len(buf) - 1
+ for val >= 10 {
+ buf[i] = byte(val%10 + '0')
+ i--
+ val /= 10
+ }
+ buf[i] = byte(val + '0')
+ return string(buf[i:])
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go
new file mode 100644
index 0000000..a0bcf84
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall.go
@@ -0,0 +1,76 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+// Package unix contains an interface to the low-level operating system
+// primitives. OS details vary depending on the underlying system, and
+// by default, godoc will display OS-specific documentation for the current
+// system. If you want godoc to display OS documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
+// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
+// to freebsd and $GOARCH to arm.
+// The primary use of this package is inside other packages that provide a more
+// portable interface to the system, such as "os", "time" and "net". Use
+// those packages rather than this one if you can.
+// For details of the functions and data types in this package consult
+// the manuals for the appropriate operating system.
+// These calls return err == nil to indicate success; otherwise
+// err represents an operating system error describing the failure and
+// holds a value of type syscall.Errno.
+package unix // import "golang.org/x/sys/unix"
+
+import "unsafe"
+
+// ByteSliceFromString returns a NUL-terminated slice of bytes
+// containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func ByteSliceFromString(s string) ([]byte, error) {
+ for i := 0; i < len(s); i++ {
+ if s[i] == 0 {
+ return nil, EINVAL
+ }
+ }
+ a := make([]byte, len(s)+1)
+ copy(a, s)
+ return a, nil
+}
+
+// BytePtrFromString returns a pointer to a NUL-terminated array of
+// bytes containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func BytePtrFromString(s string) (*byte, error) {
+ a, err := ByteSliceFromString(s)
+ if err != nil {
+ return nil, err
+ }
+ return &a[0], nil
+}
+
+// Single-word zero for use when we need a valid pointer to 0 bytes.
+// See mkunix.pl.
+var _zero uintptr
+
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+ return int64(ts.Sec), int64(ts.Nsec)
+}
+
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+ return int64(tv.Sec), int64(tv.Usec) * 1000
+}
+
+func (ts *Timespec) Nano() int64 {
+ return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+func (tv *Timeval) Nano() int64 {
+ return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+//go:noescape
+func use(p unsafe.Pointer)
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
new file mode 100644
index 0000000..e967176
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -0,0 +1,628 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+// BSD system call wrappers shared by *BSD based systems
+// including OS X (Darwin) and FreeBSD. Like the other
+// syscall_*.go files it is compiled as Go code but also
+// used as input to mksyscall which parses the //sys
+// lines and generates system call stubs.
+
+package unix
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+/*
+ * Wrapped
+ */
+
+//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error)
+//sysnb setgroups(ngid int, gid *_Gid_t) (err error)
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Sanity check group count. Max is 16 on BSD.
+ if n < 0 || n > 1000 {
+ return nil, EINVAL
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if err != nil {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+func Setgroups(gids []int) (err error) {
+ if len(gids) == 0 {
+ return setgroups(0, nil)
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ return setgroups(len(a), &a[0])
+}
+
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ // Final argument is (basep *uintptr) and the syscall doesn't take nil.
+ // 64 bits should be enough. (32 bits isn't even on 386). Since the
+ // actual system call is getdirentries64, 64 is a good guess.
+ // TODO(rsc): Can we use a single global basep for all calls?
+ var base = (*uintptr)(unsafe.Pointer(new(uint64)))
+ return Getdirentries(fd, buf, base)
+}
+
+// Wait status is 7 bits at bottom, either 0 (exited),
+// 0x7F (stopped), or a signal number that caused an exit.
+// The 0x80 bit is whether there was a core dump.
+// An extra number (exit code, signal causing a stop)
+// is in the high bits.
+
+type WaitStatus uint32
+
+const (
+ mask = 0x7F
+ core = 0x80
+ shift = 8
+
+ exited = 0
+ stopped = 0x7F
+)
+
+func (w WaitStatus) Exited() bool { return w&mask == exited }
+
+func (w WaitStatus) ExitStatus() int {
+ if w&mask != exited {
+ return -1
+ }
+ return int(w >> shift)
+}
+
+func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 }
+
+func (w WaitStatus) Signal() syscall.Signal {
+ sig := syscall.Signal(w & mask)
+ if sig == stopped || sig == 0 {
+ return -1
+ }
+ return sig
+}
+
+func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
+
+func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP }
+
+func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP }
+
+func (w WaitStatus) StopSignal() syscall.Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return syscall.Signal(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) TrapCause() int { return -1 }
+
+//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)
+
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
+ var status _C_int
+ wpid, err = wait4(pid, &status, options, rusage)
+ if wstatus != nil {
+ *wstatus = WaitStatus(status)
+ }
+ return
+}
+
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys Shutdown(s int, how int) (err error)
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Len = SizeofSockaddrInet4
+ sa.raw.Family = AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Len = SizeofSockaddrInet6
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n >= len(sa.raw.Path) || n == 0 {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Len = byte(3 + n) // 2 for Family, Len; 1 for NUL
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
+}
+
+func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Index == 0 {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Len = sa.Len
+ sa.raw.Family = AF_LINK
+ sa.raw.Index = sa.Index
+ sa.raw.Type = sa.Type
+ sa.raw.Nlen = sa.Nlen
+ sa.raw.Alen = sa.Alen
+ sa.raw.Slen = sa.Slen
+ for i := 0; i < len(sa.raw.Data); i++ {
+ sa.raw.Data[i] = sa.Data[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_LINK:
+ pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa))
+ sa := new(SockaddrDatalink)
+ sa.Len = pp.Len
+ sa.Family = pp.Family
+ sa.Index = pp.Index
+ sa.Type = pp.Type
+ sa.Nlen = pp.Nlen
+ sa.Alen = pp.Alen
+ sa.Slen = pp.Slen
+ for i := 0; i < len(sa.Data); i++ {
+ sa.Data[i] = pp.Data[i]
+ }
+ return sa, nil
+
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ if pp.Len < 2 || pp.Len > SizeofSockaddrUnix {
+ return nil, EINVAL
+ }
+ sa := new(SockaddrUnix)
+
+ // Some BSDs include the trailing NUL in the length, whereas
+ // others do not. Work around this by subtracting the leading
+ // family and len. The path is then scanned to see if a NUL
+ // terminator still exists within the length.
+ n := int(pp.Len) - 2 // subtract leading Family, Len
+ for i := 0; i < n; i++ {
+ if pp.Path[i] == 0 {
+ // found early NUL; assume Len included the NUL
+ // or was overestimating.
+ n = i
+ break
+ }
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
+
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept(fd, &rsa, &len)
+ if err != nil {
+ return
+ }
+ if runtime.GOOS == "darwin" && len == 0 {
+ // Accepted socket has no address.
+ // This is likely due to a bug in xnu kernels,
+ // where instead of ECONNABORTED error socket
+ // is accepted, but has no address.
+ Close(nfd)
+ return 0, nil, ECONNABORTED
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ // TODO(jsing): DragonFly has a "bug" (see issue 3349), which should be
+ // reported upstream.
+ if runtime.GOOS == "dragonfly" && rsa.Addr.Family == AF_UNSPEC && rsa.Addr.Len == 0 {
+ rsa.Addr.Family = AF_UNIX
+ rsa.Addr.Len = SizeofSockaddrUnix
+ }
+ return anyToSockaddr(&rsa)
+}
+
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+
+func GetsockoptByte(fd, level, opt int) (value byte, err error) {
+ var n byte
+ vallen := _Socklen(1)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
+ return n, err
+}
+
+func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ return value, err
+}
+
+func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
+ var value IPMreq
+ vallen := _Socklen(SizeofIPMreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
+ var value IPv6Mreq
+ vallen := _Socklen(SizeofIPv6Mreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
+ var value IPv6MTUInfo
+ vallen := _Socklen(SizeofIPv6MTUInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
+ var value ICMPv6Filter
+ vallen := _Socklen(SizeofICMPv6Filter)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var msg Msghdr
+ var rsa RawSockaddrAny
+ msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // receive at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); err != nil {
+ return
+ }
+ oobn = int(msg.Controllen)
+ recvflags = int(msg.Flags)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(ptr))
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // send at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
+
+//sys kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error)
+
+func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err error) {
+ var change, event unsafe.Pointer
+ if len(changes) > 0 {
+ change = unsafe.Pointer(&changes[0])
+ }
+ if len(events) > 0 {
+ event = unsafe.Pointer(&events[0])
+ }
+ return kevent(kq, change, len(changes), event, len(events), timeout)
+}
+
+//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
+
+// sysctlmib translates name to mib number and appends any additional args.
+func sysctlmib(name string, args ...int) ([]_C_int, error) {
+ // Translate name to mib number.
+ mib, err := nametomib(name)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, a := range args {
+ mib = append(mib, _C_int(a))
+ }
+
+ return mib, nil
+}
+
+func Sysctl(name string) (string, error) {
+ return SysctlArgs(name)
+}
+
+func SysctlArgs(name string, args ...int) (string, error) {
+ mib, err := sysctlmib(name, args...)
+ if err != nil {
+ return "", err
+ }
+
+ // Find size.
+ n := uintptr(0)
+ if err := sysctl(mib, nil, &n, nil, 0); err != nil {
+ return "", err
+ }
+ if n == 0 {
+ return "", nil
+ }
+
+ // Read into buffer of that size.
+ buf := make([]byte, n)
+ if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+ return "", err
+ }
+
+ // Throw away terminating NUL.
+ if n > 0 && buf[n-1] == '\x00' {
+ n--
+ }
+ return string(buf[0:n]), nil
+}
+
+func SysctlUint32(name string) (uint32, error) {
+ return SysctlUint32Args(name)
+}
+
+func SysctlUint32Args(name string, args ...int) (uint32, error) {
+ mib, err := sysctlmib(name, args...)
+ if err != nil {
+ return 0, err
+ }
+
+ n := uintptr(4)
+ buf := make([]byte, 4)
+ if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+ return 0, err
+ }
+ if n != 4 {
+ return 0, EIO
+ }
+ return *(*uint32)(unsafe.Pointer(&buf[0])), nil
+}
+
+func SysctlUint64(name string, args ...int) (uint64, error) {
+ mib, err := sysctlmib(name, args...)
+ if err != nil {
+ return 0, err
+ }
+
+ n := uintptr(8)
+ buf := make([]byte, 8)
+ if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+ return 0, err
+ }
+ if n != 8 {
+ return 0, EIO
+ }
+ return *(*uint64)(unsafe.Pointer(&buf[0])), nil
+}
+
+func SysctlRaw(name string, args ...int) ([]byte, error) {
+ mib, err := sysctlmib(name, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find size.
+ n := uintptr(0)
+ if err := sysctl(mib, nil, &n, nil, 0); err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Read into buffer of that size.
+ buf := make([]byte, n)
+ if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+ return nil, err
+ }
+
+ // The actual call may return less than the original reported required
+ // size so ensure we deal with that.
+ return buf[:n], nil
+}
+
+//sys utimes(path string, timeval *[2]Timeval) (err error)
+
+func Utimes(path string, tv []Timeval) error {
+ if tv == nil {
+ return utimes(path, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func UtimesNano(path string, ts []Timespec) error {
+ if ts == nil {
+ return utimes(path, nil)
+ }
+ // TODO: The BSDs can do utimensat with SYS_UTIMENSAT but it
+ // isn't supported by darwin so this uses utimes instead
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ // Not as efficient as it could be because Timespec and
+ // Timeval have different types in the different OSes
+ tv := [2]Timeval{
+ NsecToTimeval(TimespecToNsec(ts[0])),
+ NsecToTimeval(TimespecToNsec(ts[1])),
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys futimes(fd int, timeval *[2]Timeval) (err error)
+
+func Futimes(fd int, tv []Timeval) error {
+ if tv == nil {
+ return futimes(fd, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+
+// TODO: wrap
+// Acct(name nil-string) (err error)
+// Gethostuuid(uuid *byte, timeout *Timespec) (err error)
+// Madvise(addr *byte, len int, behav int) (err error)
+// Mprotect(addr *byte, len int, prot int) (err error)
+// Msync(addr *byte, len int, flags int) (err error)
+// Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error)
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
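The block above ends the shared BSD socket and sysctl plumbing that the vendored unix package builds on. As a purely illustrative aside (not part of the vendored file), here is a minimal sketch of how the Sysctl and SysctlUint32 wrappers defined above are meant to be called, assuming the package is imported as golang.org/x/sys/unix and that the MIB names "kern.hostname" and "hw.ncpu" exist on the target BSD/darwin host:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// String-valued sysctl: resolved through nametomib and the
	// size-then-read pattern implemented by SysctlArgs above.
	host, err := unix.Sysctl("kern.hostname")
	if err != nil {
		log.Fatal(err)
	}

	// Fixed-width numeric sysctl: SysctlUint32 insists on a 4-byte result.
	ncpu, err := unix.SysctlUint32("hw.ncpu")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("host=%s ncpu=%d\n", host, ncpu)
}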
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
new file mode 100644
index 0000000..3d534d2
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -0,0 +1,511 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Darwin system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import (
+ errorspkg "errors"
+ "syscall"
+ "unsafe"
+)
+
+const ImplementsGetwd = true
+
+func Getwd() (string, error) {
+ buf := make([]byte, 2048)
+ attrs, err := getAttrList(".", attrList{CommonAttr: attrCmnFullpath}, buf, 0)
+ if err == nil && len(attrs) == 1 && len(attrs[0]) >= 2 {
+ wd := string(attrs[0])
+ // Sanity check that it's an absolute path and ends
+ // in a null byte, which we then strip.
+ if wd[0] == '/' && wd[len(wd)-1] == 0 {
+ return wd[:len(wd)-1], nil
+ }
+ }
+ // If pkg/os/getwd.go gets ENOTSUP, it will fall back to the
+ // slow algorithm.
+ return "", ENOTSUP
+}
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [12]int8
+ raw RawSockaddrDatalink
+}
+
+// Translate "kern.hostname" to []_C_int{0,1,2,3}.
+func nametomib(name string) (mib []_C_int, err error) {
+ const siz = unsafe.Sizeof(mib[0])
+
+ // NOTE(rsc): It seems strange to set the buffer to have
+ // size CTL_MAXNAME+2 but use only CTL_MAXNAME
+ // as the size. I don't know why the +2 is here, but the
+ // kernel uses +2 for its own implementation of this function.
+ // I am scared that if we don't include the +2 here, the kernel
+ // will silently write 2 words farther than we specify
+ // and we'll get memory corruption.
+ var buf [CTL_MAXNAME + 2]_C_int
+ n := uintptr(CTL_MAXNAME) * siz
+
+ p := (*byte)(unsafe.Pointer(&buf[0]))
+ bytes, err := ByteSliceFromString(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // Magic sysctl: "setting" 0.3 to a string name
+ // lets you read back the array of integers form.
+ if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
+ return nil, err
+ }
+ return buf[0 : n/siz], nil
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
+func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
+func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }
+
+const (
+ attrBitMapCount = 5
+ attrCmnFullpath = 0x08000000
+)
+
+type attrList struct {
+ bitmapCount uint16
+ _ uint16
+ CommonAttr uint32
+ VolAttr uint32
+ DirAttr uint32
+ FileAttr uint32
+ Forkattr uint32
+}
+
+func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) {
+ if len(attrBuf) < 4 {
+ return nil, errorspkg.New("attrBuf too small")
+ }
+ attrList.bitmapCount = attrBitMapCount
+
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+
+ _, _, e1 := Syscall6(
+ SYS_GETATTRLIST,
+ uintptr(unsafe.Pointer(_p0)),
+ uintptr(unsafe.Pointer(&attrList)),
+ uintptr(unsafe.Pointer(&attrBuf[0])),
+ uintptr(len(attrBuf)),
+ uintptr(options),
+ 0,
+ )
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ return nil, e1
+ }
+ size := *(*uint32)(unsafe.Pointer(&attrBuf[0]))
+
+ // dat is the section of attrBuf that contains valid data,
+ // without the 4 byte length header. All attribute offsets
+ // are relative to dat.
+ dat := attrBuf
+ if int(size) < len(attrBuf) {
+ dat = dat[:size]
+ }
+ dat = dat[4:] // remove length prefix
+
+ for i := uint32(0); int(i) < len(dat); {
+ header := dat[i:]
+ if len(header) < 8 {
+ return attrs, errorspkg.New("truncated attribute header")
+ }
+ datOff := *(*int32)(unsafe.Pointer(&header[0]))
+ attrLen := *(*uint32)(unsafe.Pointer(&header[4]))
+ if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) {
+ return attrs, errorspkg.New("truncated results; attrBuf too small")
+ }
+ end := uint32(datOff) + attrLen
+ attrs = append(attrs, dat[datOff:end])
+ i = end
+ if r := i % 4; r != 0 {
+ i += (4 - r)
+ }
+ }
+ return
+}
+
+//sysnb pipe() (r int, w int, err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ p[0], p[1], err = pipe()
+ return
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags))
+ use(unsafe.Pointer(_p0))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+/*
+ * Wrapped
+ */
+
+//sys kill(pid int, signum int, posix int) (err error)
+
+func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) }
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exchangedata(path1 string, path2 string, options int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
+//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64
+//sys Getdtablesize() (size int)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Getuid() (uid int)
+//sysnb Issetugid() (tainted bool)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sys Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sys Setprivexec(flag int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
+//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Undelete(path string) (err error)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+
+/*
+ * Unimplemented
+ */
+// Profil
+// Sigaction
+// Sigprocmask
+// Getlogin
+// Sigpending
+// Sigaltstack
+// Ioctl
+// Reboot
+// Execve
+// Vfork
+// Sbrk
+// Sstk
+// Ovadvise
+// Mincore
+// Setitimer
+// Swapon
+// Select
+// Sigsuspend
+// Readv
+// Writev
+// Nfssvc
+// Getfh
+// Quotactl
+// Mount
+// Csops
+// Waitid
+// Add_profil
+// Kdebug_trace
+// Sigreturn
+// Mmap
+// Mlock
+// Munlock
+// Atsocket
+// Kqueue_from_portset_np
+// Kqueue_portset
+// Getattrlist
+// Setattrlist
+// Getdirentriesattr
+// Searchfs
+// Delete
+// Copyfile
+// Poll
+// Watchevent
+// Waitevent
+// Modwatch
+// Getxattr
+// Fgetxattr
+// Setxattr
+// Fsetxattr
+// Removexattr
+// Fremovexattr
+// Listxattr
+// Flistxattr
+// Fsctl
+// Initgroups
+// Posix_spawn
+// Nfsclnt
+// Fhopen
+// Minherit
+// Semsys
+// Msgsys
+// Shmsys
+// Semctl
+// Semget
+// Semop
+// Msgctl
+// Msgget
+// Msgsnd
+// Msgrcv
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Shm_open
+// Shm_unlink
+// Sem_open
+// Sem_close
+// Sem_unlink
+// Sem_wait
+// Sem_trywait
+// Sem_post
+// Sem_getvalue
+// Sem_init
+// Sem_destroy
+// Open_extended
+// Umask_extended
+// Stat_extended
+// Lstat_extended
+// Fstat_extended
+// Chmod_extended
+// Fchmod_extended
+// Access_extended
+// Settid
+// Gettid
+// Setsgroups
+// Getsgroups
+// Setwgroups
+// Getwgroups
+// Mkfifo_extended
+// Mkdir_extended
+// Identitysvc
+// Shared_region_check_np
+// Shared_region_map_np
+// __pthread_mutex_destroy
+// __pthread_mutex_init
+// __pthread_mutex_lock
+// __pthread_mutex_trylock
+// __pthread_mutex_unlock
+// __pthread_cond_init
+// __pthread_cond_destroy
+// __pthread_cond_broadcast
+// __pthread_cond_signal
+// Setsid_with_pid
+// __pthread_cond_timedwait
+// Aio_fsync
+// Aio_return
+// Aio_suspend
+// Aio_cancel
+// Aio_error
+// Aio_read
+// Aio_write
+// Lio_listio
+// __pthread_cond_wait
+// Iopolicysys
+// Mlockall
+// Munlockall
+// __pthread_kill
+// __pthread_sigmask
+// __sigwait
+// __disable_threadsignal
+// __pthread_markcancel
+// __pthread_canceled
+// __semwait_signal
+// Proc_info
+// sendfile
+// Stat64_extended
+// Lstat64_extended
+// Fstat64_extended
+// __pthread_chdir
+// __pthread_fchdir
+// Audit
+// Auditon
+// Getauid
+// Setauid
+// Getaudit
+// Setaudit
+// Getaudit_addr
+// Setaudit_addr
+// Auditctl
+// Bsdthread_create
+// Bsdthread_terminate
+// Stack_snapshot
+// Bsdthread_register
+// Workq_open
+// Workq_ops
+// __mac_execve
+// __mac_syscall
+// __mac_get_file
+// __mac_set_file
+// __mac_get_link
+// __mac_set_link
+// __mac_get_proc
+// __mac_set_proc
+// __mac_get_fd
+// __mac_set_fd
+// __mac_get_pid
+// __mac_get_lcid
+// __mac_get_lctx
+// __mac_set_lctx
+// Setlcid
+// Read_nocancel
+// Write_nocancel
+// Open_nocancel
+// Close_nocancel
+// Wait4_nocancel
+// Recvmsg_nocancel
+// Sendmsg_nocancel
+// Recvfrom_nocancel
+// Accept_nocancel
+// Msync_nocancel
+// Fcntl_nocancel
+// Select_nocancel
+// Fsync_nocancel
+// Connect_nocancel
+// Sigsuspend_nocancel
+// Readv_nocancel
+// Writev_nocancel
+// Sendto_nocancel
+// Pread_nocancel
+// Pwrite_nocancel
+// Waitid_nocancel
+// Poll_nocancel
+// Msgsnd_nocancel
+// Msgrcv_nocancel
+// Sem_wait_nocancel
+// Aio_suspend_nocancel
+// __sigwait_nocancel
+// __semwait_signal_nocancel
+// __mac_mount
+// __mac_get_mount
+// __mac_getfsstat
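Getwd and Getfsstat above both rely on the size-then-read convention used throughout this file. As an illustrative aside (not part of the vendored file), a minimal sketch of the two-call Getfsstat pattern follows, assuming MNT_NOWAIT is the flag you want (it is defined elsewhere in this package for darwin):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// A nil slice only asks the kernel how many filesystems are mounted.
	n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT)
	if err != nil {
		log.Fatal(err)
	}

	// A second call fills a buffer of exactly that size.
	buf := make([]unix.Statfs_t, n)
	if _, err := unix.Getfsstat(buf, unix.MNT_NOWAIT); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%d mounted filesystems\n", len(buf))
}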
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
new file mode 100644
index 0000000..c172a3d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
@@ -0,0 +1,77 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,darwin
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
+
+//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
+func Gettimeofday(tv *Timeval) (err error) {
+ // The tv passed to gettimeofday must be non-nil
+ // but is otherwise unused. The answers come back
+ // in the two registers.
+ sec, usec, err := gettimeofday(tv)
+ tv.Sec = int32(sec)
+ tv.Usec = int32(usec)
+ return err
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var length = uint64(count)
+
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0)
+
+ written = int(length)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/386 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
new file mode 100644
index 0000000..fc1e5a4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
@@ -0,0 +1,79 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,darwin
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error)
+func Gettimeofday(tv *Timeval) (err error) {
+ // The tv passed to gettimeofday must be non-nil
+ // but is otherwise unused. The answers come back
+ // in the two registers.
+ sec, usec, err := gettimeofday(tv)
+ tv.Sec = sec
+ tv.Usec = usec
+ return err
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var length = uint64(count)
+
+ _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0)
+
+ written = int(length)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/amd64 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
new file mode 100644
index 0000000..d286cf4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
+
+//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
+func Gettimeofday(tv *Timeval) (err error) {
+ // The tv passed to gettimeofday must be non-nil
+ // but is otherwise unused. The answers come back
+ // in the two registers.
+ sec, usec, err := gettimeofday(tv)
+ tv.Sec = int32(sec)
+ tv.Usec = int32(usec)
+ return err
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var length = uint64(count)
+
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0)
+
+ written = int(length)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
new file mode 100644
index 0000000..c33905c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
@@ -0,0 +1,77 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm64,darwin
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 16384 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error)
+func Gettimeofday(tv *Timeval) (err error) {
+ // The tv passed to gettimeofday must be non-nil
+ // but is otherwise unused. The answers come back
+ // in the two registers.
+ sec, usec, err := gettimeofday(tv)
+ tv.Sec = sec
+ tv.Usec = usec
+ return err
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var length = uint64(count)
+
+ _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0)
+
+ written = int(length)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/arm64 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
new file mode 100644
index 0000000..ec408ee
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -0,0 +1,412 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DragonFly BSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import "unsafe"
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [12]int8
+ Rcf uint16
+ Route [16]uint16
+ raw RawSockaddrDatalink
+}
+
+// Translate "kern.hostname" to []_C_int{0,1,2,3}.
+func nametomib(name string) (mib []_C_int, err error) {
+ const siz = unsafe.Sizeof(mib[0])
+
+ // NOTE(rsc): It seems strange to set the buffer to have
+ // size CTL_MAXNAME+2 but use only CTL_MAXNAME
+ // as the size. I don't know why the +2 is here, but the
+ // kernel uses +2 for its own implementation of this function.
+ // I am scared that if we don't include the +2 here, the kernel
+ // will silently write 2 words farther than we specify
+ // and we'll get memory corruption.
+ var buf [CTL_MAXNAME + 2]_C_int
+ n := uintptr(CTL_MAXNAME) * siz
+
+ p := (*byte)(unsafe.Pointer(&buf[0]))
+ bytes, err := ByteSliceFromString(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // Magic sysctl: "setting" 0.3 to a string name
+ // lets you read back the array of integers form.
+ if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
+ return nil, err
+ }
+ return buf[0 : n/siz], nil
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ reclen := int(16+dirent.Namlen+1+7) & ^7
+ buf = buf[reclen:]
+ if dirent.Fileno == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe() (r int, w int, err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ p[0], p[1], err = pipe()
+ return
+}
+
+//sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error)
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ return extpread(fd, p, 0, offset)
+}
+
+//sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error)
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ return extpwrite(fd, p, 0, offset)
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ use(unsafe.Pointer(_p0))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, stat *Statfs_t) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sys Getdtablesize() (size int)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Issetugid() (tainted bool)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, stat *Statfs_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Undelete(path string) (err error)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+
+/*
+ * Unimplemented
+ * TODO(jsing): Update this list for DragonFly.
+ */
+// Profil
+// Sigaction
+// Sigprocmask
+// Getlogin
+// Sigpending
+// Sigaltstack
+// Ioctl
+// Reboot
+// Execve
+// Vfork
+// Sbrk
+// Sstk
+// Ovadvise
+// Mincore
+// Setitimer
+// Swapon
+// Select
+// Sigsuspend
+// Readv
+// Writev
+// Nfssvc
+// Getfh
+// Quotactl
+// Mount
+// Csops
+// Waitid
+// Add_profil
+// Kdebug_trace
+// Sigreturn
+// Mmap
+// Atsocket
+// Kqueue_from_portset_np
+// Kqueue_portset
+// Getattrlist
+// Setattrlist
+// Getdirentriesattr
+// Searchfs
+// Delete
+// Copyfile
+// Poll
+// Watchevent
+// Waitevent
+// Modwatch
+// Getxattr
+// Fgetxattr
+// Setxattr
+// Fsetxattr
+// Removexattr
+// Fremovexattr
+// Listxattr
+// Flistxattr
+// Fsctl
+// Initgroups
+// Posix_spawn
+// Nfsclnt
+// Fhopen
+// Minherit
+// Semsys
+// Msgsys
+// Shmsys
+// Semctl
+// Semget
+// Semop
+// Msgctl
+// Msgget
+// Msgsnd
+// Msgrcv
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Shm_open
+// Shm_unlink
+// Sem_open
+// Sem_close
+// Sem_unlink
+// Sem_wait
+// Sem_trywait
+// Sem_post
+// Sem_getvalue
+// Sem_init
+// Sem_destroy
+// Open_extended
+// Umask_extended
+// Stat_extended
+// Lstat_extended
+// Fstat_extended
+// Chmod_extended
+// Fchmod_extended
+// Access_extended
+// Settid
+// Gettid
+// Setsgroups
+// Getsgroups
+// Setwgroups
+// Getwgroups
+// Mkfifo_extended
+// Mkdir_extended
+// Identitysvc
+// Shared_region_check_np
+// Shared_region_map_np
+// __pthread_mutex_destroy
+// __pthread_mutex_init
+// __pthread_mutex_lock
+// __pthread_mutex_trylock
+// __pthread_mutex_unlock
+// __pthread_cond_init
+// __pthread_cond_destroy
+// __pthread_cond_broadcast
+// __pthread_cond_signal
+// Setsid_with_pid
+// __pthread_cond_timedwait
+// Aio_fsync
+// Aio_return
+// Aio_suspend
+// Aio_cancel
+// Aio_error
+// Aio_read
+// Aio_write
+// Lio_listio
+// __pthread_cond_wait
+// Iopolicysys
+// __pthread_kill
+// __pthread_sigmask
+// __sigwait
+// __disable_threadsignal
+// __pthread_markcancel
+// __pthread_canceled
+// __semwait_signal
+// Proc_info
+// Stat64_extended
+// Lstat64_extended
+// Fstat64_extended
+// __pthread_chdir
+// __pthread_fchdir
+// Audit
+// Auditon
+// Getauid
+// Setauid
+// Getaudit
+// Setaudit
+// Getaudit_addr
+// Setaudit_addr
+// Auditctl
+// Bsdthread_create
+// Bsdthread_terminate
+// Stack_snapshot
+// Bsdthread_register
+// Workq_open
+// Workq_ops
+// __mac_execve
+// __mac_syscall
+// __mac_get_file
+// __mac_set_file
+// __mac_get_link
+// __mac_set_link
+// __mac_get_proc
+// __mac_set_proc
+// __mac_get_fd
+// __mac_set_fd
+// __mac_get_pid
+// __mac_get_lcid
+// __mac_get_lctx
+// __mac_set_lctx
+// Setlcid
+// Read_nocancel
+// Write_nocancel
+// Open_nocancel
+// Close_nocancel
+// Wait4_nocancel
+// Recvmsg_nocancel
+// Sendmsg_nocancel
+// Recvfrom_nocancel
+// Accept_nocancel
+// Msync_nocancel
+// Fcntl_nocancel
+// Select_nocancel
+// Fsync_nocancel
+// Connect_nocancel
+// Sigsuspend_nocancel
+// Readv_nocancel
+// Writev_nocancel
+// Sendto_nocancel
+// Pread_nocancel
+// Pwrite_nocancel
+// Waitid_nocancel
+// Poll_nocancel
+// Msgsnd_nocancel
+// Msgrcv_nocancel
+// Sem_wait_nocancel
+// Aio_suspend_nocancel
+// __sigwait_nocancel
+// __semwait_signal_nocancel
+// __mac_mount
+// __mac_get_mount
+// __mac_getfsstat
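On DragonFly the Pread and Pwrite wrappers above are thin shims over extpread/extpwrite with a zero flags argument, so callers use them exactly like positional reads on the other BSDs. As an illustrative aside (not part of the vendored file), a small sketch follows; the /etc/motd path is only an example:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// readHead pread()s the first size bytes of path without moving any
// file offset, using the extpread-backed Pread wrapper defined above.
func readHead(path string, size int) ([]byte, error) {
	fd, err := unix.Open(path, unix.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer unix.Close(fd)

	buf := make([]byte, size)
	n, err := unix.Pread(fd, buf, 0)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}

func main() {
	head, err := readHead("/etc/motd", 64)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", head)
}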
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
new file mode 100644
index 0000000..da7cb79
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
@@ -0,0 +1,61 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,dragonfly
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = nsec % 1e9 / 1e3
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
new file mode 100644
index 0000000..520ccbe
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -0,0 +1,683 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// FreeBSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import "unsafe"
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [46]int8
+ raw RawSockaddrDatalink
+}
+
+// Translate "kern.hostname" to []_C_int{0,1,2,3}.
+func nametomib(name string) (mib []_C_int, err error) {
+ const siz = unsafe.Sizeof(mib[0])
+
+ // NOTE(rsc): It seems strange to set the buffer to have
+ // size CTL_MAXNAME+2 but use only CTL_MAXNAME
+ // as the size. I don't know why the +2 is here, but the
+ // kernel uses +2 for its own implementation of this function.
+ // I am scared that if we don't include the +2 here, the kernel
+ // will silently write 2 words farther than we specify
+ // and we'll get memory corruption.
+ var buf [CTL_MAXNAME + 2]_C_int
+ n := uintptr(CTL_MAXNAME) * siz
+
+ p := (*byte)(unsafe.Pointer(&buf[0]))
+ bytes, err := ByteSliceFromString(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // Magic sysctl: "setting" 0.3 to a string name
+ // lets you read back the array of integers form.
+ if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
+ return nil, err
+ }
+ return buf[0 : n/siz], nil
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Fileno == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe() (r int, w int, err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ p[0], p[1], err = pipe()
+ return
+}
+
+func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
+ var value IPMreqn
+ vallen := _Socklen(SizeofIPMreqn)
+ errno := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, errno
+}
+
+func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
+}
+
+func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, flags)
+ if err != nil {
+ return
+ }
+ if len > SizeofSockaddrAny {
+ panic("RawSockaddrAny too small")
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ use(unsafe.Pointer(_p0))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Derive extattr namespace and attribute name
+
+func xattrnamespace(fullattr string) (ns int, attr string, err error) {
+ s := -1
+ for idx, val := range fullattr {
+ if val == '.' {
+ s = idx
+ break
+ }
+ }
+
+ if s == -1 {
+ return -1, "", ENOATTR
+ }
+
+ namespace := fullattr[0:s]
+ attr = fullattr[s+1:]
+
+ switch namespace {
+ case "user":
+ return EXTATTR_NAMESPACE_USER, attr, nil
+ case "system":
+ return EXTATTR_NAMESPACE_SYSTEM, attr, nil
+ default:
+ return -1, "", ENOATTR
+ }
+}
+
+func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) {
+ if len(dest) > idx {
+ return unsafe.Pointer(&dest[idx])
+ } else {
+ return unsafe.Pointer(_zero)
+ }
+}
+
+// FreeBSD implements its own syscalls to handle extended attributes
+
+func Getxattr(file string, attr string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsize := len(dest)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return -1, err
+ }
+
+ return ExtattrGetFile(file, nsid, a, uintptr(d), destsize)
+}
+
+func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsize := len(dest)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return -1, err
+ }
+
+ return ExtattrGetFd(fd, nsid, a, uintptr(d), destsize)
+}
+
+func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsize := len(dest)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return -1, err
+ }
+
+ return ExtattrGetLink(link, nsid, a, uintptr(d), destsize)
+}
+
+// flags are unused on FreeBSD
+
+func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) {
+ d := unsafe.Pointer(&data[0])
+ datasiz := len(data)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ _, err = ExtattrSetFd(fd, nsid, a, uintptr(d), datasiz)
+ return
+}
+
+func Setxattr(file string, attr string, data []byte, flags int) (err error) {
+ d := unsafe.Pointer(&data[0])
+ datasiz := len(data)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ _, err = ExtattrSetFile(file, nsid, a, uintptr(d), datasiz)
+ return
+}
+
+func Lsetxattr(link string, attr string, data []byte, flags int) (err error) {
+ d := unsafe.Pointer(&data[0])
+ datasiz := len(data)
+
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ _, err = ExtattrSetLink(link, nsid, a, uintptr(d), datasiz)
+ return
+}
+
+func Removexattr(file string, attr string) (err error) {
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ err = ExtattrDeleteFile(file, nsid, a)
+ return
+}
+
+func Fremovexattr(fd int, attr string) (err error) {
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ err = ExtattrDeleteFd(fd, nsid, a)
+ return
+}
+
+func Lremovexattr(link string, attr string) (err error) {
+ nsid, a, err := xattrnamespace(attr)
+ if err != nil {
+ return
+ }
+
+ err = ExtattrDeleteLink(link, nsid, a)
+ return
+}
+
+func Listxattr(file string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsiz := len(dest)
+
+ // FreeBSD won't allow you to list xattrs from multiple namespaces
+ s := 0
+ var e error
+ for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
+ stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz)
+
+ /* Errors accessing system attrs are ignored so that
+ * we can implement the Linux-like behavior of omitting errors that
+ * we don't have read permissions on
+ *
+ * Linux will still error if we ask for user attributes on a file that
+ * we don't have read permissions on, so don't ignore those errors
+ */
+ if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ e = nil
+ continue
+ } else if e != nil {
+ return s, e
+ }
+
+ s += stmp
+ destsiz -= s
+ if destsiz < 0 {
+ destsiz = 0
+ }
+ d = initxattrdest(dest, s)
+ }
+
+ return s, e
+}
+
+func Flistxattr(fd int, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsiz := len(dest)
+
+ s := 0
+ var e error
+ for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
+ stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz)
+ if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ e = nil
+ continue
+ } else if e != nil {
+ return s, e
+ }
+
+ s += stmp
+ destsiz -= s
+ if destsiz < 0 {
+ destsiz = 0
+ }
+ d = initxattrdest(dest, s)
+ }
+
+ return s, e
+}
+
+func Llistxattr(link string, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsiz := len(dest)
+
+ s := 0
+ var e error
+ for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
+ stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz)
+ if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ e = nil
+ continue
+ } else if e != nil {
+ return s, e
+ }
+
+ s += stmp
+ destsiz -= s
+ if destsiz < 0 {
+ destsiz = 0
+ }
+ d = initxattrdest(dest, s)
+ }
+
+ return s, e
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exit(code int)
+//sys ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error)
+//sys ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error)
+//sys ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
+//sys ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error)
+//sys ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
+//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, stat *Statfs_t) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sys Getdtablesize() (size int)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Issetugid() (tainted bool)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, stat *Statfs_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Undelete(path string) (err error)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
+
+/*
+ * Unimplemented
+ */
+// Profil
+// Sigaction
+// Sigprocmask
+// Getlogin
+// Sigpending
+// Sigaltstack
+// Ioctl
+// Reboot
+// Execve
+// Vfork
+// Sbrk
+// Sstk
+// Ovadvise
+// Mincore
+// Setitimer
+// Swapon
+// Select
+// Sigsuspend
+// Readv
+// Writev
+// Nfssvc
+// Getfh
+// Quotactl
+// Mount
+// Csops
+// Waitid
+// Add_profil
+// Kdebug_trace
+// Sigreturn
+// Mmap
+// Mlock
+// Munlock
+// Atsocket
+// Kqueue_from_portset_np
+// Kqueue_portset
+// Getattrlist
+// Setattrlist
+// Getdirentriesattr
+// Searchfs
+// Delete
+// Copyfile
+// Poll
+// Watchevent
+// Waitevent
+// Modwatch
+// Getxattr
+// Fgetxattr
+// Setxattr
+// Fsetxattr
+// Removexattr
+// Fremovexattr
+// Listxattr
+// Flistxattr
+// Fsctl
+// Initgroups
+// Posix_spawn
+// Nfsclnt
+// Fhopen
+// Minherit
+// Semsys
+// Msgsys
+// Shmsys
+// Semctl
+// Semget
+// Semop
+// Msgctl
+// Msgget
+// Msgsnd
+// Msgrcv
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Shm_open
+// Shm_unlink
+// Sem_open
+// Sem_close
+// Sem_unlink
+// Sem_wait
+// Sem_trywait
+// Sem_post
+// Sem_getvalue
+// Sem_init
+// Sem_destroy
+// Open_extended
+// Umask_extended
+// Stat_extended
+// Lstat_extended
+// Fstat_extended
+// Chmod_extended
+// Fchmod_extended
+// Access_extended
+// Settid
+// Gettid
+// Setsgroups
+// Getsgroups
+// Setwgroups
+// Getwgroups
+// Mkfifo_extended
+// Mkdir_extended
+// Identitysvc
+// Shared_region_check_np
+// Shared_region_map_np
+// __pthread_mutex_destroy
+// __pthread_mutex_init
+// __pthread_mutex_lock
+// __pthread_mutex_trylock
+// __pthread_mutex_unlock
+// __pthread_cond_init
+// __pthread_cond_destroy
+// __pthread_cond_broadcast
+// __pthread_cond_signal
+// Setsid_with_pid
+// __pthread_cond_timedwait
+// Aio_fsync
+// Aio_return
+// Aio_suspend
+// Aio_cancel
+// Aio_error
+// Aio_read
+// Aio_write
+// Lio_listio
+// __pthread_cond_wait
+// Iopolicysys
+// Mlockall
+// Munlockall
+// __pthread_kill
+// __pthread_sigmask
+// __sigwait
+// __disable_threadsignal
+// __pthread_markcancel
+// __pthread_canceled
+// __semwait_signal
+// Proc_info
+// Stat64_extended
+// Lstat64_extended
+// Fstat64_extended
+// __pthread_chdir
+// __pthread_fchdir
+// Audit
+// Auditon
+// Getauid
+// Setauid
+// Getaudit
+// Setaudit
+// Getaudit_addr
+// Setaudit_addr
+// Auditctl
+// Bsdthread_create
+// Bsdthread_terminate
+// Stack_snapshot
+// Bsdthread_register
+// Workq_open
+// Workq_ops
+// __mac_execve
+// __mac_syscall
+// __mac_get_file
+// __mac_set_file
+// __mac_get_link
+// __mac_set_link
+// __mac_get_proc
+// __mac_set_proc
+// __mac_get_fd
+// __mac_set_fd
+// __mac_get_pid
+// __mac_get_lcid
+// __mac_get_lctx
+// __mac_set_lctx
+// Setlcid
+// Read_nocancel
+// Write_nocancel
+// Open_nocancel
+// Close_nocancel
+// Wait4_nocancel
+// Recvmsg_nocancel
+// Sendmsg_nocancel
+// Recvfrom_nocancel
+// Accept_nocancel
+// Msync_nocancel
+// Fcntl_nocancel
+// Select_nocancel
+// Fsync_nocancel
+// Connect_nocancel
+// Sigsuspend_nocancel
+// Readv_nocancel
+// Writev_nocancel
+// Sendto_nocancel
+// Pread_nocancel
+// Pwrite_nocancel
+// Waitid_nocancel
+// Poll_nocancel
+// Msgsnd_nocancel
+// Msgrcv_nocancel
+// Sem_wait_nocancel
+// Aio_suspend_nocancel
+// __sigwait_nocancel
+// __semwait_signal_nocancel
+// __mac_mount
+// __mac_get_mount
+// __mac_getfsstat
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
new file mode 100644
index 0000000..6a0cd80
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
@@ -0,0 +1,61 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,freebsd
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
new file mode 100644
index 0000000..e142540
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
@@ -0,0 +1,61 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,freebsd
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = nsec % 1e9 / 1e3
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
new file mode 100644
index 0000000..5504cb1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
@@ -0,0 +1,61 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,freebsd
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return ts.Sec*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = nsec / 1e9
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ var writtenOut uint64 = 0
+ _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
+
+ written = int(writtenOut)
+
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
new file mode 100644
index 0000000..01c569a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -0,0 +1,1152 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Linux system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and
+// wrap it in our own nicer implementation.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+/*
+ * Wrapped
+ */
+
+func Access(path string, mode uint32) (err error) {
+ return Faccessat(AT_FDCWD, path, mode, 0)
+}
+
+func Chmod(path string, mode uint32) (err error) {
+ return Fchmodat(AT_FDCWD, path, mode, 0)
+}
+
+func Chown(path string, uid int, gid int) (err error) {
+ return Fchownat(AT_FDCWD, path, uid, gid, 0)
+}
+
+func Creat(path string, mode uint32) (fd int, err error) {
+ return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
+}
+
+//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
+
+func Link(oldpath string, newpath string) (err error) {
+ return Linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
+}
+
+func Mkdir(path string, mode uint32) (err error) {
+ return Mkdirat(AT_FDCWD, path, mode)
+}
+
+func Mknod(path string, mode uint32, dev int) (err error) {
+ return Mknodat(AT_FDCWD, path, mode, dev)
+}
+
+func Open(path string, mode int, perm uint32) (fd int, err error) {
+ return openat(AT_FDCWD, path, mode|O_LARGEFILE, perm)
+}
+
+//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
+
+func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+ return openat(dirfd, path, flags|O_LARGEFILE, mode)
+}
+
+//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
+
+func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+ if len(fds) == 0 {
+ return ppoll(nil, 0, timeout, sigmask)
+ }
+ return ppoll(&fds[0], len(fds), timeout, sigmask)
+}
+
+//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)
+
+func Readlink(path string, buf []byte) (n int, err error) {
+ return Readlinkat(AT_FDCWD, path, buf)
+}
+
+func Rename(oldpath string, newpath string) (err error) {
+ return Renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath)
+}
+
+func Rmdir(path string) error {
+ return Unlinkat(AT_FDCWD, path, AT_REMOVEDIR)
+}
+
+//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error)
+
+func Symlink(oldpath string, newpath string) (err error) {
+ return Symlinkat(oldpath, AT_FDCWD, newpath)
+}
+
+func Unlink(path string) error {
+ return Unlinkat(AT_FDCWD, path, 0)
+}
+
+//sys Unlinkat(dirfd int, path string, flags int) (err error)
+
+//sys utimes(path string, times *[2]Timeval) (err error)
+
+func Utimes(path string, tv []Timeval) error {
+ if tv == nil {
+ err := utimensat(AT_FDCWD, path, nil, 0)
+ if err != ENOSYS {
+ return err
+ }
+ return utimes(path, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ var ts [2]Timespec
+ ts[0] = NsecToTimespec(TimevalToNsec(tv[0]))
+ ts[1] = NsecToTimespec(TimevalToNsec(tv[1]))
+ err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+ if err != ENOSYS {
+ return err
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
+
+func UtimesNano(path string, ts []Timespec) error {
+ if ts == nil {
+ err := utimensat(AT_FDCWD, path, nil, 0)
+ if err != ENOSYS {
+ return err
+ }
+ return utimes(path, nil)
+ }
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+ if err != ENOSYS {
+ return err
+ }
+ // If the utimensat syscall isn't available (utimensat was added to Linux
+ // in 2.6.22, Released, 8 July 2007) then fall back to utimes
+ var tv [2]Timeval
+ for i := 0; i < 2; i++ {
+ tv[i] = NsecToTimeval(TimespecToNsec(ts[i]))
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
+ if ts == nil {
+ return utimensat(dirfd, path, nil, flags)
+ }
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags)
+}
+
+//sys futimesat(dirfd int, path *byte, times *[2]Timeval) (err error)
+
+func Futimesat(dirfd int, path string, tv []Timeval) error {
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ if tv == nil {
+ return futimesat(dirfd, pathp, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func Futimes(fd int, tv []Timeval) (err error) {
+ // Believe it or not, this is the best we can do on Linux
+ // (and is what glibc does).
+ return Utimes("/proc/self/fd/"+itoa(fd), tv)
+}
+
+const ImplementsGetwd = true
+
+//sys Getcwd(buf []byte) (n int, err error)
+
+func Getwd() (wd string, err error) {
+ var buf [PathMax]byte
+ n, err := Getcwd(buf[0:])
+ if err != nil {
+ return "", err
+ }
+ // Getcwd returns the number of bytes written to buf, including the NUL.
+ if n < 1 || n > len(buf) || buf[n-1] != 0 {
+ return "", EINVAL
+ }
+ return string(buf[0 : n-1]), nil
+}
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Sanity check group count. Max is 1<<16 on Linux.
+ if n < 0 || n > 1<<20 {
+ return nil, EINVAL
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if err != nil {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+func Setgroups(gids []int) (err error) {
+ if len(gids) == 0 {
+ return setgroups(0, nil)
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ return setgroups(len(a), &a[0])
+}
+
+type WaitStatus uint32
+
+// Wait status is 7 bits at bottom, either 0 (exited),
+// 0x7F (stopped), or a signal number that caused an exit.
+// The 0x80 bit is whether there was a core dump.
+// An extra number (exit code, signal causing a stop)
+// is in the high bits. At least that's the idea.
+// There are various irregularities. For example, the
+// "continued" status is 0xFFFF, distinguishing itself
+// from stopped via the core dump bit.
+
+const (
+ mask = 0x7F
+ core = 0x80
+ exited = 0x00
+ stopped = 0x7F
+ shift = 8
+)
+
+func (w WaitStatus) Exited() bool { return w&mask == exited }
+
+func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited }
+
+func (w WaitStatus) Stopped() bool { return w&0xFF == stopped }
+
+func (w WaitStatus) Continued() bool { return w == 0xFFFF }
+
+func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
+
+func (w WaitStatus) ExitStatus() int {
+ if !w.Exited() {
+ return -1
+ }
+ return int(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) Signal() syscall.Signal {
+ if !w.Signaled() {
+ return -1
+ }
+ return syscall.Signal(w & mask)
+}
+
+func (w WaitStatus) StopSignal() syscall.Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return syscall.Signal(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) TrapCause() int {
+ if w.StopSignal() != SIGTRAP {
+ return -1
+ }
+ return int(w>>shift) >> 8
+}
+
+//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)
+
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
+ var status _C_int
+ wpid, err = wait4(pid, &status, options, rusage)
+ if wstatus != nil {
+ *wstatus = WaitStatus(status)
+ }
+ return
+}
+
+func Mkfifo(path string, mode uint32) (err error) {
+ return Mknod(path, mode|S_IFIFO, 0)
+}
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n >= len(sa.raw.Path) {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ // length is family (uint16), name, NUL.
+ sl := _Socklen(2)
+ if n > 0 {
+ sl += _Socklen(n) + 1
+ }
+ if sa.raw.Path[0] == '@' {
+ sa.raw.Path[0] = 0
+ // Don't count trailing NUL for abstract address.
+ sl--
+ }
+
+ return unsafe.Pointer(&sa.raw), sl, nil
+}
+
+type SockaddrLinklayer struct {
+ Protocol uint16
+ Ifindex int
+ Hatype uint16
+ Pkttype uint8
+ Halen uint8
+ Addr [8]byte
+ raw RawSockaddrLinklayer
+}
+
+func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_PACKET
+ sa.raw.Protocol = sa.Protocol
+ sa.raw.Ifindex = int32(sa.Ifindex)
+ sa.raw.Hatype = sa.Hatype
+ sa.raw.Pkttype = sa.Pkttype
+ sa.raw.Halen = sa.Halen
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
+}
+
+type SockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+ raw RawSockaddrNetlink
+}
+
+func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ sa.raw.Family = AF_NETLINK
+ sa.raw.Pad = sa.Pad
+ sa.raw.Pid = sa.Pid
+ sa.raw.Groups = sa.Groups
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
+}
+
+type SockaddrHCI struct {
+ Dev uint16
+ Channel uint16
+ raw RawSockaddrHCI
+}
+
+func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ sa.raw.Family = AF_BLUETOOTH
+ sa.raw.Dev = sa.Dev
+ sa.raw.Channel = sa.Channel
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil
+}
+
+// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
+// The RxID and TxID fields are used for transport protocol addressing in
+// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
+// zero values for CAN_RAW and CAN_BCM sockets as they have no meaning.
+//
+// The SockaddrCAN struct must be bound to the socket file descriptor
+// using Bind before the CAN socket can be used.
+//
+// // Read one raw CAN frame
+// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
+// addr := &SockaddrCAN{Ifindex: index}
+// Bind(fd, addr)
+// frame := make([]byte, 16)
+// Read(fd, frame)
+//
+// The full SocketCAN documentation can be found in the linux kernel
+// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
+type SockaddrCAN struct {
+ Ifindex int
+ RxID uint32
+ TxID uint32
+ raw RawSockaddrCAN
+}
+
+func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_CAN
+ sa.raw.Ifindex = int32(sa.Ifindex)
+ rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
+ for i := 0; i < 4; i++ {
+ sa.raw.Addr[i] = rx[i]
+ }
+ tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
+ for i := 0; i < 4; i++ {
+ sa.raw.Addr[i+4] = tx[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_NETLINK:
+ pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
+ sa := new(SockaddrNetlink)
+ sa.Family = pp.Family
+ sa.Pad = pp.Pad
+ sa.Pid = pp.Pid
+ sa.Groups = pp.Groups
+ return sa, nil
+
+ case AF_PACKET:
+ pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
+ sa := new(SockaddrLinklayer)
+ sa.Protocol = pp.Protocol
+ sa.Ifindex = int(pp.Ifindex)
+ sa.Hatype = pp.Hatype
+ sa.Pkttype = pp.Pkttype
+ sa.Halen = pp.Halen
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ sa := new(SockaddrUnix)
+ if pp.Path[0] == 0 {
+ // "Abstract" Unix domain socket.
+ // Rewrite leading NUL as @ for textual display.
+ // (This is the standard convention.)
+ // Not friendly to overwrite in place,
+ // but the callers below don't care.
+ pp.Path[0] = '@'
+ }
+
+ // Assume path ends at NUL.
+ // This is not technically the Linux semantics for
+ // abstract Unix domain sockets--they are supposed
+ // to be uninterpreted fixed-size binary blobs--but
+ // everyone uses this convention.
+ n := 0
+ for n < len(pp.Path) && pp.Path[n] != 0 {
+ n++
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
+
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept(fd, &rsa, &len)
+ if err != nil {
+ return
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, flags)
+ if err != nil {
+ return
+ }
+ if len > SizeofSockaddrAny {
+ panic("RawSockaddrAny too small")
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ return value, err
+}
+
+func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
+ var value IPMreq
+ vallen := _Socklen(SizeofIPMreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
+ var value IPMreqn
+ vallen := _Socklen(SizeofIPMreqn)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
+ var value IPv6Mreq
+ vallen := _Socklen(SizeofIPv6Mreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
+ var value IPv6MTUInfo
+ vallen := _Socklen(SizeofIPv6MTUInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
+ var value ICMPv6Filter
+ vallen := _Socklen(SizeofICMPv6Filter)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
+ var value Ucred
+ vallen := _Socklen(SizeofUcred)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
+}
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var msg Msghdr
+ var rsa RawSockaddrAny
+ msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // receive at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); err != nil {
+ return
+ }
+ oobn = int(msg.Controllen)
+ recvflags = int(msg.Flags)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ var err error
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(ptr))
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // send at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
+
+// BindToDevice binds the socket associated with fd to device.
+func BindToDevice(fd int, device string) (err error) {
+ return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device)
+}
+
+//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
+
+func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
+ // The peek requests are machine-size oriented, so we wrap it
+ // to retrieve arbitrary-length data.
+
+ // The ptrace syscall differs from glibc's ptrace.
+ // Peeks returns the word in *data, not as the return value.
+
+ var buf [sizeofPtr]byte
+
+ // Leading edge. PEEKTEXT/PEEKDATA don't require aligned
+ // access (PEEKUSER warns that it might), but if we don't
+ // align our reads, we might straddle an unmapped page
+ // boundary and not get the bytes leading up to the page
+ // boundary.
+ n := 0
+ if addr%sizeofPtr != 0 {
+ err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return 0, err
+ }
+ n += copy(out, buf[addr%sizeofPtr:])
+ out = out[n:]
+ }
+
+ // Remainder.
+ for len(out) > 0 {
+ // We use an internal buffer to guarantee alignment.
+ // It's not documented if this is necessary, but we're paranoid.
+ err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return n, err
+ }
+ copied := copy(out, buf[0:])
+ n += copied
+ out = out[copied:]
+ }
+
+ return n, nil
+}
+
+func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out)
+}
+
+func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(PTRACE_PEEKDATA, pid, addr, out)
+}
+
+func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) {
+ // As for ptracePeek, we need to align our accesses to deal
+ // with the possibility of straddling an invalid page.
+
+ // Leading edge.
+ n := 0
+ if addr%sizeofPtr != 0 {
+ var buf [sizeofPtr]byte
+ err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return 0, err
+ }
+ n += copy(buf[addr%sizeofPtr:], data)
+ word := *((*uintptr)(unsafe.Pointer(&buf[0])))
+ err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word)
+ if err != nil {
+ return 0, err
+ }
+ data = data[n:]
+ }
+
+ // Interior.
+ for len(data) > sizeofPtr {
+ word := *((*uintptr)(unsafe.Pointer(&data[0])))
+ err = ptrace(pokeReq, pid, addr+uintptr(n), word)
+ if err != nil {
+ return n, err
+ }
+ n += sizeofPtr
+ data = data[sizeofPtr:]
+ }
+
+ // Trailing edge.
+ if len(data) > 0 {
+ var buf [sizeofPtr]byte
+ err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return n, err
+ }
+ copy(buf[0:], data)
+ word := *((*uintptr)(unsafe.Pointer(&buf[0])))
+ err = ptrace(pokeReq, pid, addr+uintptr(n), word)
+ if err != nil {
+ return n, err
+ }
+ n += len(data)
+ }
+
+ return n, nil
+}
+
+func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data)
+}
+
+func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data)
+}
+
+func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
+ return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
+ return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
+
+func PtraceSetOptions(pid int, options int) (err error) {
+ return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options))
+}
+
+func PtraceGetEventMsg(pid int) (msg uint, err error) {
+ var data _C_long
+ err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
+ msg = uint(data)
+ return
+}
+
+func PtraceCont(pid int, signal int) (err error) {
+ return ptrace(PTRACE_CONT, pid, 0, uintptr(signal))
+}
+
+func PtraceSyscall(pid int, signal int) (err error) {
+ return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal))
+}
+
+func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) }
+
+func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) }
+
+func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) }
+
+//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error)
+
+func Reboot(cmd int) (err error) {
+ return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ return Getdents(fd, buf)
+}
+
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
+
+func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+ // Certain file systems get rather angry and EINVAL if you give
+ // them an empty string of data, rather than NULL.
+ if data == "" {
+ return mount(source, target, fstype, flags, nil)
+ }
+ datap, err := BytePtrFromString(data)
+ if err != nil {
+ return err
+ }
+ return mount(source, target, fstype, flags, datap)
+}
+
+// Sendto
+// Recvfrom
+// Socketpair
+
+/*
+ * Direct access
+ */
+//sys Acct(path string) (err error)
+//sys Adjtimex(buf *Timex) (state int, err error)
+//sys Chdir(path string) (err error)
+//sys Chroot(path string) (err error)
+//sys ClockGettime(clockid int32, time *Timespec) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(oldfd int) (fd int, err error)
+//sys Dup3(oldfd int, newfd int, flags int) (err error)
+//sysnb EpollCreate(size int) (fd int, err error)
+//sysnb EpollCreate1(flag int) (fd int, err error)
+//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
+//sys Exit(code int) = SYS_EXIT_GROUP
+//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error)
+//sys Fchdir(fd int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+//sys Fdatasync(fd int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fsync(fd int) (err error)
+//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64
+//sysnb Getpgid(pid int) (pgid int, err error)
+
+func Getpgrp() (pid int) {
+ pid, _ = Getpgid(0)
+ return
+}
+
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettid() (tid int)
+//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
+//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
+//sysnb InotifyInit1(flags int) (fd int, err error)
+//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error)
+//sysnb Kill(pid int, sig syscall.Signal) (err error)
+//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG
+//sys Listxattr(path string, dest []byte) (sz int, err error)
+//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
+//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
+//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
+//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Removexattr(path string, attr string) (err error)
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
+//sys Setdomainname(p []byte) (err error)
+//sys Sethostname(p []byte) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tv *Timeval) (err error)
+//sys Setns(fd int, nstype int) (err error)
+
+// issue 1435.
+// On linux Setuid and Setgid only affects the current thread, not the process.
+// This does not match what most callers expect so we must return an error
+// here rather than letting the caller think that the call succeeded.
+
+func Setuid(uid int) (err error) {
+ return EOPNOTSUPP
+}
+
+func Setgid(uid int) (err error) {
+ return EOPNOTSUPP
+}
+
+//sys Setpriority(which int, who int, prio int) (err error)
+//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
+//sys Sync()
+//sysnb Sysinfo(info *Sysinfo_t) (err error)
+//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error)
+//sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error)
+//sysnb Times(tms *Tms) (ticks uintptr, err error)
+//sysnb Umask(mask int) (oldmask int)
+//sysnb Uname(buf *Utsname) (err error)
+//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2
+//sys Unshare(flags int) (err error)
+//sys Ustat(dev int, ubuf *Ustat_t) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys exitThread(code int) (err error) = SYS_EXIT
+//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
+//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
+
+// mmap varies by architecture; see syscall_linux_*.go.
+//sys munmap(addr uintptr, length uintptr) (err error)
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
+
+//sys Madvise(b []byte, advice int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Munlockall() (err error)
+
+/*
+ * Unimplemented
+ */
+// AddKey
+// AfsSyscall
+// Alarm
+// ArchPrctl
+// Brk
+// Capget
+// Capset
+// ClockGetres
+// ClockNanosleep
+// ClockSettime
+// Clone
+// CreateModule
+// DeleteModule
+// EpollCtlOld
+// EpollPwait
+// EpollWaitOld
+// Eventfd
+// Execve
+// Fgetxattr
+// Flistxattr
+// Fork
+// Fremovexattr
+// Fsetxattr
+// Futex
+// GetKernelSyms
+// GetMempolicy
+// GetRobustList
+// GetThreadArea
+// Getitimer
+// Getpmsg
+// IoCancel
+// IoDestroy
+// IoGetevents
+// IoSetup
+// IoSubmit
+// Ioctl
+// IoprioGet
+// IoprioSet
+// KexecLoad
+// Keyctl
+// Lgetxattr
+// Llistxattr
+// LookupDcookie
+// Lremovexattr
+// Lsetxattr
+// Mbind
+// MigratePages
+// Mincore
+// ModifyLdt
+// Mount
+// MovePages
+// Mprotect
+// MqGetsetattr
+// MqNotify
+// MqOpen
+// MqTimedreceive
+// MqTimedsend
+// MqUnlink
+// Mremap
+// Msgctl
+// Msgget
+// Msgrcv
+// Msgsnd
+// Msync
+// Newfstatat
+// Nfsservctl
+// Personality
+// Pselect6
+// Ptrace
+// Putpmsg
+// QueryModule
+// Quotactl
+// Readahead
+// Readv
+// RemapFilePages
+// RequestKey
+// RestartSyscall
+// RtSigaction
+// RtSigpending
+// RtSigprocmask
+// RtSigqueueinfo
+// RtSigreturn
+// RtSigsuspend
+// RtSigtimedwait
+// SchedGetPriorityMax
+// SchedGetPriorityMin
+// SchedGetaffinity
+// SchedGetparam
+// SchedGetscheduler
+// SchedRrGetInterval
+// SchedSetaffinity
+// SchedSetparam
+// SchedYield
+// Security
+// Semctl
+// Semget
+// Semop
+// Semtimedop
+// SetMempolicy
+// SetRobustList
+// SetThreadArea
+// SetTidAddress
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Sigaltstack
+// Signalfd
+// Swapoff
+// Swapon
+// Sysfs
+// TimerCreate
+// TimerDelete
+// TimerGetoverrun
+// TimerGettime
+// TimerSettime
+// Timerfd
+// Tkill (obsolete)
+// Tuxcall
+// Umount2
+// Uselib
+// Utimensat
+// Vfork
+// Vhangup
+// Vmsplice
+// Vserver
+// Waitid
+// _Sysctl
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
new file mode 100644
index 0000000..2b881b9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
@@ -0,0 +1,399 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(rsc): Rewrite all nn(SP) references into name+(nn-8)(FP)
+// so that go vet can check that they are correct.
+
+// +build 386,linux
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = int32(nsec / 1e9)
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ return
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+// 64-bit file system and 32-bit uid calls
+// (386 default is 32-bit file system and 16-bit uid).
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64
+//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32
+//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
+//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64
+//sysnb Getegid() (egid int) = SYS_GETEGID32
+//sysnb Geteuid() (euid int) = SYS_GETEUID32
+//sysnb Getgid() (gid int) = SYS_GETGID32
+//sysnb Getuid() (uid int) = SYS_GETUID32
+//sysnb InotifyInit() (fd int, err error)
+//sys Ioperm(from int, num int, on int) (err error)
+//sys Iopl(level int) (err error)
+//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32
+//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
+//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32
+//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32
+//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
+//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
+//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
+//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32
+//sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
+
+//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error)
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Pause() (err error)
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ page := uintptr(offset / 4096)
+ if offset != int64(page)*4096 {
+ return 0, EINVAL
+ }
+ return mmap2(addr, length, prot, flags, fd, page)
+}
+
+type rlimit32 struct {
+ Cur uint32
+ Max uint32
+}
+
+//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT
+
+const rlimInf32 = ^uint32(0)
+const rlimInf64 = ^uint64(0)
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, nil, rlim)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ err = getrlimit(resource, &rl)
+ if err != nil {
+ return
+ }
+
+ if rl.Cur == rlimInf32 {
+ rlim.Cur = rlimInf64
+ } else {
+ rlim.Cur = uint64(rl.Cur)
+ }
+
+ if rl.Max == rlimInf32 {
+ rlim.Max = rlimInf64
+ } else {
+ rlim.Max = uint64(rl.Max)
+ }
+ return
+}
+
+//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, rlim, nil)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ if rlim.Cur == rlimInf64 {
+ rl.Cur = rlimInf32
+ } else if rlim.Cur < uint64(rlimInf32) {
+ rl.Cur = uint32(rlim.Cur)
+ } else {
+ return EINVAL
+ }
+ if rlim.Max == rlimInf64 {
+ rl.Max = rlimInf32
+ } else if rlim.Max < uint64(rlimInf32) {
+ rl.Max = uint32(rlim.Max)
+ } else {
+ return EINVAL
+ }
+
+ return setrlimit(resource, &rl)
+}
+
+// Underlying system call writes to newoffset via pointer.
+// Implemented in assembly to avoid allocation.
+func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno)
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ newoffset, errno := seek(fd, offset, whence)
+ if errno != 0 {
+ return 0, errno
+ }
+ return newoffset, nil
+}
+
+// Vsyscalls on amd64.
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Time(t *Time_t) (tt Time_t, err error)
+
+//sys Utime(path string, buf *Utimbuf) (err error)
+
+// On x86 Linux, all the socket calls go through an extra indirection,
+// I think because the 5-register system call interface can't handle
+// the 6-argument calls like sendto and recvfrom. Instead the
+// arguments to the underlying system call are the number below
+// and a pointer to an array of uintptr. We hide the pointer in the
+// socketcall assembly to avoid allocation on every system call.
+
+const (
+ // see linux/net.h
+ _SOCKET = 1
+ _BIND = 2
+ _CONNECT = 3
+ _LISTEN = 4
+ _ACCEPT = 5
+ _GETSOCKNAME = 6
+ _GETPEERNAME = 7
+ _SOCKETPAIR = 8
+ _SEND = 9
+ _RECV = 10
+ _SENDTO = 11
+ _RECVFROM = 12
+ _SHUTDOWN = 13
+ _SETSOCKOPT = 14
+ _GETSOCKOPT = 15
+ _SENDMSG = 16
+ _RECVMSG = 17
+ _ACCEPT4 = 18
+ _RECVMMSG = 19
+ _SENDMMSG = 20
+)
+
+func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno)
+func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno)
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+ fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, e := rawsocketcall(_GETSOCKNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, e := rawsocketcall(_GETPEERNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) {
+ _, e := rawsocketcall(_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, e := socketcall(_BIND, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, e := socketcall(_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ fd, e := rawsocketcall(_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, e := socketcall(_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, e := socketcall(_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var base uintptr
+ if len(p) > 0 {
+ base = uintptr(unsafe.Pointer(&p[0]))
+ }
+ n, e := socketcall(_RECVFROM, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var base uintptr
+ if len(p) > 0 {
+ base = uintptr(unsafe.Pointer(&p[0]))
+ }
+ _, e := socketcall(_SENDTO, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ n, e := socketcall(_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ n, e := socketcall(_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Listen(s int, n int) (err error) {
+ _, e := socketcall(_LISTEN, uintptr(s), uintptr(n), 0, 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Shutdown(s, how int) (err error) {
+ _, e := socketcall(_SHUTDOWN, uintptr(s), uintptr(how), 0, 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return uint64(uint32(r.Eip)) }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Eip = int32(pc) }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ if len(fds) == 0 {
+ return poll(nil, 0, timeout)
+ }
+ return poll(&fds[0], len(fds), timeout)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
new file mode 100644
index 0000000..18911c2
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -0,0 +1,157 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,linux
+
+package unix
+
+import "syscall"
+
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sysnb InotifyInit() (fd int, err error)
+//sys Ioperm(from int, num int, on int) (err error)
+//sys Iopl(level int) (err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Listen(s int, n int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Pause() (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+//go:noescape
+func gettimeofday(tv *Timeval) (err syscall.Errno)
+
+func Gettimeofday(tv *Timeval) (err error) {
+ errno := gettimeofday(tv)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+func Getpagesize() int { return 4096 }
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ var tv Timeval
+ errno := gettimeofday(&tv)
+ if errno != 0 {
+ return 0, errno
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+//sys Utime(path string, buf *Utimbuf) (err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Rip }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Rip = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ if len(fds) == 0 {
+ return poll(nil, 0, timeout)
+ }
+ return poll(&fds[0], len(fds), timeout)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
new file mode 100644
index 0000000..71d8702
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -0,0 +1,263 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,linux
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = int32(nsec / 1e9)
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ return
+}
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, 0)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+// Underlying system call writes to newoffset via pointer.
+// Implemented in assembly to avoid allocation.
+func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno)
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ newoffset, errno := seek(fd, offset, whence)
+ if errno != 0 {
+ return 0, errno
+ }
+ return newoffset, nil
+}
+
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32
+//sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb socketpair(domain int, typ int, flags int, fd *[2]int32) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+// 64-bit file system and 32-bit uid calls
+// (16-bit uid calls are not always supported in newer kernels)
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32
+//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
+//sysnb Getegid() (egid int) = SYS_GETEGID32
+//sysnb Geteuid() (euid int) = SYS_GETEUID32
+//sysnb Getgid() (gid int) = SYS_GETGID32
+//sysnb Getuid() (uid int) = SYS_GETUID32
+//sysnb InotifyInit() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32
+//sys Listen(s int, n int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
+//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32
+//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32
+//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
+//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
+//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
+//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
+
+// Vsyscalls on amd64.
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Pause() (err error)
+
+func Time(t *Time_t) (Time_t, error) {
+ var tv Timeval
+ err := Gettimeofday(&tv)
+ if err != nil {
+ return 0, err
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+func Utime(path string, buf *Utimbuf) error {
+ tv := []Timeval{
+ {Sec: buf.Actime},
+ {Sec: buf.Modtime},
+ }
+ return Utimes(path, tv)
+}
+
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64
+//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64
+
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+ _, _, e1 := Syscall6(SYS_ARM_FADVISE64_64, uintptr(fd), uintptr(advice), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error)
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(pathp)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ page := uintptr(offset / 4096)
+ if offset != int64(page)*4096 {
+ return 0, EINVAL
+ }
+ return mmap2(addr, length, prot, flags, fd, page)
+}
+
+type rlimit32 struct {
+ Cur uint32
+ Max uint32
+}
+
+//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT
+
+const rlimInf32 = ^uint32(0)
+const rlimInf64 = ^uint64(0)
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, nil, rlim)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ err = getrlimit(resource, &rl)
+ if err != nil {
+ return
+ }
+
+ if rl.Cur == rlimInf32 {
+ rlim.Cur = rlimInf64
+ } else {
+ rlim.Cur = uint64(rl.Cur)
+ }
+
+ if rl.Max == rlimInf32 {
+ rlim.Max = rlimInf64
+ } else {
+ rlim.Max = uint64(rl.Max)
+ }
+ return
+}
+
+//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, rlim, nil)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ if rlim.Cur == rlimInf64 {
+ rl.Cur = rlimInf32
+ } else if rlim.Cur < uint64(rlimInf32) {
+ rl.Cur = uint32(rlim.Cur)
+ } else {
+ return EINVAL
+ }
+ if rlim.Max == rlimInf64 {
+ rl.Max = rlimInf32
+ } else if rlim.Max < uint64(rlimInf32) {
+ rl.Max = uint32(rlim.Max)
+ } else {
+ return EINVAL
+ }
+
+ return setrlimit(resource, &rl)
+}
+
+func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ if len(fds) == 0 {
+ return poll(nil, 0, timeout)
+ }
+ return poll(&fds[0], len(fds), timeout)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
new file mode 100644
index 0000000..4a13639
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -0,0 +1,190 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm64,linux
+
+package unix
+
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sys Listen(s int, n int) (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+
+func Stat(path string, stat *Stat_t) (err error) {
+ return Fstatat(AT_FDCWD, path, stat, 0)
+}
+
+func Lchown(path string, uid int, gid int) (err error) {
+ return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW)
+}
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW)
+}
+
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+func Getpagesize() int { return 65536 }
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+func Time(t *Time_t) (Time_t, error) {
+ var tv Timeval
+ err := Gettimeofday(&tv)
+ if err != nil {
+ return 0, err
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+func Utime(path string, buf *Utimbuf) error {
+ tv := []Timeval{
+ {Sec: buf.Actime},
+ {Sec: buf.Modtime},
+ }
+ return Utimes(path, tv)
+}
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, 0)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Pc }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+func InotifyInit() (fd int, err error) {
+ return InotifyInit1(0)
+}
+
+func Dup2(oldfd int, newfd int) (err error) {
+ return Dup3(oldfd, newfd, 0)
+}
+
+func Pause() (err error) {
+ _, _, e1 := Syscall6(SYS_PPOLL, 0, 0, 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove
+// these when the deprecated syscalls that the syscall package relies on
+// are removed.
+const (
+ SYS_GETPGRP = 1060
+ SYS_UTIMES = 1037
+ SYS_FUTIMESAT = 1066
+ SYS_PAUSE = 1061
+ SYS_USTAT = 1070
+ SYS_UTIME = 1063
+ SYS_LCHOWN = 1032
+ SYS_TIME = 1062
+ SYS_EPOLL_CREATE = 1042
+ SYS_EPOLL_WAIT = 1069
+)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ var ts *Timespec
+ if timeout >= 0 {
+ ts = new(Timespec)
+ *ts = NsecToTimespec(int64(timeout) * 1e6)
+ }
+ if len(fds) == 0 {
+ return ppoll(nil, 0, ts, nil)
+ }
+ return ppoll(&fds[0], len(fds), ts, nil)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
new file mode 100644
index 0000000..8119fde
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -0,0 +1,208 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips64 mips64le
+
+package unix
+
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Listen(s int, n int) (err error)
+//sys Pause() (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+func Getpagesize() int { return 65536 }
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ var tv Timeval
+ err = Gettimeofday(&tv)
+ if err != nil {
+ return 0, err
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+//sys Utime(path string, buf *Utimbuf) (err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, 0)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+func Ioperm(from int, num int, on int) (err error) {
+ return ENOSYS
+}
+
+func Iopl(level int) (err error) {
+ return ENOSYS
+}
+
+type stat_t struct {
+ Dev uint32
+ Pad0 [3]int32
+ Ino uint64
+ Mode uint32
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Pad1 [3]uint32
+ Size int64
+ Atime uint32
+ Atime_nsec uint32
+ Mtime uint32
+ Mtime_nsec uint32
+ Ctime uint32
+ Ctime_nsec uint32
+ Blksize uint32
+ Pad2 uint32
+ Blocks int64
+}
+
+//sys fstat(fd int, st *stat_t) (err error)
+//sys lstat(path string, st *stat_t) (err error)
+//sys stat(path string, st *stat_t) (err error)
+
+func Fstat(fd int, s *Stat_t) (err error) {
+ st := &stat_t{}
+ err = fstat(fd, st)
+ fillStat_t(s, st)
+ return
+}
+
+func Lstat(path string, s *Stat_t) (err error) {
+ st := &stat_t{}
+ err = lstat(path, st)
+ fillStat_t(s, st)
+ return
+}
+
+func Stat(path string, s *Stat_t) (err error) {
+ st := &stat_t{}
+ err = stat(path, st)
+ fillStat_t(s, st)
+ return
+}
+
+func fillStat_t(s *Stat_t, st *stat_t) {
+ s.Dev = st.Dev
+ s.Ino = st.Ino
+ s.Mode = st.Mode
+ s.Nlink = st.Nlink
+ s.Uid = st.Uid
+ s.Gid = st.Gid
+ s.Rdev = st.Rdev
+ s.Size = st.Size
+ s.Atim = Timespec{int64(st.Atime), int64(st.Atime_nsec)}
+ s.Mtim = Timespec{int64(st.Mtime), int64(st.Mtime_nsec)}
+ s.Ctim = Timespec{int64(st.Ctime), int64(st.Ctime_nsec)}
+ s.Blksize = st.Blksize
+ s.Blocks = st.Blocks
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Regs[64] }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Regs[64] = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ if len(fds) == 0 {
+ return poll(nil, 0, timeout)
+ }
+ return poll(&fds[0], len(fds), timeout)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
new file mode 100644
index 0000000..60770f6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -0,0 +1,135 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build ppc64 ppc64le
+
+package unix
+
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_UGETRLIMIT
+//sysnb Getuid() (uid int)
+//sysnb InotifyInit() (fd int, err error)
+//sys Ioperm(from int, num int, on int) (err error)
+//sys Iopl(level int) (err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Listen(s int, n int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Pause() (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE2
+//sys Truncate(path string, length int64) (err error)
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+func Getpagesize() int { return 65536 }
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Time(t *Time_t) (tt Time_t, err error)
+
+//sys Utime(path string, buf *Utimbuf) (err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Nip }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Nip = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ if len(fds) == 0 {
+ return poll(nil, 0, timeout)
+ }
+ return poll(&fds[0], len(fds), timeout)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
new file mode 100644
index 0000000..81c5f47
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
@@ -0,0 +1,329 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x,linux
+
+package unix
+
+import (
+ "unsafe"
+)
+
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sysnb InotifyInit() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Pause() (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+
+func Getpagesize() int { return 4096 }
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ var tv Timeval
+ err = Gettimeofday(&tv)
+ if err != nil {
+ return 0, err
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+//sys Utime(path string, buf *Utimbuf) (err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, 0) // pipe2 is the same as pipe when flags are set to 0.
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+func Ioperm(from int, num int, on int) (err error) {
+ return ENOSYS
+}
+
+func Iopl(level int) (err error) {
+ return ENOSYS
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Psw.Addr }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Psw.Addr = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+// Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct.
+// mmap2 also requires arguments to be passed in a struct; it is currently not exposed in <asm/unistd.h>.
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ mmap_args := [6]uintptr{addr, length, uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)}
+ r0, _, e1 := Syscall(SYS_MMAP, uintptr(unsafe.Pointer(&mmap_args[0])), 0, 0)
+ use(unsafe.Pointer(&mmap_args[0]))
+ xaddr = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// On s390x Linux, all the socket calls go through an extra indirection.
+// The arguments to the underlying system call (SYS_SOCKETCALL) are the
+// number below and a pointer to an array of uintptr.
+const (
+ // see linux/net.h
+ netSocket = 1
+ netBind = 2
+ netConnect = 3
+ netListen = 4
+ netAccept = 5
+ netGetSockName = 6
+ netGetPeerName = 7
+ netSocketPair = 8
+ netSend = 9
+ netRecv = 10
+ netSendTo = 11
+ netRecvFrom = 12
+ netShutdown = 13
+ netSetSockOpt = 14
+ netGetSockOpt = 15
+ netSendMsg = 16
+ netRecvMsg = 17
+ netAccept4 = 18
+ netRecvMMsg = 19
+ netSendMMsg = 20
+)
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (int, error) {
+ args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))}
+ fd, _, err := Syscall(SYS_SOCKETCALL, netAccept, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(fd), nil
+}
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (int, error) {
+ args := [4]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)}
+ fd, _, err := Syscall(SYS_SOCKETCALL, netAccept4, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(fd), nil
+}
+
+func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) error {
+ args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))}
+ _, _, err := RawSyscall(SYS_SOCKETCALL, netGetSockName, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) error {
+ args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))}
+ _, _, err := RawSyscall(SYS_SOCKETCALL, netGetPeerName, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func socketpair(domain int, typ int, flags int, fd *[2]int32) error {
+ args := [4]uintptr{uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd))}
+ _, _, err := RawSyscall(SYS_SOCKETCALL, netSocketPair, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) error {
+ args := [3]uintptr{uintptr(s), uintptr(addr), uintptr(addrlen)}
+ _, _, err := Syscall(SYS_SOCKETCALL, netBind, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) error {
+ args := [3]uintptr{uintptr(s), uintptr(addr), uintptr(addrlen)}
+ _, _, err := Syscall(SYS_SOCKETCALL, netConnect, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func socket(domain int, typ int, proto int) (int, error) {
+ args := [3]uintptr{uintptr(domain), uintptr(typ), uintptr(proto)}
+ fd, _, err := RawSyscall(SYS_SOCKETCALL, netSocket, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(fd), nil
+}
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) error {
+ args := [5]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))}
+ _, _, err := Syscall(SYS_SOCKETCALL, netGetSockOpt, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error {
+ args := [4]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val)}
+ _, _, err := Syscall(SYS_SOCKETCALL, netSetSockOpt, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (int, error) {
+ var base uintptr
+ if len(p) > 0 {
+ base = uintptr(unsafe.Pointer(&p[0]))
+ }
+ args := [6]uintptr{uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))}
+ n, _, err := Syscall(SYS_SOCKETCALL, netRecvFrom, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(n), nil
+}
+
+func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) error {
+ var base uintptr
+ if len(p) > 0 {
+ base = uintptr(unsafe.Pointer(&p[0]))
+ }
+ args := [6]uintptr{uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen)}
+ _, _, err := Syscall(SYS_SOCKETCALL, netSendTo, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func recvmsg(s int, msg *Msghdr, flags int) (int, error) {
+ args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)}
+ n, _, err := Syscall(SYS_SOCKETCALL, netRecvMsg, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(n), nil
+}
+
+func sendmsg(s int, msg *Msghdr, flags int) (int, error) {
+ args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)}
+ n, _, err := Syscall(SYS_SOCKETCALL, netSendMsg, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(n), nil
+}
+
+func Listen(s int, n int) error {
+ args := [2]uintptr{uintptr(s), uintptr(n)}
+ _, _, err := Syscall(SYS_SOCKETCALL, netListen, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func Shutdown(s, how int) error {
+ args := [2]uintptr{uintptr(s), uintptr(how)}
+ _, _, err := Syscall(SYS_SOCKETCALL, netShutdown, uintptr(unsafe.Pointer(&args)), 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ if len(fds) == 0 {
+ return poll(nil, 0, timeout)
+ }
+ return poll(&fds[0], len(fds), timeout)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
new file mode 100644
index 0000000..20b7454
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -0,0 +1,169 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build sparc64,linux
+
+package unix
+
+import (
+ "sync/atomic"
+ "syscall"
+)
+
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Getuid() (uid int)
+//sysnb InotifyInit() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Listen(s int, n int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Pause() (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+func sysconf(name int) (n int64, err syscall.Errno)
+
+// pageSize caches the value of Getpagesize, since it can't change
+// once the system is booted.
+var pageSize int64 // accessed atomically
+
+func Getpagesize() int {
+ n := atomic.LoadInt64(&pageSize)
+ if n == 0 {
+ n, _ = sysconf(_SC_PAGESIZE)
+ atomic.StoreInt64(&pageSize, n)
+ }
+ return int(n)
+}
+
+func Ioperm(from int, num int, on int) (err error) {
+ return ENOSYS
+}
+
+func Iopl(level int) (err error) {
+ return ENOSYS
+}
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ var tv Timeval
+ err = Gettimeofday(&tv)
+ if err != nil {
+ return 0, err
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+//sys Utime(path string, buf *Utimbuf) (err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Tpc }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Tpc = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ if len(fds) == 0 {
+ return poll(nil, 0, timeout)
+ }
+ return poll(&fds[0], len(fds), timeout)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
new file mode 100644
index 0000000..c4e945c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -0,0 +1,492 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NetBSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [12]int8
+ raw RawSockaddrDatalink
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+func sysctlNodes(mib []_C_int) (nodes []Sysctlnode, err error) {
+ var olen uintptr
+
+ // Get a list of all sysctl nodes below the given MIB by performing
+ // a sysctl for the given MIB with CTL_QUERY appended.
+ mib = append(mib, CTL_QUERY)
+ qnode := Sysctlnode{Flags: SYSCTL_VERS_1}
+ qp := (*byte)(unsafe.Pointer(&qnode))
+ sz := unsafe.Sizeof(qnode)
+ if err = sysctl(mib, nil, &olen, qp, sz); err != nil {
+ return nil, err
+ }
+
+ // Now that we know the size, get the actual nodes.
+ nodes = make([]Sysctlnode, olen/sz)
+ np := (*byte)(unsafe.Pointer(&nodes[0]))
+ if err = sysctl(mib, np, &olen, qp, sz); err != nil {
+ return nil, err
+ }
+
+ return nodes, nil
+}
+
+func nametomib(name string) (mib []_C_int, err error) {
+
+ // Split name into components.
+ var parts []string
+ last := 0
+ for i := 0; i < len(name); i++ {
+ if name[i] == '.' {
+ parts = append(parts, name[last:i])
+ last = i + 1
+ }
+ }
+ parts = append(parts, name[last:])
+
+ // Discover the nodes and construct the MIB OID.
+ for partno, part := range parts {
+ nodes, err := sysctlNodes(mib)
+ if err != nil {
+ return nil, err
+ }
+ for _, node := range nodes {
+ n := make([]byte, 0)
+ for i := range node.Name {
+ if node.Name[i] != 0 {
+ n = append(n, byte(node.Name[i]))
+ }
+ }
+ if string(n) == part {
+ mib = append(mib, _C_int(node.Num))
+ break
+ }
+ }
+ if len(mib) != partno+1 {
+ return nil, EINVAL
+ }
+ }
+
+ return mib, nil
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Fileno == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe() (fd1 int, fd2 int, err error)
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ p[0], p[1], err = pipe()
+ return
+}
+
+//sys getdents(fd int, buf []byte) (n int, err error)
+func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ return getdents(fd, buf)
+}
+
+// TODO
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ return -1, ENOSYS
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Issetugid() (tainted bool)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+
+/*
+ * Unimplemented
+ */
+// ____semctl13
+// __clone
+// __fhopen40
+// __fhstat40
+// __fhstatvfs140
+// __fstat30
+// __getcwd
+// __getfh30
+// __getlogin
+// __lstat30
+// __mount50
+// __msgctl13
+// __msync13
+// __ntp_gettime30
+// __posix_chown
+// __posix_fadvise50
+// __posix_fchown
+// __posix_lchown
+// __posix_rename
+// __setlogin
+// __shmctl13
+// __sigaction_sigtramp
+// __sigaltstack14
+// __sigpending14
+// __sigprocmask14
+// __sigsuspend14
+// __sigtimedwait
+// __stat30
+// __syscall
+// __vfork14
+// _ksem_close
+// _ksem_destroy
+// _ksem_getvalue
+// _ksem_init
+// _ksem_open
+// _ksem_post
+// _ksem_trywait
+// _ksem_unlink
+// _ksem_wait
+// _lwp_continue
+// _lwp_create
+// _lwp_ctl
+// _lwp_detach
+// _lwp_exit
+// _lwp_getname
+// _lwp_getprivate
+// _lwp_kill
+// _lwp_park
+// _lwp_self
+// _lwp_setname
+// _lwp_setprivate
+// _lwp_suspend
+// _lwp_unpark
+// _lwp_unpark_all
+// _lwp_wait
+// _lwp_wakeup
+// _pset_bind
+// _sched_getaffinity
+// _sched_getparam
+// _sched_setaffinity
+// _sched_setparam
+// acct
+// aio_cancel
+// aio_error
+// aio_fsync
+// aio_read
+// aio_return
+// aio_suspend
+// aio_write
+// break
+// clock_getres
+// clock_gettime
+// clock_settime
+// compat_09_ogetdomainname
+// compat_09_osetdomainname
+// compat_09_ouname
+// compat_10_omsgsys
+// compat_10_osemsys
+// compat_10_oshmsys
+// compat_12_fstat12
+// compat_12_getdirentries
+// compat_12_lstat12
+// compat_12_msync
+// compat_12_oreboot
+// compat_12_oswapon
+// compat_12_stat12
+// compat_13_sigaction13
+// compat_13_sigaltstack13
+// compat_13_sigpending13
+// compat_13_sigprocmask13
+// compat_13_sigreturn13
+// compat_13_sigsuspend13
+// compat_14___semctl
+// compat_14_msgctl
+// compat_14_shmctl
+// compat_16___sigaction14
+// compat_16___sigreturn14
+// compat_20_fhstatfs
+// compat_20_fstatfs
+// compat_20_getfsstat
+// compat_20_statfs
+// compat_30___fhstat30
+// compat_30___fstat13
+// compat_30___lstat13
+// compat_30___stat13
+// compat_30_fhopen
+// compat_30_fhstat
+// compat_30_fhstatvfs1
+// compat_30_getdents
+// compat_30_getfh
+// compat_30_ntp_gettime
+// compat_30_socket
+// compat_40_mount
+// compat_43_fstat43
+// compat_43_lstat43
+// compat_43_oaccept
+// compat_43_ocreat
+// compat_43_oftruncate
+// compat_43_ogetdirentries
+// compat_43_ogetdtablesize
+// compat_43_ogethostid
+// compat_43_ogethostname
+// compat_43_ogetkerninfo
+// compat_43_ogetpagesize
+// compat_43_ogetpeername
+// compat_43_ogetrlimit
+// compat_43_ogetsockname
+// compat_43_okillpg
+// compat_43_olseek
+// compat_43_ommap
+// compat_43_oquota
+// compat_43_orecv
+// compat_43_orecvfrom
+// compat_43_orecvmsg
+// compat_43_osend
+// compat_43_osendmsg
+// compat_43_osethostid
+// compat_43_osethostname
+// compat_43_osetrlimit
+// compat_43_osigblock
+// compat_43_osigsetmask
+// compat_43_osigstack
+// compat_43_osigvec
+// compat_43_otruncate
+// compat_43_owait
+// compat_43_stat43
+// execve
+// extattr_delete_fd
+// extattr_delete_file
+// extattr_delete_link
+// extattr_get_fd
+// extattr_get_file
+// extattr_get_link
+// extattr_list_fd
+// extattr_list_file
+// extattr_list_link
+// extattr_set_fd
+// extattr_set_file
+// extattr_set_link
+// extattrctl
+// fchroot
+// fdatasync
+// fgetxattr
+// fktrace
+// flistxattr
+// fork
+// fremovexattr
+// fsetxattr
+// fstatvfs1
+// fsync_range
+// getcontext
+// getitimer
+// getvfsstat
+// getxattr
+// ioctl
+// ktrace
+// lchflags
+// lchmod
+// lfs_bmapv
+// lfs_markv
+// lfs_segclean
+// lfs_segwait
+// lgetxattr
+// lio_listio
+// listxattr
+// llistxattr
+// lremovexattr
+// lseek
+// lsetxattr
+// lutimes
+// madvise
+// mincore
+// minherit
+// modctl
+// mq_close
+// mq_getattr
+// mq_notify
+// mq_open
+// mq_receive
+// mq_send
+// mq_setattr
+// mq_timedreceive
+// mq_timedsend
+// mq_unlink
+// mremap
+// msgget
+// msgrcv
+// msgsnd
+// nfssvc
+// ntp_adjtime
+// pmc_control
+// pmc_get_info
+// poll
+// pollts
+// preadv
+// profil
+// pselect
+// pset_assign
+// pset_create
+// pset_destroy
+// ptrace
+// pwritev
+// quotactl
+// rasctl
+// readv
+// reboot
+// removexattr
+// sa_enable
+// sa_preempt
+// sa_register
+// sa_setconcurrency
+// sa_stacks
+// sa_yield
+// sbrk
+// sched_yield
+// semconfig
+// semget
+// semop
+// setcontext
+// setitimer
+// setxattr
+// shmat
+// shmdt
+// shmget
+// sstk
+// statvfs1
+// swapctl
+// sysarch
+// syscall
+// timer_create
+// timer_delete
+// timer_getoverrun
+// timer_gettime
+// timer_settime
+// undelete
+// utrace
+// uuidgen
+// vadvise
+// vfork
+// writev
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
new file mode 100644
index 0000000..afaca09
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
@@ -0,0 +1,42 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,netbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int64(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = uint32(mode)
+ k.Flags = uint32(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
new file mode 100644
index 0000000..a6ff04c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
@@ -0,0 +1,42 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,netbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int64(nsec / 1e9)
+ ts.Nsec = int64(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = uint32(mode)
+ k.Flags = uint32(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
new file mode 100644
index 0000000..68a6969
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
@@ -0,0 +1,42 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,netbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int64(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = uint32(mode)
+ k.Flags = uint32(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_no_getwd.go b/vendor/golang.org/x/sys/unix/syscall_no_getwd.go
new file mode 100644
index 0000000..530792e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_no_getwd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd netbsd openbsd
+
+package unix
+
+const ImplementsGetwd = false
+
+func Getwd() (string, error) { return "", ENOTSUP }
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
new file mode 100644
index 0000000..554a823
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -0,0 +1,304 @@
+// Copyright 2009,2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OpenBSD system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_bsd.go or syscall_unix.go.
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type SockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [24]int8
+ raw RawSockaddrDatalink
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+func nametomib(name string) (mib []_C_int, err error) {
+
+ // Perform lookup via a binary search
+ left := 0
+ right := len(sysctlMib) - 1
+ for {
+ idx := left + (right-left)/2
+ switch {
+ case name == sysctlMib[idx].ctlname:
+ return sysctlMib[idx].ctloid, nil
+ case name > sysctlMib[idx].ctlname:
+ left = idx + 1
+ default:
+ right = idx - 1
+ }
+ if left > right {
+ break
+ }
+ }
+ return nil, EINVAL
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Fileno == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:dirent.Namlen])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sys getdents(fd int, buf []byte) (n int, err error)
+func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ return getdents(fd, buf)
+}
+
+// TODO
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ return -1, ENOSYS
+}
+
+func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ var bufsize uintptr
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ use(unsafe.Pointer(_p0))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chflags(path string, flags int) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(from int, to int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchflags(fd int, flags int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, stat *Statfs_t) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (uid int)
+//sysnb Getgid() (gid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgrp int)
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Issetugid() (tainted bool)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Kqueue() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Revoke(path string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
+//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Setlogin(name string) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tp *Timeval) (err error)
+//sysnb Setuid(uid int) (err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, stat *Statfs_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Umask(newmask int) (oldmask int)
+//sys Unlink(path string) (err error)
+//sys Unmount(path string, flags int) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
+//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
+
+/*
+ * Unimplemented
+ */
+// __getcwd
+// __semctl
+// __syscall
+// __sysctl
+// adjfreq
+// break
+// clock_getres
+// clock_gettime
+// clock_settime
+// closefrom
+// execve
+// faccessat
+// fchmodat
+// fchownat
+// fcntl
+// fhopen
+// fhstat
+// fhstatfs
+// fork
+// fstatat
+// futimens
+// getfh
+// getgid
+// getitimer
+// getlogin
+// getresgid
+// getresuid
+// getrtable
+// getthrid
+// ioctl
+// ktrace
+// lfs_bmapv
+// lfs_markv
+// lfs_segclean
+// lfs_segwait
+// linkat
+// mincore
+// minherit
+// mkdirat
+// mkfifoat
+// mknodat
+// mount
+// mquery
+// msgctl
+// msgget
+// msgrcv
+// msgsnd
+// nfssvc
+// nnpfspioctl
+// openat
+// poll
+// preadv
+// profil
+// pwritev
+// quotactl
+// readlinkat
+// readv
+// reboot
+// renameat
+// rfork
+// sched_yield
+// semget
+// semop
+// setgroups
+// setitimer
+// setrtable
+// setsockopt
+// shmat
+// shmctl
+// shmdt
+// shmget
+// sigaction
+// sigaltstack
+// sigpending
+// sigprocmask
+// sigreturn
+// sigsuspend
+// symlinkat
+// sysarch
+// syscall
+// threxit
+// thrsigdivert
+// thrsleep
+// thrwakeup
+// unlinkat
+// utimensat
+// vfork
+// writev
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
new file mode 100644
index 0000000..a66ddc5
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
@@ -0,0 +1,42 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386,openbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int64(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint32(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
new file mode 100644
index 0000000..0776c1f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
@@ -0,0 +1,42 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,openbsd
+
+package unix
+
+func Getpagesize() int { return 4096 }
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = nsec % 1e9 / 1e3
+ tv.Sec = nsec / 1e9
+ return
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
new file mode 100644
index 0000000..acb74b1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -0,0 +1,725 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Solaris system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and wrap
+// it in our own nicer implementation, either here or in
+// syscall_solaris.go or syscall_unix.go.
+
+package unix
+
+import (
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+// Implemented in runtime/syscall_solaris.go.
+type syscallFunc uintptr
+
+func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+type SockaddrDatalink struct {
+ Family uint16
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [244]int8
+ raw RawSockaddrDatalink
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ if dirent.Reclen == 0 {
+ buf = nil
+ break
+ }
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sysnb pipe(p *[2]_C_int) (n int, err error)
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ n, err := pipe(&pp)
+ if n != 0 {
+ return err
+ }
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return nil
+}
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n >= len(sa.raw.Path) {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ // length is family (uint16), name, NUL.
+ sl := _Socklen(2)
+ if n > 0 {
+ sl += _Socklen(n) + 1
+ }
+ if sa.raw.Path[0] == '@' {
+ sa.raw.Path[0] = 0
+ // Don't count trailing NUL for abstract address.
+ sl--
+ }
+
+ return unsafe.Pointer(&sa.raw), sl, nil
+}
+
+//sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+const ImplementsGetwd = true
+
+//sys Getcwd(buf []byte) (n int, err error)
+
+func Getwd() (wd string, err error) {
+ var buf [PathMax]byte
+ // Getcwd will return an error if it failed for any reason.
+ _, err = Getcwd(buf[0:])
+ if err != nil {
+ return "", err
+ }
+ n := clen(buf[:])
+ if n < 1 {
+ return "", EINVAL
+ }
+ return string(buf[:n]), nil
+}
+
+/*
+ * Wrapped
+ */
+
+//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error)
+//sysnb setgroups(ngid int, gid *_Gid_t) (err error)
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ // Check for error and sanity check group count. Newer versions of
+ // Solaris allow up to 1024 (NGROUPS_MAX).
+ if n < 0 || n > 1024 {
+ if err != nil {
+ return nil, err
+ }
+ return nil, EINVAL
+ } else if n == 0 {
+ return nil, nil
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if n == -1 {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+func Setgroups(gids []int) (err error) {
+ if len(gids) == 0 {
+ return setgroups(0, nil)
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ return setgroups(len(a), &a[0])
+}
+
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ // Final argument is (basep *uintptr) and the syscall doesn't take nil.
+ // TODO(rsc): Can we use a single global basep for all calls?
+ return Getdents(fd, buf, new(uintptr))
+}
+
+// Wait status is 7 bits at bottom, either 0 (exited),
+// 0x7F (stopped), or a signal number that caused an exit.
+// The 0x80 bit is whether there was a core dump.
+// An extra number (exit code, signal causing a stop)
+// is in the high bits.
+
+type WaitStatus uint32
+
+const (
+ mask = 0x7F
+ core = 0x80
+ shift = 8
+
+ exited = 0
+ stopped = 0x7F
+)
+
+func (w WaitStatus) Exited() bool { return w&mask == exited }
+
+func (w WaitStatus) ExitStatus() int {
+ if w&mask != exited {
+ return -1
+ }
+ return int(w >> shift)
+}
+
+func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != 0 }
+
+func (w WaitStatus) Signal() syscall.Signal {
+ sig := syscall.Signal(w & mask)
+ if sig == stopped || sig == 0 {
+ return -1
+ }
+ return sig
+}
+
+func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
+
+func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP }
+
+func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP }
+
+func (w WaitStatus) StopSignal() syscall.Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return syscall.Signal(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) TrapCause() int { return -1 }
+
+//sys wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, err error)
+
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (int, error) {
+ var status _C_int
+ rpid, err := wait4(int32(pid), &status, options, rusage)
+ wpid := int(rpid)
+ if wpid == -1 {
+ return wpid, err
+ }
+ if wstatus != nil {
+ *wstatus = WaitStatus(status)
+ }
+ return wpid, nil
+}
+
+//sys gethostname(buf []byte) (n int, err error)
+
+func Gethostname() (name string, err error) {
+ var buf [MaxHostNameLen]byte
+ n, err := gethostname(buf[:])
+ if n != 0 {
+ return "", err
+ }
+ n = clen(buf[:])
+ if n < 1 {
+ return "", EFAULT
+ }
+ return string(buf[:n]), nil
+}
+
+//sys utimes(path string, times *[2]Timeval) (err error)
+
+func Utimes(path string, tv []Timeval) (err error) {
+ if tv == nil {
+ return utimes(path, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys utimensat(fd int, path string, times *[2]Timespec, flag int) (err error)
+
+func UtimesNano(path string, ts []Timespec) error {
+ if ts == nil {
+ return utimensat(AT_FDCWD, path, nil, 0)
+ }
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
+ if ts == nil {
+ return utimensat(dirfd, path, nil, flags)
+ }
+ if len(ts) != 2 {
+ return EINVAL
+ }
+ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags)
+}
+
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0)
+ if e1 != 0 {
+ return e1
+ }
+ return nil
+}
+
+//sys futimesat(fildes int, path *byte, times *[2]Timeval) (err error)
+
+func Futimesat(dirfd int, path string, tv []Timeval) error {
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ if tv == nil {
+ return futimesat(dirfd, pathp, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+// Solaris doesn't have an futimes function because it allows NULL to be
+// specified as the path for futimesat. However, Go doesn't like
+// NULL-style string interfaces, so this simple wrapper is provided.
+func Futimes(fd int, tv []Timeval) error {
+ if tv == nil {
+ return futimesat(fd, nil, nil)
+ }
+ if len(tv) != 2 {
+ return EINVAL
+ }
+ return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ sa := new(SockaddrUnix)
+ // Assume path ends at NUL.
+ // This is not technically the Solaris semantics for
+ // abstract Unix domain sockets -- they are supposed
+ // to be uninterpreted fixed-size binary blobs -- but
+ // everyone uses this convention.
+ n := 0
+ for n < len(pp.Path) && pp.Path[n] != 0 {
+ n++
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
+
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = libsocket.accept
+
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept(fd, &rsa, &len)
+ if nfd == -1 {
+ return
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var msg Msghdr
+ var rsa RawSockaddrAny
+ msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*int8)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy int8
+ if len(oob) > 0 {
+ // receive at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Accrights = (*int8)(unsafe.Pointer(&oob[0]))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); n == -1 {
+ return
+ }
+ oobn = int(msg.Accrightslen)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.sendmsg
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(ptr))
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*int8)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy int8
+ if len(oob) > 0 {
+ // send at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Accrights = (*int8)(unsafe.Pointer(&oob[0]))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
+
+//sys acct(path *byte) (err error)
+
+func Acct(path string) (err error) {
+ if len(path) == 0 {
+ // Assume caller wants to disable accounting.
+ return acct(nil)
+ }
+
+ pathp, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ return acct(pathp)
+}
+
+/*
+ * Expose the ioctl function
+ */
+
+//sys ioctl(fd int, req int, arg uintptr) (err error)
+
+func IoctlSetInt(fd int, req int, value int) (err error) {
+ return ioctl(fd, req, uintptr(value))
+}
+
+func IoctlSetWinsize(fd int, req int, value *Winsize) (err error) {
+ return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlSetTermios(fd int, req int, value *Termios) (err error) {
+ return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlSetTermio(fd int, req int, value *Termio) (err error) {
+ return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlGetInt(fd int, req int) (int, error) {
+ var value int
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
+ var value Winsize
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req int) (*Termios, error) {
+ var value Termios
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlGetTermio(fd int, req int) (*Termio, error) {
+ var value Termio
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+/*
+ * Exposed directly
+ */
+//sys Access(path string, mode uint32) (err error)
+//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
+//sys Chdir(path string) (err error)
+//sys Chmod(path string, mode uint32) (err error)
+//sys Chown(path string, uid int, gid int) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Creat(path string, mode uint32) (fd int, err error)
+//sys Dup(fd int) (nfd int, err error)
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Exit(code int)
+//sys Fchdir(fd int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
+//sys Fdatasync(fd int) (err error)
+//sys Fpathconf(fd int, name int) (val int, err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error)
+//sysnb Getgid() (gid int)
+//sysnb Getpid() (pid int)
+//sysnb Getpgid(pid int) (pgid int, err error)
+//sysnb Getpgrp() (pgid int, err error)
+//sys Geteuid() (euid int)
+//sys Getegid() (egid int)
+//sys Getppid() (ppid int)
+//sys Getpriority(which int, who int) (n int, err error)
+//sysnb Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Getuid() (uid int)
+//sys Kill(pid int, signum syscall.Signal) (err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Link(path string, link string) (err error)
+//sys Listen(s int, backlog int) (err error) = libsocket.listen
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Madvise(b []byte, advice int) (err error)
+//sys Mkdir(path string, mode uint32) (err error)
+//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
+//sys Mkfifo(path string, mode uint32) (err error)
+//sys Mkfifoat(dirfd int, path string, mode uint32) (err error)
+//sys Mknod(path string, mode uint32, dev int) (err error)
+//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Munlockall() (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Open(path string, mode int, perm uint32) (fd int, err error)
+//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
+//sys Pathconf(path string, name int) (val int, err error)
+//sys Pause() (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys read(fd int, p []byte) (n int, err error)
+//sys Readlink(path string, buf []byte) (n int, err error)
+//sys Rename(from string, to string) (err error)
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
+//sys Rmdir(path string) (err error)
+//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek
+//sysnb Setegid(egid int) (err error)
+//sysnb Seteuid(euid int) (err error)
+//sysnb Setgid(gid int) (err error)
+//sys Sethostname(p []byte) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sys Setpriority(which int, who int, prio int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sysnb Setrlimit(which int, lim *Rlimit) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Setuid(uid int) (err error)
+//sys Shutdown(s int, how int) (err error) = libsocket.shutdown
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Symlink(path string, link string) (err error)
+//sys Sync() (err error)
+//sysnb Times(tms *Tms) (ticks uintptr, err error)
+//sys Truncate(path string, length int64) (err error)
+//sys Fsync(fd int) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sys Umask(mask int) (oldmask int)
+//sysnb Uname(buf *Utsname) (err error)
+//sys Unmount(target string, flags int) (err error) = libc.umount
+//sys Unlink(path string) (err error)
+//sys Unlinkat(dirfd int, path string, flags int) (err error)
+//sys Ustat(dev int, ubuf *Ustat_t) (err error)
+//sys Utime(path string, buf *Utimbuf) (err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.bind
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.connect
+//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
+//sys munmap(addr uintptr, length uintptr) (err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.sendto
+//sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.socket
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.socketpair
+//sys write(fd int, p []byte) (n int, err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.getsockopt
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom
+
+func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
+ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
+
+//sys sysconf(name int) (n int64, err error)
+
+// pageSize caches the value of Getpagesize, since it can't change
+// once the system is booted.
+var pageSize int64 // accessed atomically
+
+func Getpagesize() int {
+ n := atomic.LoadInt64(&pageSize)
+ if n == 0 {
+ n, _ = sysconf(_SC_PAGESIZE)
+ atomic.StoreInt64(&pageSize, n)
+ }
+ return int(n)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
new file mode 100644
index 0000000..5aff62c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
@@ -0,0 +1,35 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,solaris
+
+package unix
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = nsec % 1e9 / 1e3
+ tv.Sec = int64(nsec / 1e9)
+ return
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ // TODO(aram): implement this, see issue 5847.
+ panic("unimplemented")
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
new file mode 100644
index 0000000..b46b250
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -0,0 +1,297 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import (
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ Stdin = 0
+ Stdout = 1
+ Stderr = 2
+)
+
+const (
+ darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8
+ dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8
+ netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4
+)
+
+// Do the interface allocations only once for common
+// Errno values.
+var (
+ errEAGAIN error = syscall.EAGAIN
+ errEINVAL error = syscall.EINVAL
+ errENOENT error = syscall.ENOENT
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return nil
+ case EAGAIN:
+ return errEAGAIN
+ case EINVAL:
+ return errEINVAL
+ case ENOENT:
+ return errENOENT
+ }
+ return e
+}
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+// Mmap manager, for use by operating system-specific implementations.
+
+type mmapper struct {
+ sync.Mutex
+ active map[*byte][]byte // active mappings; key is last byte in mapping
+ mmap func(addr, length uintptr, prot, flags, fd int, offset int64) (uintptr, error)
+ munmap func(addr uintptr, length uintptr) error
+}
+
+func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ if length <= 0 {
+ return nil, EINVAL
+ }
+
+ // Map the requested memory.
+ addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Slice memory layout
+ var sl = struct {
+ addr uintptr
+ len int
+ cap int
+ }{addr, length, length}
+
+ // Use unsafe to turn sl into a []byte.
+ b := *(*[]byte)(unsafe.Pointer(&sl))
+
+ // Register mapping in m and return it.
+ p := &b[cap(b)-1]
+ m.Lock()
+ defer m.Unlock()
+ m.active[p] = b
+ return b, nil
+}
+
+func (m *mmapper) Munmap(data []byte) (err error) {
+ if len(data) == 0 || len(data) != cap(data) {
+ return EINVAL
+ }
+
+ // Find the base of the mapping.
+ p := &data[cap(data)-1]
+ m.Lock()
+ defer m.Unlock()
+ b := m.active[p]
+ if b == nil || &b[0] != &data[0] {
+ return EINVAL
+ }
+
+ // Unmap the memory and update m.
+ if errno := m.munmap(uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))); errno != nil {
+ return errno
+ }
+ delete(m.active, p)
+ return nil
+}
+
+func Read(fd int, p []byte) (n int, err error) {
+ n, err = read(fd, p)
+ if raceenabled {
+ if n > 0 {
+ raceWriteRange(unsafe.Pointer(&p[0]), n)
+ }
+ if err == nil {
+ raceAcquire(unsafe.Pointer(&ioSync))
+ }
+ }
+ return
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = write(fd, p)
+ if raceenabled && n > 0 {
+ raceReadRange(unsafe.Pointer(&p[0]), n)
+ }
+ return
+}
+
+// For testing: clients can set this flag to force
+// creation of IPv6 sockets to return EAFNOSUPPORT.
+var SocketDisableIPv6 bool
+
+type Sockaddr interface {
+ sockaddr() (ptr unsafe.Pointer, len _Socklen, err error) // lowercase; only we can define Sockaddrs
+}
+
+type SockaddrInet4 struct {
+ Port int
+ Addr [4]byte
+ raw RawSockaddrInet4
+}
+
+type SockaddrInet6 struct {
+ Port int
+ ZoneId uint32
+ Addr [16]byte
+ raw RawSockaddrInet6
+}
+
+type SockaddrUnix struct {
+ Name string
+ raw RawSockaddrUnix
+}
+
+func Bind(fd int, sa Sockaddr) (err error) {
+ ptr, n, err := sa.sockaddr()
+ if err != nil {
+ return err
+ }
+ return bind(fd, ptr, n)
+}
+
+func Connect(fd int, sa Sockaddr) (err error) {
+ ptr, n, err := sa.sockaddr()
+ if err != nil {
+ return err
+ }
+ return connect(fd, ptr, n)
+}
+
+func Getpeername(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getpeername(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+func GetsockoptInt(fd, level, opt int) (value int, err error) {
+ var n int32
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen)
+ return int(n), err
+}
+
+func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if n, err = recvfrom(fd, p, flags, &rsa, &len); err != nil {
+ return
+ }
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) {
+ ptr, n, err := to.sockaddr()
+ if err != nil {
+ return err
+ }
+ return sendto(fd, p, flags, ptr, n)
+}
+
+func SetsockoptByte(fd, level, opt int, value byte) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&value), 1)
+}
+
+func SetsockoptInt(fd, level, opt int, value int) (err error) {
+ var n = int32(value)
+ return setsockopt(fd, level, opt, unsafe.Pointer(&n), 4)
+}
+
+func SetsockoptInet4Addr(fd, level, opt int, value [4]byte) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&value[0]), 4)
+}
+
+func SetsockoptIPMreq(fd, level, opt int, mreq *IPMreq) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPMreq)
+}
+
+func SetsockoptIPv6Mreq(fd, level, opt int, mreq *IPv6Mreq) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), SizeofIPv6Mreq)
+}
+
+func SetsockoptICMPv6Filter(fd, level, opt int, filter *ICMPv6Filter) error {
+ return setsockopt(fd, level, opt, unsafe.Pointer(filter), SizeofICMPv6Filter)
+}
+
+func SetsockoptLinger(fd, level, opt int, l *Linger) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(l), SizeofLinger)
+}
+
+func SetsockoptString(fd, level, opt int, s string) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(&[]byte(s)[0]), uintptr(len(s)))
+}
+
+func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
+}
+
+func Socket(domain, typ, proto int) (fd int, err error) {
+ if domain == AF_INET6 && SocketDisableIPv6 {
+ return -1, EAFNOSUPPORT
+ }
+ fd, err = socket(domain, typ, proto)
+ return
+}
+
+func Socketpair(domain, typ, proto int) (fd [2]int, err error) {
+ var fdx [2]int32
+ err = socketpair(domain, typ, proto, &fdx)
+ if err == nil {
+ fd[0] = int(fdx[0])
+ fd[1] = int(fdx[1])
+ }
+ return
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ return sendfile(outfd, infd, offset, count)
+}
+
+var ioSync int64
+
+func CloseOnExec(fd int) { fcntl(fd, F_SETFD, FD_CLOEXEC) }
+
+func SetNonblock(fd int, nonblocking bool) (err error) {
+ flag, err := fcntl(fd, F_GETFL, 0)
+ if err != nil {
+ return err
+ }
+ if nonblocking {
+ flag |= O_NONBLOCK
+ } else {
+ flag &= ^O_NONBLOCK
+ }
+ _, err = fcntl(fd, F_SETFL, flag)
+ return err
+}
diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go
new file mode 100644
index 0000000..1153261
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_darwin.go
@@ -0,0 +1,250 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See also mkerrors.sh and mkall.sh
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define __DARWIN_UNIX03 0
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_dl s5;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+*/
+import "C"
+
+// Machine characteristics; for internal use.
+
+const (
+ sizeofPtr = C.sizeofPtr
+ sizeofShort = C.sizeof_short
+ sizeofInt = C.sizeof_int
+ sizeofLong = C.sizeof_long
+ sizeofLongLong = C.sizeof_longlong
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+type Timeval32 C.struct_timeval32
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat64
+
+type Statfs_t C.struct_statfs64
+
+type Flock_t C.struct_flock
+
+type Fstore_t C.struct_fstore
+
+type Radvisory_t C.struct_radvisory
+
+type Fbootstraptransfer_t C.struct_fbootstraptransfer
+
+type Log2phys_t C.struct_log2phys
+
+type Fsid C.struct_fsid
+
+type Dirent C.struct_dirent
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_sockaddr_un
+
+type RawSockaddrDatalink C.struct_sockaddr_dl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet4Pktinfo C.struct_in_pktinfo
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+// Ptrace requests
+
+const (
+ PTRACE_TRACEME = C.PT_TRACE_ME
+ PTRACE_CONT = C.PT_CONTINUE
+ PTRACE_KILL = C.PT_KILL
+)
+
+// Events (kqueue, kevent)
+
+type Kevent_t C.struct_kevent
+
+// Select
+
+type FdSet C.fd_set
+
+// Routing and interface messages
+
+const (
+ SizeofIfMsghdr = C.sizeof_struct_if_msghdr
+ SizeofIfData = C.sizeof_struct_if_data
+ SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
+ SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
+ SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
+ SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
+ SizeofRtMetrics = C.sizeof_struct_rt_metrics
+)
+
+type IfMsghdr C.struct_if_msghdr
+
+type IfData C.struct_if_data
+
+type IfaMsghdr C.struct_ifa_msghdr
+
+type IfmaMsghdr C.struct_ifma_msghdr
+
+type IfmaMsghdr2 C.struct_ifma_msghdr2
+
+type RtMsghdr C.struct_rt_msghdr
+
+type RtMetrics C.struct_rt_metrics
+
+// Berkeley packet filter
+
+const (
+ SizeofBpfVersion = C.sizeof_struct_bpf_version
+ SizeofBpfStat = C.sizeof_struct_bpf_stat
+ SizeofBpfProgram = C.sizeof_struct_bpf_program
+ SizeofBpfInsn = C.sizeof_struct_bpf_insn
+ SizeofBpfHdr = C.sizeof_struct_bpf_hdr
+)
+
+type BpfVersion C.struct_bpf_version
+
+type BpfStat C.struct_bpf_stat
+
+type BpfProgram C.struct_bpf_program
+
+type BpfInsn C.struct_bpf_insn
+
+type BpfHdr C.struct_bpf_hdr
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+// fchmodat-like syscalls.
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go
new file mode 100644
index 0000000..f3c971d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go
@@ -0,0 +1,242 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See also mkerrors.sh and mkall.sh
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package unix
+
+/*
+#define KERNEL
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include