Mirror of https://github.com/ribbybibby/ssl_exporter.git (synced 2026-02-19 20:19:52 +00:00)

Compare commits: v2.2.1...dane-verif (1 commit)
| Author | SHA1 | Date |
|---|---|---|
| | 4a18f54c81 | |
@@ -9,7 +9,6 @@ builds:
  - windows
  goarch:
  - amd64
  - mips64le
  flags:
  - -v
  ldflags: |
README.md — 76 changed lines
@@ -1,14 +1,34 @@
# SSL Certificate Exporter

Exports metrics for certificates collected from various sources:

- [TCP probes](#tcp)
- [HTTPS probes](#https)
- [PEM files](#file)
- [Kubernetes secrets](#kubernetes)
- [Kubeconfig files](#kubeconfig)

Exports metrics for certificates collected from TCP probes, local files or
Kubernetes secrets. The metrics are labelled with fields from the certificate,
which allows for informational dashboards and flexible alert routing.

The metrics are labelled with fields from the certificate, which allows for
informational dashboards and flexible alert routing.

## Table of Contents

- [SSL Certificate Exporter](#ssl-certificate-exporter)
- [Table of Contents](#table-of-contents)
- [Building](#building)
- [Docker](#docker)
- [Release process](#release-process)
- [Usage](#usage)
- [Metrics](#metrics)
- [Configuration](#configuration)
- [TCP](#tcp)
- [HTTPS](#https)
- [File](#file)
- [Kubernetes](#kubernetes)
- [Configuration file](#configuration-file)
- [<module>](#module)
- [<tls_config>](#tls_config)
- [<https_probe>](#https_probe)
- [<tcp_probe>](#tcp_probe)
- [<kubernetes_probe>](#kubernetes_probe)
- [Example Queries](#example-queries)
- [Peer Certificates vs Verified Chain Certificates](#peer-certificates-vs-verified-chain-certificates)
- [Grafana](#grafana)

Created by [gh-md-toc](https://github.com/ekalinin/github-markdown-toc)

## Building
@@ -66,8 +86,6 @@ Flags:
| ssl_file_cert_not_before | The date before which a certificate found by the file prober is not valid. Expressed as a Unix Epoch Time. | file, serial_no, issuer_cn, cn, dnsnames, ips, emails, ou | file |
| ssl_kubernetes_cert_not_after | The date after which a certificate found by the kubernetes prober expires. Expressed as a Unix Epoch Time. | namespace, secret, key, serial_no, issuer_cn, cn, dnsnames, ips, emails, ou | kubernetes |
| ssl_kubernetes_cert_not_before | The date before which a certificate found by the kubernetes prober is not valid. Expressed as a Unix Epoch Time. | namespace, secret, key, serial_no, issuer_cn, cn, dnsnames, ips, emails, ou | kubernetes |
| ssl_kubeconfig_cert_not_after | The date after which a certificate found by the kubeconfig prober expires. Expressed as a Unix Epoch Time. | kubeconfig, name, type, serial_no, issuer_cn, cn, dnsnames, ips, emails, ou | kubeconfig |
| ssl_kubeconfig_cert_not_before | The date before which a certificate found by the kubeconfig prober is not valid. Expressed as a Unix Epoch Time. | kubeconfig, name, type, serial_no, issuer_cn, cn, dnsnames, ips, emails, ou | kubeconfig |
| ssl_ocsp_response_next_update | The nextUpdate value in the OCSP response. Expressed as a Unix Epoch Time | | tcp, https |
| ssl_ocsp_response_produced_at | The producedAt value in the OCSP response. Expressed as a Unix Epoch Time | | tcp, https |
| ssl_ocsp_response_revoked_at | The revocationTime value in the OCSP response. Expressed as a Unix Epoch Time | | tcp, https |
@@ -211,38 +229,6 @@ sources in the following order:
- The default configuration file (`$HOME/.kube/config`)
- The in-cluster environment, if running in a pod

### Kubeconfig

The `kubeconfig` prober exports `ssl_kubeconfig_cert_not_after` and
`ssl_kubeconfig_cert_not_before` for PEM encoded certificates found in the specified kubeconfig file.

Kubeconfigs local to the exporter can be scraped by providing them as the target
parameter:

```
curl "localhost:9219/probe?module=kubeconfig&target=/etc/kubernetes/admin.conf"
```

One specific usage of this prober could be to run the exporter as a DaemonSet in
Kubernetes and then scrape each instance to check the expiry of certificates on
each node:

```yml
scrape_configs:
  - job_name: "ssl-kubernetes-kubeconfig"
    metrics_path: /probe
    params:
      module: ["kubeconfig"]
      target: ["/etc/kubernetes/admin.conf"]
    kubernetes_sd_configs:
      - role: node
    relabel_configs:
      - source_labels: [__address__]
        regex: ^(.*):(.*)$
        target_label: __address__
        replacement: ${1}:9219
```

## Configuration file

You can provide further module configuration by providing the path to a
@@ -256,7 +242,7 @@ modules: [<module>]
### \<module\>

```
# The type of probe (https, tcp, file, kubernetes, kubeconfig)
# The type of probe (https, tcp, file, kubernetes)
prober: <prober_string>

# How long the probe will wait before giving up.
@@ -277,6 +263,10 @@ prober: <prober_string>
# Disable target certificate validation.
[ insecure_skip_verify: <boolean> | default = false ]

# Use DANE to validate target certificates. Ignored if insecure_skip_verify is
# true
[ dane_verify: <boolean> | default = false ]

# The CA cert to use for the targets.
[ ca_file: <filename> ]
@@ -1,6 +1,7 @@
package config

import (
	"crypto/tls"
	"fmt"
	"net/url"
	"os"
@@ -30,9 +31,6 @@ var (
		"kubernetes": Module{
			Prober: "kubernetes",
		},
		"kubeconfig": Module{
			Prober: "kubeconfig",
		},
	},
}
)
@@ -63,12 +61,33 @@ type Config struct {

// Module configures a prober
type Module struct {
	Prober     string           `yaml:"prober,omitempty"`
	Timeout    time.Duration    `yaml:"timeout,omitempty"`
	TLSConfig  config.TLSConfig `yaml:"tls_config,omitempty"`
	HTTPS      HTTPSProbe       `yaml:"https,omitempty"`
	TCP        TCPProbe         `yaml:"tcp,omitempty"`
	Kubernetes KubernetesProbe  `yaml:"kubernetes,omitempty"`
	Prober     string          `yaml:"prober,omitempty"`
	Timeout    time.Duration   `yaml:"timeout,omitempty"`
	TLSConfig  TLSConfig       `yaml:"tls_config,omitempty"`
	HTTPS      HTTPSProbe      `yaml:"https,omitempty"`
	TCP        TCPProbe        `yaml:"tcp,omitempty"`
	Kubernetes KubernetesProbe `yaml:"kubernetes,omitempty"`
}

// TLSConfig configures the options for TLS connections.
type TLSConfig struct {
	CAFile             string `yaml:"ca_file,omitempty"`
	CertFile           string `yaml:"cert_file,omitempty"`
	KeyFile            string `yaml:"key_file,omitempty"`
	ServerName         string `yaml:"server_name,omitempty"`
	InsecureSkipVerify bool   `yaml:"insecure_skip_verify"`
	DANEVerify         bool   `yaml:"dane_verify,omitempty"`
}

// NewTLSConfig creates a new tls.Config from the given TLSConfig
func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) {
	return config.NewTLSConfig(&config.TLSConfig{
		CAFile:             cfg.CAFile,
		CertFile:           cfg.CertFile,
		KeyFile:            cfg.KeyFile,
		ServerName:         cfg.ServerName,
		InsecureSkipVerify: cfg.InsecureSkipVerify,
	})
}

// TCPProbe configures a tcp probe
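The package-local TLSConfig above carries dane_verify alongside the standard TLS fields, while NewTLSConfig forwards only the fields that prometheus/common understands, so the DANE flag is left for the probers to act on. A minimal sketch of building a tls.Config this way and dialing with it — the host is a placeholder, not taken from the repository:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/ribbybibby/ssl_exporter/config"
)

func main() {
	// Module-level TLS options, as they would come out of ssl_exporter.yaml.
	cfg := &config.TLSConfig{
		ServerName: "example.com", // placeholder
		DANEVerify: true,          // new field; not forwarded by NewTLSConfig
	}

	// Only the fields understood by prometheus/common are mapped here.
	tlsConfig, err := config.NewTLSConfig(cfg)
	if err != nil {
		panic(err)
	}

	conn, err := tls.Dial("tcp", "example.com:443", tlsConfig)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("negotiated TLS version:", conn.ConnectionState().Version)
}
```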
@@ -28,6 +28,12 @@ modules:
    prober: tcp
    tcp:
      starttls: smtp
  tcp_smtp_dane:
    prober: tcp
    tls_config:
      dane_verify: true
    tcp:
      starttls: smtp
  file:
    prober: file
  kubernetes:
@@ -36,5 +42,3 @@ modules:
    prober: kubernetes
    kubernetes:
      kubeconfig: /root/.kube/config
  kubeconfig:
    prober: kubeconfig
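To see how the tcp_smtp_dane example above maps onto the Go types from config.go, here is a small sketch that unmarshals just that module; the starttls yaml tag on TCPProbe is assumed from the example config rather than shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/ribbybibby/ssl_exporter/config"
	"gopkg.in/yaml.v3"
)

func main() {
	// The body of the tcp_smtp_dane module from the example config above.
	raw := []byte(`
prober: tcp
tls_config:
  dane_verify: true
tcp:
  starttls: smtp
`)

	var m config.Module
	if err := yaml.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// Expected output: "tcp true smtp"
	fmt.Println(m.Prober, m.TLSConfig.DANEVerify, m.TCP.StartTLS)
}
```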
go.mod — 3 changed lines
@@ -2,10 +2,11 @@ module github.com/ribbybibby/ssl_exporter

require (
	github.com/bmatcuk/doublestar/v2 v2.0.3
	github.com/go-kit/log v0.1.0
	github.com/miekg/dns v1.0.14
	github.com/prometheus/client_golang v1.8.0
	github.com/prometheus/client_model v0.2.0
	github.com/prometheus/common v0.14.0
	github.com/sirupsen/logrus v1.7.0 // indirect
	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
	golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0 // indirect
	golang.org/x/sys v0.0.0-20201018121011-98379d014ca7 // indirect
go.sum — 13 changed lines
@@ -105,13 +105,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
|
||||
@@ -121,7 +117,6 @@ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
@@ -207,7 +202,6 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
@@ -249,6 +243,7 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
@@ -353,6 +348,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
@@ -447,13 +444,13 @@ golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwY
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -474,6 +471,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -618,7 +616,6 @@ k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
|
||||
|
||||
@@ -5,13 +5,12 @@ import (
	"fmt"

	"github.com/bmatcuk/doublestar/v2"
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/ribbybibby/ssl_exporter/config"
)

// ProbeFile collects certificate metrics from local files
func ProbeFile(ctx context.Context, logger log.Logger, target string, module config.Module, registry *prometheus.Registry) error {
func ProbeFile(ctx context.Context, target string, module config.Module, registry *prometheus.Registry) error {
	errCh := make(chan error, 1)

	go func() {
@@ -24,7 +23,7 @@ func ProbeFile(ctx context.Context, logger log.Logger, target string, module con
		if len(files) == 0 {
			errCh <- fmt.Errorf("No files found")
		} else {
			errCh <- collectFileMetrics(logger, files, registry)
			errCh <- collectFileMetrics(files, registry)
		}
	}()
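ProbeFile treats the target as a doublestar glob (the expansion happens just before the hunk shown here) and hands any matching files to collectFileMetrics. A rough standalone sketch of that pattern — the glob is made up, and the PEM handling is simplified compared to the exporter's decodeCertificates:

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"

	"github.com/bmatcuk/doublestar/v2"
)

func main() {
	// Hypothetical glob; "**" crosses directory boundaries, as in the tests below.
	files, err := doublestar.Glob("/etc/ssl/private/**/*.crt")
	if err != nil {
		panic(err)
	}

	for _, f := range files {
		data, err := ioutil.ReadFile(f)
		if err != nil {
			continue // the prober logs and skips unreadable files
		}
		// Walk every PEM block in the file and parse the certificates it holds.
		for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
			if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
				fmt.Println(f, cert.Subject.CommonName, cert.NotAfter)
			}
		}
	}
}
```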
@@ -32,7 +32,7 @@ func TestProbeFile(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := ProbeFile(ctx, newTestLogger(), certFile, module, registry); err != nil {
	if err := ProbeFile(ctx, certFile, module, registry); err != nil {
		t.Fatalf("error: %s", err)
	}
@@ -56,7 +56,7 @@ func TestProbeFileGlob(t *testing.T) {

	glob := filepath.Dir(certFile) + "/*.crt"

	if err := ProbeFile(ctx, newTestLogger(), glob, module, registry); err != nil {
	if err := ProbeFile(ctx, glob, module, registry); err != nil {
		t.Fatalf("error: %s", err)
	}
@@ -84,7 +84,7 @@ func TestProbeFileGlobDoubleStar(t *testing.T) {

	glob := filepath.Dir(filepath.Dir(certFile)) + "/**/*.crt"

	if err := ProbeFile(ctx, newTestLogger(), glob, module, registry); err != nil {
	if err := ProbeFile(ctx, glob, module, registry); err != nil {
		t.Fatalf("error: %s", err)
	}
@@ -126,7 +126,7 @@ func TestProbeFileGlobDoubleStarMultiple(t *testing.T) {

	glob := tmpDir + "/**/*.crt"

	if err := ProbeFile(ctx, newTestLogger(), glob, module, registry); err != nil {
	if err := ProbeFile(ctx, glob, module, registry); err != nil {
		t.Fatalf("error: %s", err)
	}
@@ -9,15 +9,14 @@ import (
	"net/url"
	"strings"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"github.com/ribbybibby/ssl_exporter/config"
)

// ProbeHTTPS performs a https probe
func ProbeHTTPS(ctx context.Context, logger log.Logger, target string, module config.Module, registry *prometheus.Registry) error {
	tlsConfig, err := newTLSConfig("", registry, &module.TLSConfig)
func ProbeHTTPS(ctx context.Context, target string, module config.Module, registry *prometheus.Registry) error {
	tlsConfig, err := newTLSConfig(target, registry, &module.TLSConfig)
	if err != nil {
		return err
	}
@@ -64,7 +63,7 @@ func ProbeHTTPS(ctx context.Context, logger log.Logger, target string, module co
	defer func() {
		_, err := io.Copy(ioutil.Discard, resp.Body)
		if err != nil {
			level.Error(logger).Log("msg", err)
			log.Errorln(err)
		}
		resp.Body.Close()
	}()
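With the logger parameter dropped, calling the HTTPS prober directly needs only a context, a host:port target, a module and a registry. A rough usage sketch against a placeholder target, printing the names of whatever metric families the probe registered:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/ribbybibby/ssl_exporter/config"
	"github.com/ribbybibby/ssl_exporter/prober"
)

func main() {
	module := config.Module{
		Prober:    "https",
		TLSConfig: config.TLSConfig{InsecureSkipVerify: false},
	}

	registry := prometheus.NewRegistry()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// "example.com:443" is a placeholder; the tests pass host:port targets.
	if err := prober.ProbeHTTPS(ctx, "example.com:443", module, registry); err != nil {
		panic(err)
	}

	mfs, err := registry.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}
```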
@@ -17,7 +17,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
pconfig "github.com/prometheus/common/config"
|
||||
"github.com/ribbybibby/ssl_exporter/config"
|
||||
"github.com/ribbybibby/ssl_exporter/test"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
@@ -35,7 +34,7 @@ func TestProbeHTTPS(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
@@ -46,7 +45,12 @@ func TestProbeHTTPS(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err != nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
@@ -77,7 +81,7 @@ func TestProbeHTTPSTimeout(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
},
|
||||
}
|
||||
@@ -87,7 +91,12 @@ func TestProbeHTTPSTimeout(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err == nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err == nil {
|
||||
t.Fatalf("Expected error but returned error was nil")
|
||||
}
|
||||
}
|
||||
@@ -105,7 +114,7 @@ func TestProbeHTTPSInvalidName(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
@@ -121,53 +130,11 @@ func TestProbeHTTPSInvalidName(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), "https://localhost:"+u.Port(), module, registry); err == nil {
|
||||
if err := ProbeHTTPS(ctx, "localhost:"+u.Port(), module, registry); err == nil {
|
||||
t.Fatalf("expected error, but err was nil")
|
||||
}
|
||||
}
|
||||
|
||||
// TestProbeHTTPSNoScheme tests that the probe is successful when the scheme is
|
||||
// omitted from the target. The scheme should be added by the prober.
|
||||
func TestProbeHTTPSNoScheme(t *testing.T) {
|
||||
server, certPEM, _, caFile, teardown, err := test.SetupHTTPSServer()
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
defer teardown()
|
||||
|
||||
server.StartTLS()
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
}
|
||||
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
registry := prometheus.NewRegistry()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), u.Host, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
cert, err := newCertificate(certPEM)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkCertificateMetrics(cert, registry, t)
|
||||
checkOCSPMetrics([]byte{}, registry, t)
|
||||
checkTLSVersionMetrics("TLS 1.3", registry, t)
|
||||
}
|
||||
|
||||
// TestProbeHTTPSServername tests that the probe is successful when the
|
||||
// servername is provided in the TLS config
|
||||
func TestProbeHTTPSServerName(t *testing.T) {
|
||||
@@ -186,7 +153,7 @@ func TestProbeHTTPSServerName(t *testing.T) {
|
||||
}
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
ServerName: u.Hostname(),
|
||||
@@ -198,7 +165,7 @@ func TestProbeHTTPSServerName(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), "https://localhost:"+u.Port(), module, registry); err != nil {
|
||||
if err := ProbeHTTPS(ctx, "localhost:"+u.Port(), module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
@@ -224,7 +191,12 @@ func TestProbeHTTPSHTTP(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, config.Module{}, registry); err == nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, config.Module{}, registry); err == nil {
|
||||
t.Fatalf("expected error, but err was nil")
|
||||
}
|
||||
}
|
||||
@@ -263,7 +235,7 @@ func TestProbeHTTPSClientAuth(t *testing.T) {
|
||||
defer os.Remove(keyFile)
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
@@ -276,7 +248,12 @@ func TestProbeHTTPSClientAuth(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err != nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
@@ -327,7 +304,7 @@ func TestProbeHTTPSClientAuthWrongClientCert(t *testing.T) {
|
||||
defer os.Remove(keyFile)
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
CertFile: certFile,
|
||||
KeyFile: keyFile,
|
||||
@@ -340,7 +317,12 @@ func TestProbeHTTPSClientAuthWrongClientCert(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err == nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err == nil {
|
||||
t.Fatalf("expected error but err is nil")
|
||||
}
|
||||
}
|
||||
@@ -365,7 +347,7 @@ func TestProbeHTTPSExpired(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
@@ -376,7 +358,12 @@ func TestProbeHTTPSExpired(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err == nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err == nil {
|
||||
t.Fatalf("expected error but err is nil")
|
||||
}
|
||||
}
|
||||
@@ -402,7 +389,7 @@ func TestProbeHTTPSExpiredInsecure(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
@@ -413,7 +400,12 @@ func TestProbeHTTPSExpiredInsecure(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err != nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
@@ -455,7 +447,7 @@ func TestProbeHTTPSProxy(t *testing.T) {
|
||||
}
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
@@ -470,14 +462,19 @@ func TestProbeHTTPSProxy(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err == nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err == nil {
|
||||
t.Fatalf("expected error but err was nil")
|
||||
}
|
||||
|
||||
// Test with the proxy url, this shouldn't return an error
|
||||
module.HTTPS.ProxyURL = config.URL{URL: proxyURL}
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err != nil {
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
@@ -517,7 +514,7 @@ func TestProbeHTTPSOCSP(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
},
|
||||
}
|
||||
@@ -527,7 +524,12 @@ func TestProbeHTTPSOCSP(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err != nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
@@ -598,7 +600,7 @@ func TestProbeHTTPSVerifiedChains(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
},
|
||||
}
|
||||
@@ -608,7 +610,12 @@ func TestProbeHTTPSVerifiedChains(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeHTTPS(ctx, newTestLogger(), server.URL, module, registry); err != nil {
|
||||
u, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
if err := ProbeHTTPS(ctx, u.Host, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,92 +0,0 @@
package prober

import (
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/ribbybibby/ssl_exporter/config"
	"gopkg.in/yaml.v3"
)

type KubeConfigCluster struct {
	Name    string
	Cluster KubeConfigClusterCert
}

type KubeConfigClusterCert struct {
	CertificateAuthority     string `yaml:"certificate-authority"`
	CertificateAuthorityData string `yaml:"certificate-authority-data"`
}

type KubeConfigUser struct {
	Name string
	User KubeConfigUserCert
}

type KubeConfigUserCert struct {
	ClientCertificate     string `yaml:"client-certificate"`
	ClientCertificateData string `yaml:"client-certificate-data"`
}

type KubeConfig struct {
	Path     string
	Clusters []KubeConfigCluster
	Users    []KubeConfigUser
}

// ProbeKubeconfig collects certificate metrics from kubeconfig files
func ProbeKubeconfig(ctx context.Context, logger log.Logger, target string, module config.Module, registry *prometheus.Registry) error {
	if _, err := os.Stat(target); err != nil {
		return fmt.Errorf("kubeconfig not found: %s", target)
	}
	k, err := ParseKubeConfig(target)
	if err != nil {
		return err
	}
	err = collectKubeconfigMetrics(logger, *k, registry)
	if err != nil {
		return err
	}
	return nil
}

func ParseKubeConfig(file string) (*KubeConfig, error) {
	k := &KubeConfig{}

	data, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}

	err = yaml.Unmarshal([]byte(data), k)
	if err != nil {
		return nil, err
	}
	k.Path = file
	clusters := []KubeConfigCluster{}
	users := []KubeConfigUser{}
	for _, c := range k.Clusters {
		// Path is relative to kubeconfig path
		if c.Cluster.CertificateAuthority != "" && !filepath.IsAbs(c.Cluster.CertificateAuthority) {
			newPath := filepath.Join(filepath.Dir(k.Path), c.Cluster.CertificateAuthority)
			c.Cluster.CertificateAuthority = newPath
		}
		clusters = append(clusters, c)
	}
	for _, u := range k.Users {
		// Path is relative to kubeconfig path
		if u.User.ClientCertificate != "" && !filepath.IsAbs(u.User.ClientCertificate) {
			newPath := filepath.Join(filepath.Dir(k.Path), u.User.ClientCertificate)
			u.User.ClientCertificate = newPath
		}
		users = append(users, u)
	}
	k.Clusters = clusters
	k.Users = users
	return k, nil
}
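This file only exists on the side of the comparison that still ships the kubeconfig prober. For reference, a short sketch of how its exported ParseKubeConfig was used — the kubeconfig path is the same illustrative one the README uses, and relative certificate paths are resolved against the file's directory as shown above:

```go
package main

import (
	"fmt"

	"github.com/ribbybibby/ssl_exporter/prober"
)

func main() {
	// Illustrative path; requires the version of the package that still
	// contains kubeconfig.go.
	k, err := prober.ParseKubeConfig("/etc/kubernetes/admin.conf")
	if err != nil {
		panic(err)
	}

	for _, c := range k.Clusters {
		fmt.Println("cluster:", c.Name, c.Cluster.CertificateAuthority)
	}
	for _, u := range k.Users {
		fmt.Println("user:", u.Name, u.User.ClientCertificate)
	}
}
```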
@@ -1,195 +0,0 @@
|
||||
package prober
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ribbybibby/ssl_exporter/config"
|
||||
"github.com/ribbybibby/ssl_exporter/test"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// TestProbeFile tests a file
|
||||
func TestProbeKubeconfig(t *testing.T) {
|
||||
cert, kubeconfig, err := createTestKubeconfig("", "kubeconfig")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
defer os.Remove(kubeconfig)
|
||||
|
||||
module := config.Module{}
|
||||
|
||||
registry := prometheus.NewRegistry()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeKubeconfig(ctx, newTestLogger(), kubeconfig, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
checkKubeconfigMetrics(cert, kubeconfig, registry, t)
|
||||
}
|
||||
|
||||
func TestParseKubeConfigRelative(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "kubeconfig")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to create Tempfile: %s", err.Error())
|
||||
}
|
||||
defer os.Remove(tmpFile.Name())
|
||||
file := []byte(`
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: certs/example/ca.pem
|
||||
server: https://master.example.com
|
||||
name: example
|
||||
users:
|
||||
- user:
|
||||
client-certificate: test/ca.pem
|
||||
name: example`)
|
||||
if _, err := tmpFile.Write(file); err != nil {
|
||||
t.Fatalf("Unable to write Tempfile: %s", err.Error())
|
||||
}
|
||||
expectedClusterPath := filepath.Join(filepath.Dir(tmpFile.Name()), "certs/example/ca.pem")
|
||||
expectedUserPath := filepath.Join(filepath.Dir(tmpFile.Name()), "test/ca.pem")
|
||||
k, err := ParseKubeConfig(tmpFile.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing kubeconfig: %s", err.Error())
|
||||
}
|
||||
if len(k.Clusters) != 1 {
|
||||
t.Fatalf("Unexpected length for Clusters, got %d", len(k.Clusters))
|
||||
}
|
||||
if k.Clusters[0].Cluster.CertificateAuthority != expectedClusterPath {
|
||||
t.Errorf("Unexpected CertificateAuthority value\nExpected: %s\nGot: %s", expectedClusterPath, k.Clusters[0].Cluster.CertificateAuthority)
|
||||
}
|
||||
if len(k.Users) != 1 {
|
||||
t.Fatalf("Unexpected length for Users, got %d", len(k.Users))
|
||||
}
|
||||
if k.Users[0].User.ClientCertificate != expectedUserPath {
|
||||
t.Errorf("Unexpected ClientCertificate value\nExpected: %s\nGot: %s", expectedUserPath, k.Users[0].User.ClientCertificate)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a certificate and write it to a file
|
||||
func createTestKubeconfig(dir, filename string) (*x509.Certificate, string, error) {
|
||||
certPEM, _ := test.GenerateTestCertificate(time.Now().Add(time.Hour * 1))
|
||||
clusterCert := KubeConfigClusterCert{CertificateAuthorityData: base64.StdEncoding.EncodeToString([]byte(certPEM))}
|
||||
clusters := []KubeConfigCluster{KubeConfigCluster{Name: "kubernetes", Cluster: clusterCert}}
|
||||
userCert := KubeConfigUserCert{ClientCertificateData: base64.StdEncoding.EncodeToString([]byte(certPEM))}
|
||||
users := []KubeConfigUser{KubeConfigUser{Name: "kubernetes-admin", User: userCert}}
|
||||
k := KubeConfig{
|
||||
Clusters: clusters,
|
||||
Users: users,
|
||||
}
|
||||
block, _ := pem.Decode([]byte(certPEM))
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
tmpFile, err := ioutil.TempFile(dir, filename)
|
||||
if err != nil {
|
||||
return nil, tmpFile.Name(), err
|
||||
}
|
||||
k.Path = tmpFile.Name()
|
||||
d, err := yaml.Marshal(&k)
|
||||
if err != nil {
|
||||
return nil, tmpFile.Name(), err
|
||||
}
|
||||
if _, err := tmpFile.Write(d); err != nil {
|
||||
return nil, tmpFile.Name(), err
|
||||
}
|
||||
if err := tmpFile.Close(); err != nil {
|
||||
return nil, tmpFile.Name(), err
|
||||
}
|
||||
|
||||
return cert, tmpFile.Name(), nil
|
||||
}
|
||||
|
||||
// Check metrics
|
||||
func checkKubeconfigMetrics(cert *x509.Certificate, kubeconfig string, registry *prometheus.Registry, t *testing.T) {
|
||||
mfs, err := registry.Gather()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ips := ","
|
||||
for _, ip := range cert.IPAddresses {
|
||||
ips = ips + ip.String() + ","
|
||||
}
|
||||
expectedResults := []*registryResult{
|
||||
®istryResult{
|
||||
Name: "ssl_kubeconfig_cert_not_after",
|
||||
LabelValues: map[string]string{
|
||||
"kubeconfig": kubeconfig,
|
||||
"name": "kubernetes",
|
||||
"type": "cluster",
|
||||
"serial_no": cert.SerialNumber.String(),
|
||||
"issuer_cn": cert.Issuer.CommonName,
|
||||
"cn": cert.Subject.CommonName,
|
||||
"dnsnames": "," + strings.Join(cert.DNSNames, ",") + ",",
|
||||
"ips": ips,
|
||||
"emails": "," + strings.Join(cert.EmailAddresses, ",") + ",",
|
||||
"ou": "," + strings.Join(cert.Subject.OrganizationalUnit, ",") + ",",
|
||||
},
|
||||
Value: float64(cert.NotAfter.Unix()),
|
||||
},
|
||||
®istryResult{
|
||||
Name: "ssl_kubeconfig_cert_not_before",
|
||||
LabelValues: map[string]string{
|
||||
"kubeconfig": kubeconfig,
|
||||
"name": "kubernetes",
|
||||
"type": "cluster",
|
||||
"serial_no": cert.SerialNumber.String(),
|
||||
"issuer_cn": cert.Issuer.CommonName,
|
||||
"cn": cert.Subject.CommonName,
|
||||
"dnsnames": "," + strings.Join(cert.DNSNames, ",") + ",",
|
||||
"ips": ips,
|
||||
"emails": "," + strings.Join(cert.EmailAddresses, ",") + ",",
|
||||
"ou": "," + strings.Join(cert.Subject.OrganizationalUnit, ",") + ",",
|
||||
},
|
||||
Value: float64(cert.NotBefore.Unix()),
|
||||
},
|
||||
®istryResult{
|
||||
Name: "ssl_kubeconfig_cert_not_after",
|
||||
LabelValues: map[string]string{
|
||||
"kubeconfig": kubeconfig,
|
||||
"name": "kubernetes-admin",
|
||||
"type": "user",
|
||||
"serial_no": cert.SerialNumber.String(),
|
||||
"issuer_cn": cert.Issuer.CommonName,
|
||||
"cn": cert.Subject.CommonName,
|
||||
"dnsnames": "," + strings.Join(cert.DNSNames, ",") + ",",
|
||||
"ips": ips,
|
||||
"emails": "," + strings.Join(cert.EmailAddresses, ",") + ",",
|
||||
"ou": "," + strings.Join(cert.Subject.OrganizationalUnit, ",") + ",",
|
||||
},
|
||||
Value: float64(cert.NotAfter.Unix()),
|
||||
},
|
||||
®istryResult{
|
||||
Name: "ssl_kubeconfig_cert_not_before",
|
||||
LabelValues: map[string]string{
|
||||
"kubeconfig": kubeconfig,
|
||||
"name": "kubernetes-admin",
|
||||
"type": "user",
|
||||
"serial_no": cert.SerialNumber.String(),
|
||||
"issuer_cn": cert.Issuer.CommonName,
|
||||
"cn": cert.Subject.CommonName,
|
||||
"dnsnames": "," + strings.Join(cert.DNSNames, ",") + ",",
|
||||
"ips": ips,
|
||||
"emails": "," + strings.Join(cert.EmailAddresses, ",") + ",",
|
||||
"ou": "," + strings.Join(cert.Subject.OrganizationalUnit, ",") + ",",
|
||||
},
|
||||
Value: float64(cert.NotBefore.Unix()),
|
||||
},
|
||||
}
|
||||
checkRegistryResults(expectedResults, mfs, t)
|
||||
}
|
||||
@@ -6,7 +6,6 @@ import (
	"strings"

	"github.com/bmatcuk/doublestar/v2"
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/ribbybibby/ssl_exporter/config"
	v1 "k8s.io/api/core/v1"
@@ -25,7 +24,7 @@ var (
)

// ProbeKubernetes collects certificate metrics from kubernetes.io/tls Secrets
func ProbeKubernetes(ctx context.Context, logger log.Logger, target string, module config.Module, registry *prometheus.Registry) error {
func ProbeKubernetes(ctx context.Context, target string, module config.Module, registry *prometheus.Registry) error {
	client, err := newKubeClient(module.Kubernetes.Kubeconfig)
	if err != nil {
		return err
@@ -3,7 +3,6 @@ package prober
import (
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"sort"
@@ -11,9 +10,8 @@ import (
	"strings"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"golang.org/x/crypto/ocsp"
	v1 "k8s.io/api/core/v1"
)
@@ -31,10 +29,6 @@ func collectConnectionStateMetrics(state tls.ConnectionState, registry *promethe
		return err
	}

	if err := collectVerifiedChainMetrics(state.VerifiedChains, registry); err != nil {
		return err
	}

	return collectOCSPMetrics(state.OCSPResponse, registry)
}
@@ -231,7 +225,7 @@ func collectOCSPMetrics(ocspResponse []byte, registry *prometheus.Registry) erro
	return nil
}

func collectFileMetrics(logger log.Logger, files []string, registry *prometheus.Registry) error {
func collectFileMetrics(files []string, registry *prometheus.Registry) error {
	var (
		totalCerts   []*x509.Certificate
		fileNotAfter = prometheus.NewGaugeVec(
@@ -254,7 +248,7 @@ func collectFileMetrics(logger log.Logger, files []string, registry *prometheus.
	for _, f := range files {
		data, err := ioutil.ReadFile(f)
		if err != nil {
			level.Debug(logger).Log("msg", fmt.Sprintf("Error reading file %s: %s", f, err))
			log.Debugf("Error reading file: %s error=%s", f, err)
			continue
		}
		certs, err := decodeCertificates(data)
@@ -334,105 +328,6 @@ func collectKubernetesSecretMetrics(secrets []v1.Secret, registry *prometheus.Re
|
||||
return nil
|
||||
}
|
||||
|
||||
func collectKubeconfigMetrics(logger log.Logger, kubeconfig KubeConfig, registry *prometheus.Registry) error {
|
||||
var (
|
||||
totalCerts []*x509.Certificate
|
||||
kubeconfigNotAfter = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: prometheus.BuildFQName(namespace, "kubeconfig", "cert_not_after"),
|
||||
Help: "NotAfter expressed as a Unix Epoch Time for a certificate found in a kubeconfig",
|
||||
},
|
||||
[]string{"kubeconfig", "name", "type", "serial_no", "issuer_cn", "cn", "dnsnames", "ips", "emails", "ou"},
|
||||
)
|
||||
kubeconfigNotBefore = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: prometheus.BuildFQName(namespace, "kubeconfig", "cert_not_before"),
|
||||
Help: "NotBefore expressed as a Unix Epoch Time for a certificate found in a kubeconfig",
|
||||
},
|
||||
[]string{"kubeconfig", "name", "type", "serial_no", "issuer_cn", "cn", "dnsnames", "ips", "emails", "ou"},
|
||||
)
|
||||
)
|
||||
registry.MustRegister(kubeconfigNotAfter, kubeconfigNotBefore)
|
||||
|
||||
for _, c := range kubeconfig.Clusters {
|
||||
var data []byte
|
||||
var err error
|
||||
if c.Cluster.CertificateAuthorityData != "" {
|
||||
data, err = base64.StdEncoding.DecodeString(c.Cluster.CertificateAuthorityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if c.Cluster.CertificateAuthority != "" {
|
||||
data, err = ioutil.ReadFile(c.Cluster.CertificateAuthority)
|
||||
if err != nil {
|
||||
level.Debug(logger).Log("msg", fmt.Sprintf("Error reading file %s: %s", c.Cluster.CertificateAuthority, err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
if data == nil {
|
||||
continue
|
||||
}
|
||||
certs, err := decodeCertificates(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
totalCerts = append(totalCerts, certs...)
|
||||
for _, cert := range certs {
|
||||
labels := append([]string{kubeconfig.Path, c.Name, "cluster"}, labelValues(cert)...)
|
||||
|
||||
if !cert.NotAfter.IsZero() {
|
||||
kubeconfigNotAfter.WithLabelValues(labels...).Set(float64(cert.NotAfter.Unix()))
|
||||
}
|
||||
|
||||
if !cert.NotBefore.IsZero() {
|
||||
kubeconfigNotBefore.WithLabelValues(labels...).Set(float64(cert.NotBefore.Unix()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, u := range kubeconfig.Users {
|
||||
var data []byte
|
||||
var err error
|
||||
if u.User.ClientCertificateData != "" {
|
||||
data, err = base64.StdEncoding.DecodeString(u.User.ClientCertificateData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if u.User.ClientCertificate != "" {
|
||||
data, err = ioutil.ReadFile(u.User.ClientCertificate)
|
||||
if err != nil {
|
||||
level.Debug(logger).Log("msg", fmt.Sprintf("Error reading file %s: %s", u.User.ClientCertificate, err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
if data == nil {
|
||||
continue
|
||||
}
|
||||
certs, err := decodeCertificates(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
totalCerts = append(totalCerts, certs...)
|
||||
for _, cert := range certs {
|
||||
labels := append([]string{kubeconfig.Path, u.Name, "user"}, labelValues(cert)...)
|
||||
|
||||
if !cert.NotAfter.IsZero() {
|
||||
kubeconfigNotAfter.WithLabelValues(labels...).Set(float64(cert.NotAfter.Unix()))
|
||||
}
|
||||
|
||||
if !cert.NotBefore.IsZero() {
|
||||
kubeconfigNotBefore.WithLabelValues(labels...).Set(float64(cert.NotBefore.Unix()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(totalCerts) == 0 {
|
||||
return fmt.Errorf("No certificates found")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func labelValues(cert *x509.Certificate) []string {
|
||||
return []string{
|
||||
cert.SerialNumber.String(),
|
||||
|
||||
@@ -3,7 +3,6 @@ package prober
import (
	"context"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/ribbybibby/ssl_exporter/config"
)
@@ -16,9 +15,8 @@ var (
		"tcp":        ProbeTCP,
		"file":       ProbeFile,
		"kubernetes": ProbeKubernetes,
		"kubeconfig": ProbeKubeconfig,
	}
)

// ProbeFn probes
type ProbeFn func(ctx context.Context, logger log.Logger, target string, module config.Module, registry *prometheus.Registry) error
type ProbeFn func(ctx context.Context, target string, module config.Module, registry *prometheus.Registry) error
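The map excerpted above is what ties module names to probe functions, and ProbeFn is the shape every prober now shares. A sketch of dispatching through such a map with the logger-free signature; the map here is a local stand-in rather than the package's own variable:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/ribbybibby/ssl_exporter/config"
	"github.com/ribbybibby/ssl_exporter/prober"
)

func main() {
	// Local stand-in for the package's module-name -> ProbeFn map.
	probers := map[string]prober.ProbeFn{
		"https": prober.ProbeHTTPS,
		"tcp":   prober.ProbeTCP,
		"file":  prober.ProbeFile,
	}

	module := config.Module{Prober: "tcp"}
	fn, ok := probers[module.Prober]
	if !ok {
		panic(fmt.Sprintf("unknown prober %q", module.Prober))
	}

	registry := prometheus.NewRegistry()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Placeholder target; every prober now shares this four-argument shape.
	if err := fn(ctx, "example.com:443", module, registry); err != nil {
		fmt.Println("probe failed:", err)
	}
}
```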
@@ -8,14 +8,13 @@ import (
	"net"
	"regexp"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"github.com/ribbybibby/ssl_exporter/config"
)

// ProbeTCP performs a tcp probe
func ProbeTCP(ctx context.Context, logger log.Logger, target string, module config.Module, registry *prometheus.Registry) error {
func ProbeTCP(ctx context.Context, target string, module config.Module, registry *prometheus.Registry) error {
	tlsConfig, err := newTLSConfig(target, registry, &module.TLSConfig)
	if err != nil {
		return err
@@ -34,7 +33,7 @@ func ProbeTCP(ctx context.Context, logger log.Logger, target string, module conf
	}

	if module.TCP.StartTLS != "" {
		err = startTLS(logger, conn, module.TCP.StartTLS)
		err = startTLS(conn, module.TCP.StartTLS)
		if err != nil {
			return err
		}
@@ -111,7 +110,7 @@ var (
)

// startTLS will send the STARTTLS command for the given protocol
func startTLS(logger log.Logger, conn net.Conn, proto string) error {
func startTLS(conn net.Conn, proto string) error {
	var err error

	qr, ok := startTLSqueryResponses[proto]
@@ -124,13 +123,13 @@ func startTLS(logger log.Logger, conn net.Conn, proto string) error {
	if qr.expect != "" {
		var match bool
		for scanner.Scan() {
			level.Debug(logger).Log("msg", fmt.Sprintf("read line: %s", scanner.Text()))
			log.Debugf("read line: %s", scanner.Text())
			match, err = regexp.Match(qr.expect, scanner.Bytes())
			if err != nil {
				return err
			}
			if match {
				level.Debug(logger).Log("msg", fmt.Sprintf("regex: %s matched: %s", qr.expect, scanner.Text()))
				log.Debugf("regex: %s matched: %s", qr.expect, scanner.Text())
				break
			}
		}
@@ -142,7 +141,7 @@ func startTLS(logger log.Logger, conn net.Conn, proto string) error {
		}
	}
	if qr.send != "" {
		level.Debug(logger).Log("msg", fmt.Sprintf("sending line: %s", qr.send))
		log.Debugf("sending line: %s", qr.send)
		if _, err := fmt.Fprintf(conn, "%s\r\n", qr.send); err != nil {
			return err
		}
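startTLSqueryResponses, looked up near the top of startTLS, pairs an expect pattern with a line to send for each STARTTLS-capable protocol. Its element type is not visible in this diff, so the struct below is only a guess reconstructed from the qr.expect / qr.send accesses, with a hypothetical SMTP sequence for illustration:

```go
package main

import "fmt"

// queryResponse is guessed from the qr.expect / qr.send accesses in startTLS;
// the real type in tcp.go may be named or shaped differently.
type queryResponse struct {
	expect string // regex to wait for on lines read from the connection
	send   string // line to write (the prober appends "\r\n"), empty to skip
}

// A hypothetical SMTP sequence: wait for the 220 greeting, send EHLO, wait for
// the STARTTLS capability, request STARTTLS, then expect 220 before the TLS
// handshake is started.
var smtpSteps = []queryResponse{
	{expect: "^220"},
	{send: "EHLO ssl_exporter"},
	{expect: "STARTTLS"},
	{send: "STARTTLS"},
	{expect: "^220"},
}

func main() {
	for i, qr := range smtpSteps {
		fmt.Printf("step %d: expect=%q send=%q\n", i, qr.expect, qr.send)
	}
}
```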
@@ -17,7 +17,6 @@ import (
|
||||
"golang.org/x/crypto/ocsp"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
pconfig "github.com/prometheus/common/config"
|
||||
)
|
||||
|
||||
// TestProbeTCP tests the typical case
|
||||
@@ -32,7 +31,7 @@ func TestProbeTCP(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
@@ -43,7 +42,7 @@ func TestProbeTCP(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err != nil {
|
||||
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
|
||||
@@ -69,7 +68,7 @@ func TestProbeTCPInvalidName(t *testing.T) {
|
||||
defer server.Close()
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
},
|
||||
@@ -82,7 +81,7 @@ func TestProbeTCPInvalidName(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeTCP(ctx, newTestLogger(), "localhost:"+listenPort, module, registry); err == nil {
|
||||
if err := ProbeTCP(ctx, "localhost:"+listenPort, module, registry); err == nil {
|
||||
t.Fatalf("expected error but err was nil")
|
||||
}
|
||||
}
|
||||
@@ -102,7 +101,7 @@ func TestProbeTCPServerName(t *testing.T) {
|
||||
host, listenPort, _ := net.SplitHostPort(server.Listener.Addr().String())
|
||||
|
||||
module := config.Module{
|
||||
TLSConfig: pconfig.TLSConfig{
|
||||
TLSConfig: config.TLSConfig{
|
||||
CAFile: caFile,
|
||||
InsecureSkipVerify: false,
|
||||
ServerName: host,
|
||||
@@ -114,7 +113,7 @@ func TestProbeTCPServerName(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := ProbeTCP(ctx, newTestLogger(), "localhost:"+listenPort, module, registry); err != nil {
|
||||
if err := ProbeTCP(ctx, "localhost:"+listenPort, module, registry); err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
@@ -147,7 +146,7 @@ func TestProbeTCPExpired(t *testing.T) {
defer server.Close()

module := config.Module{
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
InsecureSkipVerify: false,
},
@@ -158,7 +157,7 @@ func TestProbeTCPExpired(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err == nil {
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err == nil {
t.Fatalf("expected error but err is nil")
}
}
@@ -184,7 +183,7 @@ func TestProbeTCPExpiredInsecure(t *testing.T) {
defer server.Close()

module := config.Module{
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
InsecureSkipVerify: true,
},
@@ -195,7 +194,7 @@ func TestProbeTCPExpiredInsecure(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err != nil {
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err != nil {
t.Fatalf("error: %s", err)
}

@@ -223,7 +222,7 @@ func TestProbeTCPStartTLSSMTP(t *testing.T) {
TCP: config.TCPProbe{
StartTLS: "smtp",
},
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
InsecureSkipVerify: false,
},
@@ -234,7 +233,7 @@ func TestProbeTCPStartTLSSMTP(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err != nil {
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err != nil {
t.Fatalf("error: %s", err)
}

@@ -262,7 +261,7 @@ func TestProbeTCPStartTLSFTP(t *testing.T) {
TCP: config.TCPProbe{
StartTLS: "ftp",
},
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
InsecureSkipVerify: false,
},
@@ -273,7 +272,7 @@ func TestProbeTCPStartTLSFTP(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err != nil {
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err != nil {
t.Fatalf("error: %s", err)
}

@@ -301,7 +300,7 @@ func TestProbeTCPStartTLSIMAP(t *testing.T) {
TCP: config.TCPProbe{
StartTLS: "imap",
},
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
InsecureSkipVerify: false,
},
@@ -312,7 +311,7 @@ func TestProbeTCPStartTLSIMAP(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err != nil {
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err != nil {
t.Fatalf("error: %s", err)
}

@@ -338,7 +337,7 @@ func TestProbeTCPTimeout(t *testing.T) {
defer server.Close()

module := config.Module{
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
InsecureSkipVerify: false,
},
@@ -349,7 +348,7 @@ func TestProbeTCPTimeout(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()

if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err == nil {
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err == nil {
t.Fatalf("Expected error but returned error was nil")
}
}
@@ -381,7 +380,7 @@ func TestProbeTCPOCSP(t *testing.T) {
defer server.Close()

module := config.Module{
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
InsecureSkipVerify: false,
},
@@ -392,7 +391,7 @@ func TestProbeTCPOCSP(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err != nil {
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err != nil {
t.Fatalf("error: %s", err)
}

@@ -463,7 +462,7 @@ func TestProbeTCPVerifiedChains(t *testing.T) {
defer server.Close()

module := config.Module{
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
},
}
@@ -473,7 +472,7 @@ func TestProbeTCPVerifiedChains(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

if err := ProbeTCP(ctx, newTestLogger(), server.Listener.Addr().String(), module, registry); err != nil {
if err := ProbeTCP(ctx, server.Listener.Addr().String(), module, registry); err != nil {
t.Fatalf("error: %s", err)
}
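The hunks above drop the logger argument from ProbeTCP and switch the modules over to the exporter's own config.TLSConfig, which is where this branch's DANE option lives. A minimal sketch of calling the new signature directly, assuming DANEVerify is a plain bool field on config.TLSConfig (as the tls.go diff below suggests); the CA path and target are illustrative only, not taken from the diff:

```go
package main

import (
	"context"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"github.com/ribbybibby/ssl_exporter/config"
	"github.com/ribbybibby/ssl_exporter/prober"
)

func main() {
	registry := prometheus.NewRegistry()

	// Module wired the same way as the tests above, with the DANE option
	// switched on. DANEVerify is assumed to be a bool; the CA path and
	// target are placeholders for this sketch.
	module := config.Module{
		Prober: "tcp",
		TLSConfig: config.TLSConfig{
			CAFile:     "/etc/ssl/certs/ca-certificates.crt",
			DANEVerify: true,
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Note the logger argument is gone: ProbeTCP(ctx, target, module, registry).
	if err := prober.ProbeTCP(ctx, "example.com:443", module, registry); err != nil {
		log.Errorln(err)
	}
}
```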
@@ -1,11 +0,0 @@
package prober

import (
"os"

"github.com/go-kit/log"
)

func newTestLogger() log.Logger {
return log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
}
163  prober/tls.go
@@ -4,16 +4,24 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"net"

"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
pconfig "github.com/prometheus/common/config"
"github.com/prometheus/common/log"
"github.com/ribbybibby/ssl_exporter/config"
)

var (
// The nameservers in resolvConf are used to perform DANE verification.
resolvConf = "/etc/resolv.conf"
)

// newTLSConfig sets up TLS config and instruments it with a function that
// collects metrics for the verified chain
func newTLSConfig(target string, registry *prometheus.Registry, pTLSConfig *pconfig.TLSConfig) (*tls.Config, error) {
tlsConfig, err := pconfig.NewTLSConfig(pTLSConfig)
func newTLSConfig(target string, registry *prometheus.Registry, cTLSConfig *config.TLSConfig) (*tls.Config, error) {
tlsConfig, err := config.NewTLSConfig(cTLSConfig)
if err != nil {
return nil, err
}
@@ -26,13 +34,162 @@ func newTLSConfig(target string, registry *prometheus.Registry, pTLSConfig *pcon
tlsConfig.ServerName = targetAddress
}

// Override the standard verification with our own. Capture the original
// value of InsecureSkipVerify so we can use it later on.
insecure := tlsConfig.InsecureSkipVerify
tlsConfig.InsecureSkipVerify = true
tlsConfig.VerifyConnection = func(state tls.ConnectionState) error {
if !insecure {
var verifiedChains [][]*x509.Certificate
if cTLSConfig.DANEVerify {
verifiedChains, err = verifyDANE(&state, target, tlsConfig)
if err != nil {
return err
}
} else {
verifiedChains, err = verifyPKIX(&state, tlsConfig)
if err != nil {
return err
}
}
if err := collectVerifiedChainMetrics(verifiedChains, registry); err != nil {
return err
}
}

return collectConnectionStateMetrics(state, registry)
}

return tlsConfig, nil
}

// verifyPKIX performs typical PKIX verification of the target certificates
func verifyPKIX(state *tls.ConnectionState, tlsConfig *tls.Config) ([][]*x509.Certificate, error) {
opts := x509.VerifyOptions{
Roots: tlsConfig.RootCAs,
DNSName: state.ServerName,
Intermediates: x509.NewCertPool(),
}
for _, cert := range state.PeerCertificates[1:] {
opts.Intermediates.AddCert(cert)
}
return state.PeerCertificates[0].Verify(opts)
}

// verifyDANE performs DANE verification
func verifyDANE(state *tls.ConnectionState, target string, tlsConfig *tls.Config) ([][]*x509.Certificate, error) {
host, port, err := net.SplitHostPort(target)
if err != nil {
return [][]*x509.Certificate{}, err
}
name, err := dns.TLSAName(dns.Fqdn(host), port, "tcp")
if err != nil {
return [][]*x509.Certificate{}, err
}

c := &dns.Client{}

cc, err := dns.ClientConfigFromFile(resolvConf)
if err != nil {
return [][]*x509.Certificate{}, err
}
if len(cc.Servers) == 0 {
return [][]*x509.Certificate{}, fmt.Errorf("no nameservers found in %s", resolvConf)
}

m := &dns.Msg{
MsgHdr: dns.MsgHdr{
RecursionDesired: true,
Id: dns.Id(),
},
Question: []dns.Question{
dns.Question{
Name: name,
Qtype: dns.TypeTLSA,
Qclass: dns.ClassINET,
},
},
}

for _, server := range cc.Servers {
in, _, err := c.Exchange(m, server+":53")
if err != nil {
log.Errorln(err)
continue
}

for _, rr := range in.Answer {
tr, ok := rr.(*dns.TLSA)
if !ok {
continue
}
switch tr.Usage {
case 0:
// Record must be in the verified chain,
// not including leaf AND must pass pkix
verifiedChains, err := verifyPKIX(state, tlsConfig)
if err == nil {
for _, chain := range verifiedChains {
for _, cert := range chain[1:] {
if err := tr.Verify(cert); err == nil {
return verifiedChains, nil
}
}
}
}
case 1:
// Must match leaf certificate
// AND must pass pkix
verifiedChains, err := verifyPKIX(state, tlsConfig)
if err == nil {
if err := tr.Verify(state.PeerCertificates[0]); err == nil {
return verifiedChains, nil
}
}
case 2:
// Must be in peer certificate chain, not
// including leaf
verifiedChains, err := verifyChain(state.PeerCertificates)
if err == nil {
for _, chain := range verifiedChains {
for _, cert := range chain[1:] {
if err := tr.Verify(cert); err == nil {
if err := state.PeerCertificates[0].VerifyHostname(tlsConfig.ServerName); err == nil {
return verifiedChains, nil
}
}
}
}
}
case 3:
// Must match leaf certificate
if err := tr.Verify(state.PeerCertificates[0]); err == nil {
return [][]*x509.Certificate{}, nil
}
}
}
}

return [][]*x509.Certificate{}, fmt.Errorf("can't find matching TLSA record for %s", name)
}

// verifyChain performs PKIX verification against the chain presented by the
// server, without considering the root certificates of the client
func verifyChain(certs []*x509.Certificate) ([][]*x509.Certificate, error) {
opts := x509.VerifyOptions{
Roots: x509.NewCertPool(),
}
opts.Roots.AddCert(certs[len(certs)-1])
if len(certs) >= 3 {
opts.Intermediates = x509.NewCertPool()
for _, cert := range certs[1:] {
opts.Intermediates.AddCert(cert)
}
}

return certs[0].Verify(opts)
}

func uniq(certs []*x509.Certificate) []*x509.Certificate {
r := []*x509.Certificate{}
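verifyDANE above walks the four TLSA certificate usages from RFC 6698, falling back to PKIX or peer-chain checks for usages 0-2. For the DNS side in isolation, here is a small standalone sketch of the simplest case, usage 3 (DANE-EE): look up the TLSA record for a host/port and match it against the leaf certificate, much like the `case 3` branch above. It is not the exporter's code; the hard-coded public resolver stands in for the /etc/resolv.conf lookup the prober performs.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/miekg/dns"
)

// daneEE mirrors the `case 3` branch of verifyDANE: the TLSA record must
// match the leaf certificate presented by the server. Illustrative only.
func daneEE(host, port string) error {
	// Grab the peer certificates; in this sketch the only check performed
	// is the TLSA match, so standard PKIX verification is skipped.
	conn, err := tls.Dial("tcp", host+":"+port, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		return err
	}
	defer conn.Close()
	leaf := conn.ConnectionState().PeerCertificates[0]

	// TLSA owner name, e.g. _443._tcp.example.com.
	name, err := dns.TLSAName(dns.Fqdn(host), port, "tcp")
	if err != nil {
		return err
	}

	m := new(dns.Msg)
	m.SetQuestion(name, dns.TypeTLSA)

	// The exporter reads nameservers from /etc/resolv.conf; a public
	// resolver is hard-coded here to keep the sketch self-contained.
	in, _, err := (&dns.Client{}).Exchange(m, "9.9.9.9:53")
	if err != nil {
		return err
	}

	for _, rr := range in.Answer {
		if tr, ok := rr.(*dns.TLSA); ok && tr.Usage == 3 {
			if err := tr.Verify(leaf); err == nil {
				return nil // matching DANE-EE record found
			}
		}
	}
	return fmt.Errorf("can't find matching TLSA record for %s", name)
}

func main() {
	if err := daneEE("example.com", "443"); err != nil {
		log.Fatal(err)
	}
}
```

For usages 0-2 the record alone is not enough: as the function above shows, the chain must additionally pass PKIX verification (usages 0 and 1) or verify against its own presented root plus the hostname (usage 2).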
@@ -4,16 +4,12 @@ import (
"context"
"fmt"
"net/http"
"os"
"strconv"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/promlog"
promlogflag "github.com/prometheus/common/promlog/flag"
"github.com/prometheus/common/log"
"github.com/prometheus/common/version"
"github.com/ribbybibby/ssl_exporter/config"
"github.com/ribbybibby/ssl_exporter/prober"
@@ -24,7 +20,7 @@ const (
namespace = "ssl"
)

func probeHandler(logger log.Logger, w http.ResponseWriter, r *http.Request, conf *config.Config) {
func probeHandler(w http.ResponseWriter, r *http.Request, conf *config.Config) {
moduleName := r.URL.Query().Get("module")
if moduleName == "" {
moduleName = "tcp"
@@ -92,11 +88,9 @@ func probeHandler(logger log.Logger, w http.ResponseWriter, r *http.Request, con
registry.MustRegister(probeSuccess, proberType)
proberType.WithLabelValues(module.Prober).Set(1)

logger = log.With(logger, "target", target, "prober", module.Prober, "timeout", timeout)

err := probeFunc(ctx, logger, target, module, registry)
err := probeFunc(ctx, target, module, registry)
if err != nil {
level.Error(logger).Log("msg", err)
log.Errorf("error=%s target=%s prober=%s timeout=%s", err, target, module.Prober, timeout)
probeSuccess.Set(0)
} else {
probeSuccess.Set(1)
@@ -117,32 +111,28 @@ func main() {
metricsPath = kingpin.Flag("web.metrics-path", "Path under which to expose metrics").Default("/metrics").String()
probePath = kingpin.Flag("web.probe-path", "Path under which to expose the probe endpoint").Default("/probe").String()
configFile = kingpin.Flag("config.file", "SSL exporter configuration file").Default("").String()
promlogConfig = promlog.Config{}
err error
)

promlogflag.AddFlags(kingpin.CommandLine, &promlogConfig)
log.AddFlags(kingpin.CommandLine)
kingpin.Version(version.Print(namespace + "_exporter"))
kingpin.HelpFlag.Short('h')
kingpin.Parse()

logger := promlog.New(&promlogConfig)

conf := config.DefaultConfig
if *configFile != "" {
conf, err = config.LoadConfig(*configFile)
if err != nil {
level.Error(logger).Log("msg", err)
os.Exit(1)
log.Fatalln(err)
}
}

level.Info(logger).Log("msg", fmt.Sprintf("Starting %s_exporter %s", namespace, version.Info()))
level.Info(logger).Log("msg", fmt.Sprintf("Build context %s", version.BuildContext()))
log.Infoln("Starting "+namespace+"_exporter", version.Info())
log.Infoln("Build context", version.BuildContext())

http.Handle(*metricsPath, promhttp.Handler())
http.HandleFunc(*probePath, func(w http.ResponseWriter, r *http.Request) {
probeHandler(logger, w, r, conf)
probeHandler(w, r, conf)
})
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte(`<html>
@@ -155,7 +145,6 @@ func main() {
</html>`))
})

level.Info(logger).Log("msg", fmt.Sprintf("Listening on %s", *listenAddress))
level.Error(logger).Log("msg", http.ListenAndServe(*listenAddress, nil))
os.Exit(1)
log.Infoln("Listening on", *listenAddress)
log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
@@ -3,12 +3,10 @@ package main
import (
"net/http"
"net/http/httptest"
"os"
"net/url"
"strings"
"testing"

"github.com/go-kit/log"
pconfig "github.com/prometheus/common/config"
"github.com/ribbybibby/ssl_exporter/config"
"github.com/ribbybibby/ssl_exporter/test"
)
@@ -29,14 +27,19 @@ func TestProbeHandler(t *testing.T) {
Modules: map[string]config.Module{
"https": config.Module{
Prober: "https",
TLSConfig: pconfig.TLSConfig{
TLSConfig: config.TLSConfig{
CAFile: caFile,
},
},
},
}

rr, err := probe(server.URL, "https", conf)
u, err := url.Parse(server.URL)
if err != nil {
t.Fatalf(err.Error())
}

rr, err := probe(u.Host, "https", conf)
if err != nil {
t.Fatalf(err.Error())
}
@@ -83,14 +86,10 @@ func probe(target, module string, conf *config.Config) (*httptest.ResponseRecord

rr := httptest.NewRecorder()
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
probeHandler(newTestLogger(), w, r, conf)
probeHandler(w, r, conf)
})

handler.ServeHTTP(rr, req)

return rr, nil
}

func newTestLogger() log.Logger {
return log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
}
15  test/tcp.go
@@ -7,8 +7,7 @@ import (
"os"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/common/log"
)

// TCPServer allows manipulation of the tls.Config before starting the listener
@@ -16,7 +15,6 @@ type TCPServer struct {
Listener net.Listener
TLS *tls.Config
stopCh chan struct{}
logger log.Logger
}

// StartTLS starts a listener that performs an immediate TLS handshake
@@ -31,7 +29,7 @@ func (t *TCPServer) StartTLS() {

// Immediately upgrade to TLS.
if err := conn.(*tls.Conn).Handshake(); err != nil {
level.Error(t.logger).Log("msg", err)
log.Errorln(err)
} else {
// Send some bytes before terminating the connection.
fmt.Fprintf(conn, "Hello World!\n")
@@ -55,7 +53,7 @@ func (t *TCPServer) StartTLSWait(d time.Duration) {
time.Sleep(d)

if err := conn.(*tls.Conn).Handshake(); err != nil {
level.Error(t.logger).Log(err)
log.Errorln(err)
} else {
// Send some bytes before terminating the connection.
fmt.Fprintf(conn, "Hello World!\n")
@@ -95,7 +93,7 @@ func (t *TCPServer) StartSMTP() {
// Upgrade to TLS.
tlsConn := tls.Server(conn, t.TLS)
if err := tlsConn.Handshake(); err != nil {
level.Error(t.logger).Log("msg", err)
log.Errorln(err)
}
defer tlsConn.Close()

@@ -122,7 +120,7 @@ func (t *TCPServer) StartFTP() {
// Upgrade to TLS.
tlsConn := tls.Server(conn, t.TLS)
if err := tlsConn.Handshake(); err != nil {
level.Error(t.logger).Log(err)
log.Errorln(err)
}
defer tlsConn.Close()

@@ -154,7 +152,7 @@ func (t *TCPServer) StartIMAP() {
// Upgrade to TLS.
tlsConn := tls.Server(conn, t.TLS)
if err := tlsConn.Handshake(); err != nil {
level.Error(t.logger).Log("msg", err)
log.Errorln(err)
}
defer tlsConn.Close()

@@ -215,7 +213,6 @@ func SetupTCPServerWithCertAndKey(caPEM, certPEM, keyPEM []byte) (*TCPServer, st
Listener: ln,
TLS: tlsConfig,
stopCh: make(chan (struct{})),
logger: log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)),
}

return server, caFile, teardown, err
22  vendor/github.com/go-kit/kit/LICENSE (generated, vendored)
@@ -1,22 +0,0 @@
(vendored go-kit MIT LICENSE removed in full)
151  vendor/github.com/go-kit/kit/log/README.md (generated, vendored)
@@ -1,151 +0,0 @@
(vendored go-kit log package README removed in full)
116  vendor/github.com/go-kit/kit/log/doc.go (generated, vendored)
@@ -1,116 +0,0 @@
(vendored go-kit log package documentation removed in full)
91  vendor/github.com/go-kit/kit/log/json_logger.go (generated, vendored)
@@ -1,91 +0,0 @@
(vendored go-kit JSON logger removed in full)
22  vendor/github.com/go-kit/kit/log/level/doc.go (generated, vendored)
@@ -1,22 +0,0 @@
(vendored go-kit level package documentation removed in full)
205  vendor/github.com/go-kit/kit/log/level/level.go (generated, vendored)
@@ -1,205 +0,0 @@
(vendored go-kit leveled logging implementation removed in full)
135  vendor/github.com/go-kit/kit/log/log.go (generated, vendored)
@@ -1,135 +0,0 @@
(vendored go-kit Logger interface and contextual logger removed in full)
62  vendor/github.com/go-kit/kit/log/logfmt_logger.go (generated, vendored)
@@ -1,62 +0,0 @@
(vendored go-kit logfmt logger removed in full)
8  vendor/github.com/go-kit/kit/log/nop_logger.go (generated, vendored)
@@ -1,8 +0,0 @@
(vendored go-kit no-op logger removed in full)
116  vendor/github.com/go-kit/kit/log/stdlib.go (generated, vendored)
@@ -1,116 +0,0 @@
(vendored go-kit stdlib log adapter removed in full)
116  vendor/github.com/go-kit/kit/log/sync.go (generated, vendored)
@@ -1,116 +0,0 @@
(vendored go-kit concurrency-safe logger helpers removed in full)
110 vendor/github.com/go-kit/kit/log/value.go (generated, vendored)
@@ -1,110 +0,0 @@
(vendored file deleted in full: Valuer, bindValues/containsValuer, Timestamp, TimestampFormat, Caller, and the DefaultTimestamp/DefaultTimestampUTC/DefaultCaller values)
15 vendor/github.com/go-kit/log/.gitignore (generated, vendored)
@@ -1,15 +0,0 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
21 vendor/github.com/go-kit/log/LICENSE (generated, vendored)
@@ -1,21 +0,0 @@
(vendored file deleted in full: the standard MIT License text, Copyright (c) 2021 Go kit)
151 vendor/github.com/go-kit/log/README.md (generated, vendored)
@@ -1,151 +0,0 @@
(vendored file deleted in full: the package README covering structured logging, typical and contextual usage, stdlib logger interoperability, timestamps and callers, levels, supported output formats (logfmt, JSON), the history of the Logger interface, and benchmark references)
116 vendor/github.com/go-kit/log/doc.go (generated, vendored)
@@ -1,116 +0,0 @@
(vendored file deleted in full: the package documentation describing the Logger interface, contextual loggers via With/WithPrefix/WithSuffix, dynamic Valuers, concurrent safety with NewSyncWriter and NewSyncLogger, and error handling with LoggerFunc)
8 vendor/github.com/go-kit/log/go.mod (generated, vendored)
@@ -1,8 +0,0 @@
module github.com/go-kit/log

go 1.16

require (
	github.com/go-logfmt/logfmt v0.5.0
	github.com/go-stack/stack v1.8.0
)
4 vendor/github.com/go-kit/log/go.sum (generated, vendored)
@@ -1,4 +0,0 @@
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
91 vendor/github.com/go-kit/log/json_logger.go (generated, vendored)
@@ -1,91 +0,0 @@
(vendored file deleted in full: NewJSONLogger and its merge, safeString and safeError helpers)
22 vendor/github.com/go-kit/log/level/doc.go (generated, vendored)
@@ -1,22 +0,0 @@
(vendored file deleted in full: the level package documentation describing level.NewFilter and the Debug/Info/Warn/Error helpers)
205 vendor/github.com/go-kit/log/level/level.go (generated, vendored)
@@ -1,205 +0,0 @@
(vendored file deleted in full: the leveled-logging implementation with NewFilter, NewInjector, the Allow*/ErrNotAllowed/SquelchNoLevel/ErrNoLevel options, and the Key, ErrorValue, WarnValue, InfoValue and DebugValue values)
179 vendor/github.com/go-kit/log/log.go (generated, vendored)
@@ -1,179 +0,0 @@
(vendored file deleted in full: the core Logger interface, ErrMissingValue, the With/WithPrefix/WithSuffix contextual loggers, the context type and LoggerFunc)
62 vendor/github.com/go-kit/log/logfmt_logger.go (generated, vendored)
@@ -1,62 +0,0 @@
(vendored file deleted in full: NewLogfmtLogger and its pooled logfmt encoder)
8 vendor/github.com/go-kit/log/nop_logger.go (generated, vendored)
@@ -1,8 +0,0 @@
package log

type nopLogger struct{}

// NewNopLogger returns a logger that doesn't do anything.
func NewNopLogger() Logger { return nopLogger{} }

func (nopLogger) Log(...interface{}) error { return nil }
151 vendor/github.com/go-kit/log/stdlib.go (generated, vendored)
@@ -1,151 +0,0 @@
(vendored file deleted in full: StdlibWriter, StdlibAdapter with its TimestampKey, FileKey, MessageKey and Prefix options, and the stdlib log-line parsing regexps)
113 vendor/github.com/go-kit/log/sync.go (generated, vendored)
@@ -1,113 +0,0 @@
(vendored file deleted in full: SwapLogger, NewSyncWriter with syncWriter/fdSyncWriter, and NewSyncLogger; the same concurrency helpers as the go-kit/kit copy above)
110 vendor/github.com/go-kit/log/value.go (generated, vendored)
@@ -1,110 +0,0 @@
(vendored file deleted in full: Valuer, Timestamp, TimestampFormat, Caller and the DefaultTimestamp/DefaultTimestampUTC/DefaultCaller values; the same content as the go-kit/kit copy above)
1 vendor/github.com/go-logfmt/logfmt/.gitignore (generated, vendored)
@@ -1 +0,0 @@
.vscode/
18 vendor/github.com/go-logfmt/logfmt/.travis.yml (generated, vendored)
@@ -1,18 +0,0 @@
language: go
sudo: false
go:
  - "1.7.x"
  - "1.8.x"
  - "1.9.x"
  - "1.10.x"
  - "1.11.x"
  - "1.12.x"
  - "1.13.x"
  - "tip"

before_install:
  - go get github.com/mattn/goveralls
  - go get golang.org/x/tools/cmd/cover

script:
  - goveralls -service=travis-ci
48 vendor/github.com/go-logfmt/logfmt/CHANGELOG.md (generated, vendored)
@@ -1,48 +0,0 @@
(vendored file deleted in full: the logfmt changelog covering releases 0.1.0 (2016-03-28) through 0.5.0 (2020-01-03))
33 vendor/github.com/go-logfmt/logfmt/README.md generated vendored
@@ -1,33 +0,0 @@
|
||||
[](https://godoc.org/github.com/go-logfmt/logfmt)
|
||||
[](https://goreportcard.com/report/go-logfmt/logfmt)
|
||||
[](https://travis-ci.org/go-logfmt/logfmt)
|
||||
[](https://coveralls.io/github/go-logfmt/logfmt?branch=master)
|
||||
|
||||
# logfmt
|
||||
|
||||
Package logfmt implements utilities to marshal and unmarshal data in the [logfmt
|
||||
format](https://brandur.org/logfmt). It provides an API similar to
|
||||
[encoding/json](http://golang.org/pkg/encoding/json/) and
|
||||
[encoding/xml](http://golang.org/pkg/encoding/xml/).
|
||||
|
||||
The logfmt format was first documented by Brandur Leach in [this
|
||||
article](https://brandur.org/logfmt). The format has not been formally
|
||||
standardized. The most authoritative public specification to date has been the
|
||||
documentation of a Go Language [package](http://godoc.org/github.com/kr/logfmt)
|
||||
written by Blake Mizerany and Keith Rarick.
|
||||
|
||||
## Goals
|
||||
|
||||
This project attempts to conform as closely as possible to the prior art, while
|
||||
also removing ambiguity where necessary to provide well behaved encoder and
|
||||
decoder implementations.
|
||||
|
||||
## Non-goals
|
||||
|
||||
This project does not attempt to formally standardize the logfmt format. In the
|
||||
event that logfmt is standardized this project would take conforming to the
|
||||
standard as a goal.
|
||||
|
||||
## Versioning
|
||||
|
||||
Package logfmt publishes releases via [semver](http://semver.org/) compatible Git tags prefixed with a single 'v'.
|
||||
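As a quick orientation for the Encoder and Decoder removed in the files below, here is a minimal, self-contained sketch of round-tripping a record with this package; the key/value content is purely illustrative:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/go-logfmt/logfmt"
)

func main() {
	// Encode one logfmt record to stdout, e.g. `level=info msg="cert expiring"`.
	enc := logfmt.NewEncoder(os.Stdout)
	_ = enc.EncodeKeyval("level", "info")
	_ = enc.EncodeKeyval("msg", "cert expiring")
	_ = enc.EndRecord()

	// Decode records back into key/value pairs.
	dec := logfmt.NewDecoder(strings.NewReader("level=info msg=\"cert expiring\"\n"))
	for dec.ScanRecord() {
		for dec.ScanKeyval() {
			fmt.Printf("%s => %s\n", dec.Key(), dec.Value())
		}
	}
	if err := dec.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```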
237 vendor/github.com/go-logfmt/logfmt/decode.go generated vendored
@@ -1,237 +0,0 @@
|
||||
package logfmt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// A Decoder reads and decodes logfmt records from an input stream.
|
||||
type Decoder struct {
|
||||
pos int
|
||||
key []byte
|
||||
value []byte
|
||||
lineNum int
|
||||
s *bufio.Scanner
|
||||
err error
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
//
|
||||
// The decoder introduces its own buffering and may read data from r beyond
|
||||
// the logfmt records requested.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
dec := &Decoder{
|
||||
s: bufio.NewScanner(r),
|
||||
}
|
||||
return dec
|
||||
}
|
||||
|
||||
// ScanRecord advances the Decoder to the next record, which can then be
|
||||
// parsed with the ScanKeyval method. It returns false when decoding stops,
|
||||
// either by reaching the end of the input or an error. After ScanRecord
|
||||
// returns false, the Err method will return any error that occurred during
|
||||
// decoding, except that if it was io.EOF, Err will return nil.
|
||||
func (dec *Decoder) ScanRecord() bool {
|
||||
if dec.err != nil {
|
||||
return false
|
||||
}
|
||||
if !dec.s.Scan() {
|
||||
dec.err = dec.s.Err()
|
||||
return false
|
||||
}
|
||||
dec.lineNum++
|
||||
dec.pos = 0
|
||||
return true
|
||||
}
|
||||
|
||||
// ScanKeyval advances the Decoder to the next key/value pair of the current
|
||||
// record, which can then be retrieved with the Key and Value methods. It
|
||||
// returns false when decoding stops, either by reaching the end of the
|
||||
// current record or an error.
|
||||
func (dec *Decoder) ScanKeyval() bool {
|
||||
dec.key, dec.value = nil, nil
|
||||
if dec.err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
line := dec.s.Bytes()
|
||||
|
||||
// garbage
|
||||
for p, c := range line[dec.pos:] {
|
||||
if c > ' ' {
|
||||
dec.pos += p
|
||||
goto key
|
||||
}
|
||||
}
|
||||
dec.pos = len(line)
|
||||
return false
|
||||
|
||||
key:
|
||||
const invalidKeyError = "invalid key"
|
||||
|
||||
start, multibyte := dec.pos, false
|
||||
for p, c := range line[dec.pos:] {
|
||||
switch {
|
||||
case c == '=':
|
||||
dec.pos += p
|
||||
if dec.pos > start {
|
||||
dec.key = line[start:dec.pos]
|
||||
if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
|
||||
dec.syntaxError(invalidKeyError)
|
||||
return false
|
||||
}
|
||||
}
|
||||
if dec.key == nil {
|
||||
dec.unexpectedByte(c)
|
||||
return false
|
||||
}
|
||||
goto equal
|
||||
case c == '"':
|
||||
dec.pos += p
|
||||
dec.unexpectedByte(c)
|
||||
return false
|
||||
case c <= ' ':
|
||||
dec.pos += p
|
||||
if dec.pos > start {
|
||||
dec.key = line[start:dec.pos]
|
||||
if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
|
||||
dec.syntaxError(invalidKeyError)
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case c >= utf8.RuneSelf:
|
||||
multibyte = true
|
||||
}
|
||||
}
|
||||
dec.pos = len(line)
|
||||
if dec.pos > start {
|
||||
dec.key = line[start:dec.pos]
|
||||
if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
|
||||
dec.syntaxError(invalidKeyError)
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
equal:
|
||||
dec.pos++
|
||||
if dec.pos >= len(line) {
|
||||
return true
|
||||
}
|
||||
switch c := line[dec.pos]; {
|
||||
case c <= ' ':
|
||||
return true
|
||||
case c == '"':
|
||||
goto qvalue
|
||||
}
|
||||
|
||||
// value
|
||||
start = dec.pos
|
||||
for p, c := range line[dec.pos:] {
|
||||
switch {
|
||||
case c == '=' || c == '"':
|
||||
dec.pos += p
|
||||
dec.unexpectedByte(c)
|
||||
return false
|
||||
case c <= ' ':
|
||||
dec.pos += p
|
||||
if dec.pos > start {
|
||||
dec.value = line[start:dec.pos]
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
dec.pos = len(line)
|
||||
if dec.pos > start {
|
||||
dec.value = line[start:dec.pos]
|
||||
}
|
||||
return true
|
||||
|
||||
qvalue:
|
||||
const (
|
||||
untermQuote = "unterminated quoted value"
|
||||
invalidQuote = "invalid quoted value"
|
||||
)
|
||||
|
||||
hasEsc, esc := false, false
|
||||
start = dec.pos
|
||||
for p, c := range line[dec.pos+1:] {
|
||||
switch {
|
||||
case esc:
|
||||
esc = false
|
||||
case c == '\\':
|
||||
hasEsc, esc = true, true
|
||||
case c == '"':
|
||||
dec.pos += p + 2
|
||||
if hasEsc {
|
||||
v, ok := unquoteBytes(line[start:dec.pos])
|
||||
if !ok {
|
||||
dec.syntaxError(invalidQuote)
|
||||
return false
|
||||
}
|
||||
dec.value = v
|
||||
} else {
|
||||
start++
|
||||
end := dec.pos - 1
|
||||
if end > start {
|
||||
dec.value = line[start:end]
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
dec.pos = len(line)
|
||||
dec.syntaxError(untermQuote)
|
||||
return false
|
||||
}
|
||||
|
||||
// Key returns the most recent key found by a call to ScanKeyval. The returned
|
||||
// slice may point to internal buffers and is only valid until the next call
|
||||
// to ScanRecord. It does no allocation.
|
||||
func (dec *Decoder) Key() []byte {
|
||||
return dec.key
|
||||
}
|
||||
|
||||
// Value returns the most recent value found by a call to ScanKeyval. The
|
||||
// returned slice may point to internal buffers and is only valid until the
|
||||
// next call to ScanRecord. It does no allocation when the value has no
|
||||
// escape sequences.
|
||||
func (dec *Decoder) Value() []byte {
|
||||
return dec.value
|
||||
}
|
||||
|
||||
// Err returns the first non-EOF error that was encountered by the Scanner.
|
||||
func (dec *Decoder) Err() error {
|
||||
return dec.err
|
||||
}
|
||||
|
||||
func (dec *Decoder) syntaxError(msg string) {
|
||||
dec.err = &SyntaxError{
|
||||
Msg: msg,
|
||||
Line: dec.lineNum,
|
||||
Pos: dec.pos + 1,
|
||||
}
|
||||
}
|
||||
|
||||
func (dec *Decoder) unexpectedByte(c byte) {
|
||||
dec.err = &SyntaxError{
|
||||
Msg: fmt.Sprintf("unexpected %q", c),
|
||||
Line: dec.lineNum,
|
||||
Pos: dec.pos + 1,
|
||||
}
|
||||
}
|
||||
|
||||
// A SyntaxError represents a syntax error in the logfmt input stream.
|
||||
type SyntaxError struct {
|
||||
Msg string
|
||||
Line int
|
||||
Pos int
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string {
|
||||
return fmt.Sprintf("logfmt syntax error at pos %d on line %d: %s", e.Pos, e.Line, e.Msg)
|
||||
}
|
||||
6 vendor/github.com/go-logfmt/logfmt/doc.go generated vendored
@@ -1,6 +0,0 @@
// Package logfmt implements utilities to marshal and unmarshal data in the
// logfmt format. The logfmt format records key/value pairs in a way that
// balances readability for humans and simplicity of computer parsing. It is
// most commonly used as a more human friendly alternative to JSON for
// structured logging.
package logfmt
322 vendor/github.com/go-logfmt/logfmt/encode.go generated vendored
@@ -1,322 +0,0 @@
|
||||
package logfmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// MarshalKeyvals returns the logfmt encoding of keyvals, a variadic sequence
|
||||
// of alternating keys and values.
|
||||
func MarshalKeyvals(keyvals ...interface{}) ([]byte, error) {
|
||||
buf := &bytes.Buffer{}
|
||||
if err := NewEncoder(buf).EncodeKeyvals(keyvals...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// An Encoder writes logfmt data to an output stream.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
scratch bytes.Buffer
|
||||
needSep bool
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: w,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
space = []byte(" ")
|
||||
equals = []byte("=")
|
||||
newline = []byte("\n")
|
||||
null = []byte("null")
|
||||
)
|
||||
|
||||
// EncodeKeyval writes the logfmt encoding of key and value to the stream. A
|
||||
// single space is written before the second and subsequent keys in a record.
|
||||
// Nothing is written if a non-nil error is returned.
|
||||
func (enc *Encoder) EncodeKeyval(key, value interface{}) error {
|
||||
enc.scratch.Reset()
|
||||
if enc.needSep {
|
||||
if _, err := enc.scratch.Write(space); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := writeKey(&enc.scratch, key); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := enc.scratch.Write(equals); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeValue(&enc.scratch, value); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := enc.w.Write(enc.scratch.Bytes())
|
||||
enc.needSep = true
|
||||
return err
|
||||
}
|
||||
|
||||
// EncodeKeyvals writes the logfmt encoding of keyvals to the stream. Keyvals
|
||||
// is a variadic sequence of alternating keys and values. Keys of unsupported
|
||||
// type are skipped along with their corresponding value. Values of
|
||||
// unsupported type or that cause a MarshalerError are replaced by their error
|
||||
// but do not cause EncodeKeyvals to return an error. If a non-nil error is
|
||||
// returned some key/value pairs may not have be written.
|
||||
func (enc *Encoder) EncodeKeyvals(keyvals ...interface{}) error {
|
||||
if len(keyvals) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(keyvals)%2 == 1 {
|
||||
keyvals = append(keyvals, nil)
|
||||
}
|
||||
for i := 0; i < len(keyvals); i += 2 {
|
||||
k, v := keyvals[i], keyvals[i+1]
|
||||
err := enc.EncodeKeyval(k, v)
|
||||
if err == ErrUnsupportedKeyType {
|
||||
continue
|
||||
}
|
||||
if _, ok := err.(*MarshalerError); ok || err == ErrUnsupportedValueType {
|
||||
v = err
|
||||
err = enc.EncodeKeyval(k, v)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalerError represents an error encountered while marshaling a value.
|
||||
type MarshalerError struct {
|
||||
Type reflect.Type
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *MarshalerError) Error() string {
|
||||
return "error marshaling value of type " + e.Type.String() + ": " + e.Err.Error()
|
||||
}
|
||||
|
||||
// ErrNilKey is returned by Marshal functions and Encoder methods if a key is
|
||||
// a nil interface or pointer value.
|
||||
var ErrNilKey = errors.New("nil key")
|
||||
|
||||
// ErrInvalidKey is returned by Marshal functions and Encoder methods if, after
|
||||
// dropping invalid runes, a key is empty.
|
||||
var ErrInvalidKey = errors.New("invalid key")
|
||||
|
||||
// ErrUnsupportedKeyType is returned by Encoder methods if a key has an
|
||||
// unsupported type.
|
||||
var ErrUnsupportedKeyType = errors.New("unsupported key type")
|
||||
|
||||
// ErrUnsupportedValueType is returned by Encoder methods if a value has an
|
||||
// unsupported type.
|
||||
var ErrUnsupportedValueType = errors.New("unsupported value type")
|
||||
|
||||
func writeKey(w io.Writer, key interface{}) error {
|
||||
if key == nil {
|
||||
return ErrNilKey
|
||||
}
|
||||
|
||||
switch k := key.(type) {
|
||||
case string:
|
||||
return writeStringKey(w, k)
|
||||
case []byte:
|
||||
if k == nil {
|
||||
return ErrNilKey
|
||||
}
|
||||
return writeBytesKey(w, k)
|
||||
case encoding.TextMarshaler:
|
||||
kb, err := safeMarshal(k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if kb == nil {
|
||||
return ErrNilKey
|
||||
}
|
||||
return writeBytesKey(w, kb)
|
||||
case fmt.Stringer:
|
||||
ks, ok := safeString(k)
|
||||
if !ok {
|
||||
return ErrNilKey
|
||||
}
|
||||
return writeStringKey(w, ks)
|
||||
default:
|
||||
rkey := reflect.ValueOf(key)
|
||||
switch rkey.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
|
||||
return ErrUnsupportedKeyType
|
||||
case reflect.Ptr:
|
||||
if rkey.IsNil() {
|
||||
return ErrNilKey
|
||||
}
|
||||
return writeKey(w, rkey.Elem().Interface())
|
||||
}
|
||||
return writeStringKey(w, fmt.Sprint(k))
|
||||
}
|
||||
}
|
||||
|
||||
// keyRuneFilter returns r for all valid key runes, and -1 for all invalid key
|
||||
// runes. When used as the mapping function for strings.Map and bytes.Map
|
||||
// functions it causes them to remove invalid key runes from strings or byte
|
||||
// slices respectively.
|
||||
func keyRuneFilter(r rune) rune {
|
||||
if r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError {
|
||||
return -1
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func writeStringKey(w io.Writer, key string) error {
|
||||
k := strings.Map(keyRuneFilter, key)
|
||||
if k == "" {
|
||||
return ErrInvalidKey
|
||||
}
|
||||
_, err := io.WriteString(w, k)
|
||||
return err
|
||||
}
|
||||
|
||||
func writeBytesKey(w io.Writer, key []byte) error {
|
||||
k := bytes.Map(keyRuneFilter, key)
|
||||
if len(k) == 0 {
|
||||
return ErrInvalidKey
|
||||
}
|
||||
_, err := w.Write(k)
|
||||
return err
|
||||
}
|
||||
|
||||
func writeValue(w io.Writer, value interface{}) error {
|
||||
switch v := value.(type) {
|
||||
case nil:
|
||||
return writeBytesValue(w, null)
|
||||
case string:
|
||||
return writeStringValue(w, v, true)
|
||||
case []byte:
|
||||
return writeBytesValue(w, v)
|
||||
case encoding.TextMarshaler:
|
||||
vb, err := safeMarshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if vb == nil {
|
||||
vb = null
|
||||
}
|
||||
return writeBytesValue(w, vb)
|
||||
case error:
|
||||
se, ok := safeError(v)
|
||||
return writeStringValue(w, se, ok)
|
||||
case fmt.Stringer:
|
||||
ss, ok := safeString(v)
|
||||
return writeStringValue(w, ss, ok)
|
||||
default:
|
||||
rvalue := reflect.ValueOf(value)
|
||||
switch rvalue.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
|
||||
return ErrUnsupportedValueType
|
||||
case reflect.Ptr:
|
||||
if rvalue.IsNil() {
|
||||
return writeBytesValue(w, null)
|
||||
}
|
||||
return writeValue(w, rvalue.Elem().Interface())
|
||||
}
|
||||
return writeStringValue(w, fmt.Sprint(v), true)
|
||||
}
|
||||
}
|
||||
|
||||
func needsQuotedValueRune(r rune) bool {
|
||||
return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError
|
||||
}
|
||||
|
||||
func writeStringValue(w io.Writer, value string, ok bool) error {
|
||||
var err error
|
||||
if ok && value == "null" {
|
||||
_, err = io.WriteString(w, `"null"`)
|
||||
} else if strings.IndexFunc(value, needsQuotedValueRune) != -1 {
|
||||
_, err = writeQuotedString(w, value)
|
||||
} else {
|
||||
_, err = io.WriteString(w, value)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func writeBytesValue(w io.Writer, value []byte) error {
|
||||
var err error
|
||||
if bytes.IndexFunc(value, needsQuotedValueRune) != -1 {
|
||||
_, err = writeQuotedBytes(w, value)
|
||||
} else {
|
||||
_, err = w.Write(value)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// EndRecord writes a newline character to the stream and resets the encoder
|
||||
// to the beginning of a new record.
|
||||
func (enc *Encoder) EndRecord() error {
|
||||
_, err := enc.w.Write(newline)
|
||||
if err == nil {
|
||||
enc.needSep = false
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Reset resets the encoder to the beginning of a new record.
|
||||
func (enc *Encoder) Reset() {
|
||||
enc.needSep = false
|
||||
}
|
||||
|
||||
func safeError(err error) (s string, ok bool) {
|
||||
defer func() {
|
||||
if panicVal := recover(); panicVal != nil {
|
||||
if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
s, ok = "null", false
|
||||
} else {
|
||||
s, ok = fmt.Sprintf("PANIC:%v", panicVal), false
|
||||
}
|
||||
}
|
||||
}()
|
||||
s, ok = err.Error(), true
|
||||
return
|
||||
}
|
||||
|
||||
func safeString(str fmt.Stringer) (s string, ok bool) {
|
||||
defer func() {
|
||||
if panicVal := recover(); panicVal != nil {
|
||||
if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
s, ok = "null", false
|
||||
} else {
|
||||
s, ok = fmt.Sprintf("PANIC:%v", panicVal), true
|
||||
}
|
||||
}
|
||||
}()
|
||||
s, ok = str.String(), true
|
||||
return
|
||||
}
|
||||
|
||||
func safeMarshal(tm encoding.TextMarshaler) (b []byte, err error) {
|
||||
defer func() {
|
||||
if panicVal := recover(); panicVal != nil {
|
||||
if v := reflect.ValueOf(tm); v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
b, err = nil, nil
|
||||
} else {
|
||||
b, err = nil, fmt.Errorf("panic when marshalling: %s", panicVal)
|
||||
}
|
||||
}
|
||||
}()
|
||||
b, err = tm.MarshalText()
|
||||
if err != nil {
|
||||
return nil, &MarshalerError{
|
||||
Type: reflect.TypeOf(tm),
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
3 vendor/github.com/go-logfmt/logfmt/go.mod generated vendored
@@ -1,3 +0,0 @@
module github.com/go-logfmt/logfmt

go 1.13
277 vendor/github.com/go-logfmt/logfmt/jsonstring.go generated vendored
@@ -1,277 +0,0 @@
|
||||
package logfmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"strconv"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf16"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Taken from Go's encoding/json and modified for use here.
|
||||
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
var hex = "0123456789abcdef"
|
||||
|
||||
var bufferPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &bytes.Buffer{}
|
||||
},
|
||||
}
|
||||
|
||||
func getBuffer() *bytes.Buffer {
|
||||
return bufferPool.Get().(*bytes.Buffer)
|
||||
}
|
||||
|
||||
func poolBuffer(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
bufferPool.Put(buf)
|
||||
}
|
||||
|
||||
// NOTE: keep in sync with writeQuotedBytes below.
|
||||
func writeQuotedString(w io.Writer, s string) (int, error) {
|
||||
buf := getBuffer()
|
||||
buf.WriteByte('"')
|
||||
start := 0
|
||||
for i := 0; i < len(s); {
|
||||
if b := s[i]; b < utf8.RuneSelf {
|
||||
if 0x20 <= b && b != '\\' && b != '"' {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if start < i {
|
||||
buf.WriteString(s[start:i])
|
||||
}
|
||||
switch b {
|
||||
case '\\', '"':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte(b)
|
||||
case '\n':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('n')
|
||||
case '\r':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('r')
|
||||
case '\t':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('t')
|
||||
default:
|
||||
// This encodes bytes < 0x20 except for \n, \r, and \t.
|
||||
buf.WriteString(`\u00`)
|
||||
buf.WriteByte(hex[b>>4])
|
||||
buf.WriteByte(hex[b&0xF])
|
||||
}
|
||||
i++
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
c, size := utf8.DecodeRuneInString(s[i:])
|
||||
if c == utf8.RuneError {
|
||||
if start < i {
|
||||
buf.WriteString(s[start:i])
|
||||
}
|
||||
buf.WriteString(`\ufffd`)
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start < len(s) {
|
||||
buf.WriteString(s[start:])
|
||||
}
|
||||
buf.WriteByte('"')
|
||||
n, err := w.Write(buf.Bytes())
|
||||
poolBuffer(buf)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// NOTE: keep in sync with writeQuoteString above.
|
||||
func writeQuotedBytes(w io.Writer, s []byte) (int, error) {
|
||||
buf := getBuffer()
|
||||
buf.WriteByte('"')
|
||||
start := 0
|
||||
for i := 0; i < len(s); {
|
||||
if b := s[i]; b < utf8.RuneSelf {
|
||||
if 0x20 <= b && b != '\\' && b != '"' {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if start < i {
|
||||
buf.Write(s[start:i])
|
||||
}
|
||||
switch b {
|
||||
case '\\', '"':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte(b)
|
||||
case '\n':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('n')
|
||||
case '\r':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('r')
|
||||
case '\t':
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteByte('t')
|
||||
default:
|
||||
// This encodes bytes < 0x20 except for \n, \r, and \t.
|
||||
buf.WriteString(`\u00`)
|
||||
buf.WriteByte(hex[b>>4])
|
||||
buf.WriteByte(hex[b&0xF])
|
||||
}
|
||||
i++
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
c, size := utf8.DecodeRune(s[i:])
|
||||
if c == utf8.RuneError {
|
||||
if start < i {
|
||||
buf.Write(s[start:i])
|
||||
}
|
||||
buf.WriteString(`\ufffd`)
|
||||
i += size
|
||||
start = i
|
||||
continue
|
||||
}
|
||||
i += size
|
||||
}
|
||||
if start < len(s) {
|
||||
buf.Write(s[start:])
|
||||
}
|
||||
buf.WriteByte('"')
|
||||
n, err := w.Write(buf.Bytes())
|
||||
poolBuffer(buf)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
|
||||
// or it returns -1.
|
||||
func getu4(s []byte) rune {
|
||||
if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
|
||||
return -1
|
||||
}
|
||||
r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return rune(r)
|
||||
}
|
||||
|
||||
func unquoteBytes(s []byte) (t []byte, ok bool) {
|
||||
if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
|
||||
return
|
||||
}
|
||||
s = s[1 : len(s)-1]
|
||||
|
||||
// Check for unusual characters. If there are none,
|
||||
// then no unquoting is needed, so return a slice of the
|
||||
// original bytes.
|
||||
r := 0
|
||||
for r < len(s) {
|
||||
c := s[r]
|
||||
if c == '\\' || c == '"' || c < ' ' {
|
||||
break
|
||||
}
|
||||
if c < utf8.RuneSelf {
|
||||
r++
|
||||
continue
|
||||
}
|
||||
rr, size := utf8.DecodeRune(s[r:])
|
||||
if rr == utf8.RuneError {
|
||||
break
|
||||
}
|
||||
r += size
|
||||
}
|
||||
if r == len(s) {
|
||||
return s, true
|
||||
}
|
||||
|
||||
b := make([]byte, len(s)+2*utf8.UTFMax)
|
||||
w := copy(b, s[0:r])
|
||||
for r < len(s) {
|
||||
// Out of room? Can only happen if s is full of
|
||||
// malformed UTF-8 and we're replacing each
|
||||
// byte with RuneError.
|
||||
if w >= len(b)-2*utf8.UTFMax {
|
||||
nb := make([]byte, (len(b)+utf8.UTFMax)*2)
|
||||
copy(nb, b[0:w])
|
||||
b = nb
|
||||
}
|
||||
switch c := s[r]; {
|
||||
case c == '\\':
|
||||
r++
|
||||
if r >= len(s) {
|
||||
return
|
||||
}
|
||||
switch s[r] {
|
||||
default:
|
||||
return
|
||||
case '"', '\\', '/', '\'':
|
||||
b[w] = s[r]
|
||||
r++
|
||||
w++
|
||||
case 'b':
|
||||
b[w] = '\b'
|
||||
r++
|
||||
w++
|
||||
case 'f':
|
||||
b[w] = '\f'
|
||||
r++
|
||||
w++
|
||||
case 'n':
|
||||
b[w] = '\n'
|
||||
r++
|
||||
w++
|
||||
case 'r':
|
||||
b[w] = '\r'
|
||||
r++
|
||||
w++
|
||||
case 't':
|
||||
b[w] = '\t'
|
||||
r++
|
||||
w++
|
||||
case 'u':
|
||||
r--
|
||||
rr := getu4(s[r:])
|
||||
if rr < 0 {
|
||||
return
|
||||
}
|
||||
r += 6
|
||||
if utf16.IsSurrogate(rr) {
|
||||
rr1 := getu4(s[r:])
|
||||
if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
|
||||
// A valid pair; consume.
|
||||
r += 6
|
||||
w += utf8.EncodeRune(b[w:], dec)
|
||||
break
|
||||
}
|
||||
// Invalid surrogate; fall back to replacement rune.
|
||||
rr = unicode.ReplacementChar
|
||||
}
|
||||
w += utf8.EncodeRune(b[w:], rr)
|
||||
}
|
||||
|
||||
// Quote, control characters are invalid.
|
||||
case c == '"', c < ' ':
|
||||
return
|
||||
|
||||
// ASCII
|
||||
case c < utf8.RuneSelf:
|
||||
b[w] = c
|
||||
r++
|
||||
w++
|
||||
|
||||
// Coerce to well-formed UTF-8.
|
||||
default:
|
||||
rr, size := utf8.DecodeRune(s[r:])
|
||||
r += size
|
||||
w += utf8.EncodeRune(b[w:], rr)
|
||||
}
|
||||
}
|
||||
return b[0:w], true
|
||||
}
|
||||
8 vendor/github.com/miekg/dns/.codecov.yml generated vendored Normal file
@@ -0,0 +1,8 @@
coverage:
  status:
    project:
      default:
        target: 40%
        threshold: null
    patch: false
    changes: false
4 vendor/github.com/miekg/dns/.gitignore generated vendored Normal file
@@ -0,0 +1,4 @@
*.6
tags
test.out
a.out
18 vendor/github.com/miekg/dns/.travis.yml generated vendored Normal file
@@ -0,0 +1,18 @@
language: go
sudo: false

go:
  - 1.10.x
  - 1.11.x
  - tip

before_install:
  # don't use the miekg/dns when testing forks
  - mkdir -p $GOPATH/src/github.com/miekg
  - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true

script:
  - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...

after_success:
  - bash <(curl -s https://codecov.io/bash)
1 vendor/github.com/miekg/dns/AUTHORS generated vendored Normal file
@@ -0,0 +1 @@
Miek Gieben <miek@miek.nl>
10 vendor/github.com/miekg/dns/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,10 @@
Alex A. Skinner
Andrew Tunnell-Jones
Ask Bjørn Hansen
Dave Cheney
Dusty Wilson
Marek Majkowski
Peter van Dijk
Omri Bahumi
Alex Sergeyev
James Hartig
9 vendor/github.com/miekg/dns/COPYRIGHT generated vendored Normal file
@@ -0,0 +1,9 @@
Copyright 2009 The Go Authors. All rights reserved. Use of this source code
is governed by a BSD-style license that can be found in the LICENSE file.
Extensions of the original work are copyright (c) 2011 Miek Gieben

Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.

Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.
57 vendor/github.com/miekg/dns/Gopkg.lock generated vendored Normal file
@@ -0,0 +1,57 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:6914c49eed986dfb8dffb33516fa129c49929d4d873f41e073c83c11c372b870"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"ed25519",
|
||||
"ed25519/internal/edwards25519",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:08e41d63f8dac84d83797368b56cf0b339e42d0224e5e56668963c28aec95685"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"bpf",
|
||||
"context",
|
||||
"internal/iana",
|
||||
"internal/socket",
|
||||
"ipv4",
|
||||
"ipv6",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
pruneopts = ""
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:149a432fabebb8221a80f77731b1cd63597197ded4f14af606ebe3a0959004ec"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix"]
|
||||
pruneopts = ""
|
||||
revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"golang.org/x/crypto/ed25519",
|
||||
"golang.org/x/net/ipv4",
|
||||
"golang.org/x/net/ipv6",
|
||||
"golang.org/x/sync/errgroup",
|
||||
"golang.org/x/sys/unix",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
38 vendor/github.com/miekg/dns/Gopkg.toml generated vendored Normal file
@@ -0,0 +1,38 @@
|
||||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sync"
|
||||
32 vendor/github.com/miekg/dns/LICENSE generated vendored Normal file
@@ -0,0 +1,32 @@
|
||||
Extensions of the original work are copyright (c) 2011 Miek Gieben
|
||||
|
||||
As this is fork of the official Go code the same license applies:
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
33 vendor/github.com/miekg/dns/Makefile.fuzz generated vendored Normal file
@@ -0,0 +1,33 @@
|
||||
# Makefile for fuzzing
|
||||
#
|
||||
# Uses go-fuzz and needs the tools installed.
|
||||
# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/
|
||||
#
|
||||
# Installing go-fuzz:
|
||||
# $ make -f Makefile.fuzz get
|
||||
# Installs:
|
||||
# * github.com/dvyukov/go-fuzz/go-fuzz
|
||||
# * get github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
all: build
|
||||
|
||||
.PHONY: build
|
||||
build:
|
||||
go-fuzz-build -tags fuzz github.com/miekg/dns
|
||||
|
||||
.PHONY: build-newrr
|
||||
build-newrr:
|
||||
go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns
|
||||
|
||||
.PHONY: fuzz
|
||||
fuzz:
|
||||
go-fuzz -bin=dns-fuzz.zip -workdir=fuzz
|
||||
|
||||
.PHONY: get
|
||||
get:
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm *-fuzz.zip
|
||||
52 vendor/github.com/miekg/dns/Makefile.release generated vendored Normal file
@@ -0,0 +1,52 @@
|
||||
# Makefile for releasing.
|
||||
#
|
||||
# The release is controlled from version.go. The version found there is
|
||||
# used to tag the git repo; we're not building any artifacts, so there is nothing
|
||||
# to upload to github.
|
||||
#
|
||||
# * Up the version in version.go
|
||||
# * Run: make -f Makefile.release release
|
||||
# * will *commit* your change with 'Release $VERSION'
|
||||
# * push to github
|
||||
#
|
||||
|
||||
define GO
|
||||
//+build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Println(dns.Version.String())
|
||||
}
|
||||
endef
|
||||
|
||||
$(file > version_release.go,$(GO))
|
||||
VERSION:=$(shell go run version_release.go)
|
||||
TAG="v$(VERSION)"
|
||||
|
||||
all:
|
||||
@echo Use the \'release\' target to start a release $(VERSION)
|
||||
rm -f version_release.go
|
||||
|
||||
.PHONY: release
|
||||
release: commit push
|
||||
@echo Released $(VERSION)
|
||||
rm -f version_release.go
|
||||
|
||||
.PHONY: commit
|
||||
commit:
|
||||
@echo Committing release $(VERSION)
|
||||
git commit -am"Release $(VERSION)"
|
||||
git tag $(TAG)
|
||||
|
||||
.PHONY: push
|
||||
push:
|
||||
@echo Pushing release $(VERSION) to master
|
||||
git push --tags
|
||||
git push
|
||||
172 vendor/github.com/miekg/dns/README.md generated vendored Normal file
@@ -0,0 +1,172 @@
|
||||
[](https://travis-ci.org/miekg/dns)
|
||||
[](https://codecov.io/github/miekg/dns?branch=master)
|
||||
[](https://goreportcard.com/report/miekg/dns)
|
||||
[](https://godoc.org/github.com/miekg/dns)
|
||||
|
||||
# Alternative (more granular) approach to a DNS library
|
||||
|
||||
> Less is more.
|
||||
|
||||
Complete and usable DNS library. All widely used Resource Records are supported, including the
|
||||
DNSSEC types. It follows a lean and mean philosophy. If there is stuff you should know as a DNS
|
||||
programmer there isn't a convenience function for it. Server side and client side programming is
|
||||
supported, i.e. you can build servers and resolvers with it.
|
||||
|
||||
We try to keep the "master" branch as sane as possible and at the bleeding edge of standards,
|
||||
avoiding breaking changes wherever reasonable. We support the last two versions of Go.
|
||||
|
||||
# Goals
|
||||
|
||||
* KISS;
|
||||
* Fast;
|
||||
* Small API. If it's easy to code in Go, don't make a function for it.
|
||||
|
||||
# Users
|
||||
|
||||
A not-so-up-to-date-list-that-may-be-actually-current:
|
||||
|
||||
* https://github.com/coredns/coredns
|
||||
* https://cloudflare.com
|
||||
* https://github.com/abh/geodns
|
||||
* http://www.statdns.com/
|
||||
* http://www.dnsinspect.com/
|
||||
* https://github.com/chuangbo/jianbing-dictionary-dns
|
||||
* http://www.dns-lg.com/
|
||||
* https://github.com/fcambus/rrda
|
||||
* https://github.com/kenshinx/godns
|
||||
* https://github.com/skynetservices/skydns
|
||||
* https://github.com/hashicorp/consul
|
||||
* https://github.com/DevelopersPL/godnsagent
|
||||
* https://github.com/duedil-ltd/discodns
|
||||
* https://github.com/StalkR/dns-reverse-proxy
|
||||
* https://github.com/tianon/rawdns
|
||||
* https://mesosphere.github.io/mesos-dns/
|
||||
* https://pulse.turbobytes.com/
|
||||
* https://play.google.com/store/apps/details?id=com.turbobytes.dig
|
||||
* https://github.com/fcambus/statzone
|
||||
* https://github.com/benschw/dns-clb-go
|
||||
* https://github.com/corny/dnscheck for http://public-dns.info/
|
||||
* https://namesmith.io
|
||||
* https://github.com/miekg/unbound
|
||||
* https://github.com/miekg/exdns
|
||||
* https://dnslookup.org
|
||||
* https://github.com/looterz/grimd
|
||||
* https://github.com/phamhongviet/serf-dns
|
||||
* https://github.com/mehrdadrad/mylg
|
||||
* https://github.com/bamarni/dockness
|
||||
* https://github.com/fffaraz/microdns
|
||||
* http://kelda.io
|
||||
* https://github.com/ipdcode/hades (JD.COM)
|
||||
* https://github.com/StackExchange/dnscontrol/
|
||||
* https://www.dnsperf.com/
|
||||
* https://dnssectest.net/
|
||||
* https://dns.apebits.com
|
||||
* https://github.com/oif/apex
|
||||
* https://github.com/jedisct1/dnscrypt-proxy
|
||||
* https://github.com/jedisct1/rpdns
|
||||
* https://github.com/xor-gate/sshfp
|
||||
* https://github.com/rs/dnstrace
|
||||
* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
|
||||
* https://github.com/semihalev/sdns
|
||||
|
||||
Send pull request if you want to be listed here.
|
||||
|
||||
# Features
|
||||
|
||||
* UDP/TCP queries, IPv4 and IPv6;
|
||||
* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported);
|
||||
* Fast:
|
||||
* Reply speed around ~ 80K qps (faster hardware results in more qps);
|
||||
* Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds;
|
||||
* Server side programming (mimicking the net/http package);
|
||||
* Client side programming;
|
||||
* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519;
|
||||
* EDNS0, NSID, Cookies;
|
||||
* AXFR/IXFR;
|
||||
* TSIG, SIG(0);
|
||||
* DNS over TLS: optional encrypted connection between client and server;
|
||||
* DNS name compression;
|
||||
* Depends only on the standard library.
|
||||
|
||||
Have fun!
|
||||
|
||||
Miek Gieben - 2010-2012 - <miek@miek.nl>
|
||||
|
||||
# Building
|
||||
|
||||
Building is done with the `go` tool. If you have setup your GOPATH correctly, the following should
|
||||
work:
|
||||
|
||||
go get github.com/miekg/dns
|
||||
go build github.com/miekg/dns
|
||||
|
||||
## Examples
|
||||
|
||||
A short "how to use the API" is at the beginning of doc.go (this also will show
|
||||
when you call `godoc github.com/miekg/dns`).
|
||||
|
||||
Example programs can be found in the `github.com/miekg/exdns` repository.
|
||||
|
||||
## Supported RFCs
|
||||
|
||||
*all of them*
|
||||
|
||||
* 103{4,5} - DNS standard
|
||||
* 1348 - NSAP record (removed the record)
|
||||
* 1982 - Serial Arithmetic
|
||||
* 1876 - LOC record
|
||||
* 1995 - IXFR
|
||||
* 1996 - DNS notify
|
||||
* 2136 - DNS Update (dynamic updates)
|
||||
* 2181 - RRset definition - there is no RRset type though, just []RR
|
||||
* 2537 - RSAMD5 DNS keys
|
||||
* 2065 - DNSSEC (updated in later RFCs)
|
||||
* 2671 - EDNS record
|
||||
* 2782 - SRV record
|
||||
* 2845 - TSIG record
|
||||
* 2915 - NAPTR record
|
||||
* 2929 - DNS IANA Considerations
|
||||
* 3110 - RSASHA1 DNS keys
|
||||
* 3225 - DO bit (DNSSEC OK)
|
||||
* 340{1,2,3} - NAPTR record
|
||||
* 3445 - Limiting the scope of (DNS)KEY
|
||||
* 3597 - Unknown RRs
|
||||
* 403{3,4,5} - DNSSEC + validation functions
|
||||
* 4255 - SSHFP record
|
||||
* 4343 - Case insensitivity
|
||||
* 4408 - SPF record
|
||||
* 4509 - SHA256 Hash in DS
|
||||
* 4592 - Wildcards in the DNS
|
||||
* 4635 - HMAC SHA TSIG
|
||||
* 4701 - DHCID
|
||||
* 4892 - id.server
|
||||
* 5001 - NSID
|
||||
* 5155 - NSEC3 record
|
||||
* 5205 - HIP record
|
||||
* 5702 - SHA2 in the DNS
|
||||
* 5936 - AXFR
|
||||
* 5966 - TCP implementation recommendations
|
||||
* 6605 - ECDSA
|
||||
* 6725 - IANA Registry Update
|
||||
* 6742 - ILNP DNS
|
||||
* 6840 - Clarifications and Implementation Notes for DNS Security
|
||||
* 6844 - CAA record
|
||||
* 6891 - EDNS0 update
|
||||
* 6895 - DNS IANA considerations
|
||||
* 6975 - Algorithm Understanding in DNSSEC
|
||||
* 7043 - EUI48/EUI64 records
|
||||
* 7314 - DNS (EDNS) EXPIRE Option
|
||||
* 7477 - CSYNC RR
|
||||
* 7828 - edns-tcp-keepalive EDNS0 Option
|
||||
* 7553 - URI record
|
||||
* 7858 - DNS over TLS: Initiation and Performance Considerations
|
||||
* 7871 - EDNS0 Client Subnet
|
||||
* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
|
||||
* 8080 - EdDSA for DNSSEC
|
||||
|
||||
## Loosely based upon
|
||||
|
||||
* `ldns`
|
||||
* `NSD`
|
||||
* `Net::DNS`
|
||||
* `GRONG`
|
||||
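To complement the README above, here is a minimal sketch of a client query with this package; the resolver address and query name are placeholders, not values taken from this repository:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Build a question for the A records of example.com.
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	// Exchange sends the query over UDP and waits for the reply
	// (see client.go below for the implementation).
	r, err := dns.Exchange(m, "192.0.2.53:53")
	if err != nil {
		log.Fatal(err)
	}
	for _, rr := range r.Answer {
		fmt.Println(rr.String())
	}
}
```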
577 vendor/github.com/miekg/dns/client.go generated vendored Normal file
@@ -0,0 +1,577 @@
|
||||
package dns
|
||||
|
||||
// A client implementation.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
dnsTimeout time.Duration = 2 * time.Second
|
||||
tcpIdleTimeout time.Duration = 8 * time.Second
|
||||
|
||||
dohMimeType = "application/dns-message"
|
||||
)
|
||||
|
||||
// A Conn represents a connection to a DNS server.
|
||||
type Conn struct {
|
||||
net.Conn // a net.Conn holding the connection
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
tsigRequestMAC string
|
||||
}
|
||||
|
||||
// A Client defines parameters for a DNS client.
|
||||
type Client struct {
|
||||
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TLSConfig *tls.Config // TLS connection configuration
|
||||
Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
|
||||
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
|
||||
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
|
||||
// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
|
||||
Timeout time.Duration
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
|
||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
HTTPClient *http.Client // The http.Client to use for DNS-over-HTTPS
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
|
||||
group singleflight
|
||||
}
|
||||
|
||||
// Exchange performs a synchronous UDP query. It sends the message m to the address
|
||||
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
|
||||
// will it fall back to TCP in case of truncation.
|
||||
// See client.Exchange for more information on setting larger buffer sizes.
|
||||
func Exchange(m *Msg, a string) (r *Msg, err error) {
|
||||
client := Client{Net: "udp"}
|
||||
r, _, err = client.Exchange(m, a)
|
||||
return r, err
|
||||
}
|
||||
|
||||
func (c *Client) dialTimeout() time.Duration {
|
||||
if c.Timeout != 0 {
|
||||
return c.Timeout
|
||||
}
|
||||
if c.DialTimeout != 0 {
|
||||
return c.DialTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) readTimeout() time.Duration {
|
||||
if c.ReadTimeout != 0 {
|
||||
return c.ReadTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) writeTimeout() time.Duration {
|
||||
if c.WriteTimeout != 0 {
|
||||
return c.WriteTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
// Dial connects to the address on the named network.
|
||||
func (c *Client) Dial(address string) (conn *Conn, err error) {
|
||||
// create a new dialer with the appropriate timeout
|
||||
var d net.Dialer
|
||||
if c.Dialer == nil {
|
||||
d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
|
||||
} else {
|
||||
d = *c.Dialer
|
||||
}
|
||||
|
||||
network := c.Net
|
||||
if network == "" {
|
||||
network = "udp"
|
||||
}
|
||||
|
||||
useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls")
|
||||
|
||||
conn = new(Conn)
|
||||
if useTLS {
|
||||
network = strings.TrimSuffix(network, "-tls")
|
||||
|
||||
conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
|
||||
} else {
|
||||
conn.Conn, err = d.Dial(network, address)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// Exchange performs a synchronous query. It sends the message m to the address
|
||||
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
|
||||
//
|
||||
// c := new(dns.Client)
|
||||
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
|
||||
//
|
||||
// Exchange does not retry a failed query, nor will it fall back to TCP in
|
||||
// case of truncation.
|
||||
// It is up to the caller to create a message that allows for larger responses to be
|
||||
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
|
||||
// buffer, see SetEdns0. Messages without an OPT RR will fallback to the historic limit
|
||||
// of 512 bytes
|
||||
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
|
||||
// attribute appropriately
|
||||
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
|
||||
if !c.SingleInflight {
|
||||
if c.Net == "https" {
|
||||
// TODO(tmthrgd): pipe timeouts into exchangeDOH
|
||||
return c.exchangeDOH(context.TODO(), m, address)
|
||||
}
|
||||
|
||||
return c.exchange(m, address)
|
||||
}
|
||||
|
||||
t := "nop"
|
||||
if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
|
||||
t = t1
|
||||
}
|
||||
cl := "nop"
|
||||
if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
|
||||
cl = cl1
|
||||
}
|
||||
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
|
||||
if c.Net == "https" {
|
||||
// TODO(tmthrgd): pipe timeouts into exchangeDOH
|
||||
return c.exchangeDOH(context.TODO(), m, address)
|
||||
}
|
||||
|
||||
return c.exchange(m, address)
|
||||
})
|
||||
if r != nil && shared {
|
||||
r = r.Copy()
|
||||
}
|
||||
return r, rtt, err
|
||||
}
|
||||
|
||||
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
var co *Conn
|
||||
|
||||
co, err = c.Dial(a)
|
||||
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer co.Close()
|
||||
|
||||
opt := m.IsEdns0()
|
||||
// If EDNS0 is used use that for size.
|
||||
if opt != nil && opt.UDPSize() >= MinMsgSize {
|
||||
co.UDPSize = opt.UDPSize()
|
||||
}
|
||||
// Otherwise use the client's configured UDP size.
|
||||
if opt == nil && c.UDPSize >= MinMsgSize {
|
||||
co.UDPSize = c.UDPSize
|
||||
}
|
||||
|
||||
co.TsigSecret = c.TsigSecret
|
||||
t := time.Now()
|
||||
// write with the appropriate write timeout
|
||||
co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
rtt = time.Since(t)
|
||||
return r, rtt, err
|
||||
}
|
||||
|
||||
func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
p, err := m.Pack()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, a, bytes.NewReader(p))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", dohMimeType)
|
||||
req.Header.Set("Accept", dohMimeType)
|
||||
|
||||
hc := http.DefaultClient
|
||||
if c.HTTPClient != nil {
|
||||
hc = c.HTTPClient
|
||||
}
|
||||
|
||||
if ctx != context.Background() && ctx != context.TODO() {
|
||||
req = req.WithContext(ctx)
|
||||
}
|
||||
|
||||
t := time.Now()
|
||||
|
||||
resp, err := hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer closeHTTPBody(resp.Body)
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status)
|
||||
}
|
||||
|
||||
if ct := resp.Header.Get("Content-Type"); ct != dohMimeType {
|
||||
return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType)
|
||||
}
|
||||
|
||||
p, err = ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
rtt = time.Since(t)
|
||||
|
||||
r = new(Msg)
|
||||
if err := r.Unpack(p); err != nil {
|
||||
return r, 0, err
|
||||
}
|
||||
|
||||
// TODO: TSIG? Is it even supported over DoH?
|
||||
|
||||
return r, rtt, nil
|
||||
}
|
||||
|
||||
func closeHTTPBody(r io.ReadCloser) error {
|
||||
io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20))
|
||||
return r.Close()
|
||||
}
|
||||
|
||||
// ReadMsg reads a message from the connection co.
|
||||
// If the received message contains a TSIG record the transaction signature
|
||||
// is verified. This method always tries to return the message, however if an
|
||||
// error is returned there are no guarantees that the returned message is a
|
||||
// valid representation of the packet read.
|
||||
func (co *Conn) ReadMsg() (*Msg, error) {
|
||||
p, err := co.ReadMsgHeader(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := new(Msg)
|
||||
if err := m.Unpack(p); err != nil {
|
||||
// If an error was returned, we still want to allow the user to use
|
||||
// the message, but naively they can just check err if they don't want
|
||||
// to use an erroneous message
|
||||
return m, err
|
||||
}
|
||||
if t := m.IsTsig(); t != nil {
|
||||
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
|
||||
return m, ErrSecret
|
||||
}
|
||||
// Need to work on the original message p, as that was used to calculate the tsig.
|
||||
err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
|
||||
}
|
||||
return m, err
|
||||
}
|
||||
|
||||
// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
|
||||
// Returns message as a byte slice to be parsed with Msg.Unpack later on.
|
||||
// Note that error handling on the message body is not possible as only the header is parsed.
|
||||
func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
|
||||
var (
|
||||
p []byte
|
||||
n int
|
||||
err error
|
||||
)
|
||||
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
r := t.(io.Reader)
|
||||
|
||||
// First two bytes specify the length of the entire message.
|
||||
l, err := tcpMsgLen(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p = make([]byte, l)
|
||||
n, err = tcpRead(r, p)
|
||||
default:
|
||||
if co.UDPSize > MinMsgSize {
|
||||
p = make([]byte, co.UDPSize)
|
||||
} else {
|
||||
p = make([]byte, MinMsgSize)
|
||||
}
|
||||
n, err = co.Read(p)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if n < headerSize {
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
|
||||
p = p[:n]
|
||||
if hdr != nil {
|
||||
dh, _, err := unpackMsgHdr(p, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
*hdr = dh
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
|
||||
func tcpMsgLen(t io.Reader) (int, error) {
|
||||
p := []byte{0, 0}
|
||||
n, err := t.Read(p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// As seen with my local router/switch, returns 1 byte on the above read,
|
||||
// resulting in a ShortRead. Just write it out (instead of a loop) and read the
|
||||
// other byte.
|
||||
if n == 1 {
|
||||
n1, err := t.Read(p[1:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n += n1
|
||||
}
|
||||
|
||||
if n != 2 {
|
||||
return 0, ErrShortRead
|
||||
}
|
||||
l := binary.BigEndian.Uint16(p)
|
||||
if l == 0 {
|
||||
return 0, ErrShortRead
|
||||
}
|
||||
return int(l), nil
|
||||
}
|
||||
|
||||
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
|
||||
func tcpRead(t io.Reader, p []byte) (int, error) {
|
||||
n, err := t.Read(p)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
for n < len(p) {
|
||||
j, err := t.Read(p[n:])
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
n += j
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Read implements the net.Conn read method.
|
||||
func (co *Conn) Read(p []byte) (n int, err error) {
|
||||
if co.Conn == nil {
|
||||
return 0, ErrConnEmpty
|
||||
}
|
||||
if len(p) < 2 {
|
||||
return 0, io.ErrShortBuffer
|
||||
}
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
r := t.(io.Reader)
|
||||
|
||||
l, err := tcpMsgLen(r)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if l > len(p) {
|
||||
return int(l), io.ErrShortBuffer
|
||||
}
|
||||
return tcpRead(r, p[:l])
|
||||
}
|
||||
// UDP connection
|
||||
n, err = co.Conn.Read(p)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// WriteMsg sends a message through the connection co.
|
||||
// If the message m contains a TSIG record the transaction
|
||||
// signature is calculated.
|
||||
func (co *Conn) WriteMsg(m *Msg) (err error) {
|
||||
var out []byte
|
||||
if t := m.IsTsig(); t != nil {
|
||||
mac := ""
|
||||
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
|
||||
return ErrSecret
|
||||
}
|
||||
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
|
||||
// Set for the next read, although only used in zone transfers
|
||||
co.tsigRequestMAC = mac
|
||||
} else {
|
||||
out, err = m.Pack()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = co.Write(out); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write implements the net.Conn Write method.
|
||||
func (co *Conn) Write(p []byte) (n int, err error) {
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
w := t.(io.Writer)
|
||||
|
||||
lp := len(p)
|
||||
if lp < 2 {
|
||||
return 0, io.ErrShortBuffer
|
||||
}
|
||||
if lp > MaxMsgSize {
|
||||
return 0, &Error{err: "message too large"}
|
||||
}
|
||||
l := make([]byte, 2, lp+2)
|
||||
binary.BigEndian.PutUint16(l, uint16(lp))
|
||||
p = append(l, p...)
|
||||
n, err := io.Copy(w, bytes.NewReader(p))
|
||||
return int(n), err
|
||||
}
|
||||
n, err = co.Conn.Write(p)
|
||||
return n, err
|
||||
}
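For the TCP and TLS branches above, each DNS message on the wire is preceded by its length as a 16-bit big-endian integer. A minimal, hedged sketch of that framing, independent of this package:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frame prefixes a packed DNS message with its 16-bit big-endian length,
// mirroring what Conn.Write does for stream transports.
func frame(msg []byte) []byte {
	out := make([]byte, 2, 2+len(msg))
	binary.BigEndian.PutUint16(out, uint16(len(msg)))
	return append(out, msg...)
}

func main() {
	fmt.Printf("% x\n", frame([]byte{0xab, 0xcd})) // prints: 00 02 ab cd
}
```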
|
||||
|
||||
// Return the appropriate timeout for a specific request
|
||||
func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
|
||||
var requestTimeout time.Duration
|
||||
if c.Timeout != 0 {
|
||||
requestTimeout = c.Timeout
|
||||
} else {
|
||||
requestTimeout = timeout
|
||||
}
|
||||
// net.Dialer.Timeout has priority if smaller than the timeouts computed so
|
||||
// far
|
||||
if c.Dialer != nil && c.Dialer.Timeout != 0 {
|
||||
if c.Dialer.Timeout < requestTimeout {
|
||||
requestTimeout = c.Dialer.Timeout
|
||||
}
|
||||
}
|
||||
return requestTimeout
|
||||
}
|
||||
|
||||
// Dial connects to the address on the named network.
|
||||
func Dial(network, address string) (conn *Conn, err error) {
|
||||
conn = new(Conn)
|
||||
conn.Conn, err = net.Dial(network, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
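A hedged usage sketch of Dial together with the Conn.WriteMsg and Conn.ReadMsg methods shown above; the resolver address is a placeholder assumption:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	co, err := dns.Dial("udp", "192.0.2.53:53") // placeholder resolver address
	if err != nil {
		panic(err)
	}
	defer co.Close()

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	if err := co.WriteMsg(m); err != nil {
		panic(err)
	}
	r, err := co.ReadMsg()
	if err != nil {
		panic(err)
	}
	fmt.Println("answers:", len(r.Answer))
}
```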
|
||||
|
||||
// ExchangeContext performs a synchronous UDP query, like Exchange. It
|
||||
// additionally obeys deadlines from the passed Context.
|
||||
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
|
||||
client := Client{Net: "udp"}
|
||||
r, _, err = client.ExchangeContext(ctx, m, a)
|
||||
// ignoring rtt to leave the original ExchangeContext API unchanged, but
// this function will go away
|
||||
return r, err
|
||||
}
|
||||
|
||||
// ExchangeConn performs a synchronous query. It sends the message m via the connection
|
||||
// c and waits for a reply. The connection c is not closed by ExchangeConn.
|
||||
// This function is going away, but can easily be mimicked:
|
||||
//
|
||||
// co := &dns.Conn{Conn: c} // c is your net.Conn
|
||||
// co.WriteMsg(m)
|
||||
// in, _ := co.ReadMsg()
|
||||
// co.Close()
|
||||
//
|
||||
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
|
||||
println("dns: ExchangeConn: this function is deprecated")
|
||||
co := new(Conn)
|
||||
co.Conn = c
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
// DialTimeout acts like Dial but takes a timeout.
|
||||
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
|
||||
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
|
||||
conn, err = client.Dial(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialWithTLS connects to the address on the named network with TLS.
|
||||
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
|
||||
if !strings.HasSuffix(network, "-tls") {
|
||||
network += "-tls"
|
||||
}
|
||||
client := Client{Net: network, TLSConfig: tlsConfig}
|
||||
conn, err = client.Dial(address)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
|
||||
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
|
||||
if !strings.HasSuffix(network, "-tls") {
|
||||
network += "-tls"
|
||||
}
|
||||
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
|
||||
conn, err = client.Dial(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// ExchangeContext acts like Exchange, but honors the deadline on the provided
|
||||
// context, if present. If there is both a context deadline and a configured
|
||||
// timeout on the client, the earliest of the two takes effect.
|
||||
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
if !c.SingleInflight && c.Net == "https" {
|
||||
return c.exchangeDOH(ctx, m, a)
|
||||
}
|
||||
|
||||
var timeout time.Duration
|
||||
if deadline, ok := ctx.Deadline(); !ok {
|
||||
timeout = 0
|
||||
} else {
|
||||
timeout = time.Until(deadline)
|
||||
}
|
||||
// not passing the context to the underlying calls, as the API does not support
|
||||
// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
|
||||
// TODO(tmthrgd): this is a race condition
|
||||
c.Dialer = &net.Dialer{Timeout: timeout}
|
||||
return c.Exchange(m, a)
|
||||
}
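A hedged sketch of Client.ExchangeContext with a context deadline; the target resolver is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	c := &dns.Client{Net: "udp"}
	r, rtt, err := c.ExchangeContext(ctx, m, "192.0.2.53:53") // placeholder resolver
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	fmt.Println("rcode:", r.Rcode, "rtt:", rtt)
}
```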
|
||||
vendor/github.com/miekg/dns/clientconfig.go (139 lines, generated, vendored, new file)
@@ -0,0 +1,139 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ClientConfig wraps the contents of the /etc/resolv.conf file.
|
||||
type ClientConfig struct {
|
||||
Servers []string // servers to use
|
||||
Search []string // suffixes to append to local name
|
||||
Port string // what port to use
|
||||
Ndots int // number of dots in name to trigger absolute lookup
|
||||
Timeout int // seconds before giving up on packet
|
||||
Attempts int // lost packets before giving up on server, not used in the package dns
|
||||
}
|
||||
|
||||
// ClientConfigFromFile parses a resolv.conf(5) like file and returns
|
||||
// a *ClientConfig.
|
||||
func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
|
||||
file, err := os.Open(resolvconf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
return ClientConfigFromReader(file)
|
||||
}
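A short, hedged usage sketch of ClientConfigFromFile reading the system resolver configuration:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	cfg, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		panic(err)
	}
	fmt.Println("servers:", cfg.Servers, "port:", cfg.Port, "ndots:", cfg.Ndots)
}
```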
|
||||
|
||||
// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
|
||||
func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
|
||||
c := new(ClientConfig)
|
||||
scanner := bufio.NewScanner(resolvconf)
|
||||
c.Servers = make([]string, 0)
|
||||
c.Search = make([]string, 0)
|
||||
c.Port = "53"
|
||||
c.Ndots = 1
|
||||
c.Timeout = 5
|
||||
c.Attempts = 2
|
||||
|
||||
for scanner.Scan() {
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line := scanner.Text()
|
||||
f := strings.Fields(line)
|
||||
if len(f) < 1 {
|
||||
continue
|
||||
}
|
||||
switch f[0] {
|
||||
case "nameserver": // add one name server
|
||||
if len(f) > 1 {
|
||||
// One more check: make sure server name is
|
||||
// just an IP address. Otherwise we need DNS
|
||||
// to look it up.
|
||||
name := f[1]
|
||||
c.Servers = append(c.Servers, name)
|
||||
}
|
||||
|
||||
case "domain": // set search path to just this domain
|
||||
if len(f) > 1 {
|
||||
c.Search = make([]string, 1)
|
||||
c.Search[0] = f[1]
|
||||
} else {
|
||||
c.Search = make([]string, 0)
|
||||
}
|
||||
|
||||
case "search": // set search path to given servers
|
||||
c.Search = make([]string, len(f)-1)
|
||||
for i := 0; i < len(c.Search); i++ {
|
||||
c.Search[i] = f[i+1]
|
||||
}
|
||||
|
||||
case "options": // magic options
|
||||
for i := 1; i < len(f); i++ {
|
||||
s := f[i]
|
||||
switch {
|
||||
case len(s) >= 6 && s[:6] == "ndots:":
|
||||
n, _ := strconv.Atoi(s[6:])
|
||||
if n < 0 {
|
||||
n = 0
|
||||
} else if n > 15 {
|
||||
n = 15
|
||||
}
|
||||
c.Ndots = n
|
||||
case len(s) >= 8 && s[:8] == "timeout:":
|
||||
n, _ := strconv.Atoi(s[8:])
|
||||
if n < 1 {
|
||||
n = 1
|
||||
}
|
||||
c.Timeout = n
|
||||
case len(s) >= 9 && s[:9] == "attempts:":
|
||||
n, _ := strconv.Atoi(s[9:])
|
||||
if n < 1 {
|
||||
n = 1
|
||||
}
|
||||
c.Attempts = n
|
||||
case s == "rotate":
|
||||
/* not imp */
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NameList returns all of the names that should be queried based on the
|
||||
// config. It is based on Go's net/dns name building, but it does not
|
||||
// check the length of the resulting names.
|
||||
func (c *ClientConfig) NameList(name string) []string {
|
||||
// if this domain is already fully qualified, no append needed.
|
||||
if IsFqdn(name) {
|
||||
return []string{name}
|
||||
}
|
||||
|
||||
// Check to see if the name has more labels than Ndots. Do this before making
|
||||
// the domain fully qualified.
|
||||
hasNdots := CountLabel(name) > c.Ndots
|
||||
// Make the domain fully qualified.
|
||||
name = Fqdn(name)
|
||||
|
||||
// Make a list of names based off search.
|
||||
names := []string{}
|
||||
|
||||
// If name has enough dots, try that first.
|
||||
if hasNdots {
|
||||
names = append(names, name)
|
||||
}
|
||||
for _, s := range c.Search {
|
||||
names = append(names, Fqdn(name+s))
|
||||
}
|
||||
// If we didn't have enough dots, try after suffixes.
|
||||
if !hasNdots {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
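A hedged illustration of how NameList expands an unqualified name using the search list and Ndots; the suffixes are made-up placeholders:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	cfg := &dns.ClientConfig{
		Search: []string{"svc.cluster.local", "example.com"}, // placeholder suffixes
		Ndots:  1,
	}
	// "web" does not have more labels than Ndots, so the search suffixes are tried first:
	// [web.svc.cluster.local. web.example.com. web.]
	fmt.Println(cfg.NameList("web"))
}
```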
|
||||
vendor/github.com/miekg/dns/dane.go (43 lines, generated, vendored, new file)
@@ -0,0 +1,43 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
|
||||
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
|
||||
switch matchingType {
|
||||
case 0:
|
||||
switch selector {
|
||||
case 0:
|
||||
return hex.EncodeToString(cert.Raw), nil
|
||||
case 1:
|
||||
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
|
||||
}
|
||||
case 1:
|
||||
h := sha256.New()
|
||||
switch selector {
|
||||
case 0:
|
||||
h.Write(cert.Raw)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
case 1:
|
||||
h.Write(cert.RawSubjectPublicKeyInfo)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
case 2:
|
||||
h := sha512.New()
|
||||
switch selector {
|
||||
case 0:
|
||||
h.Write(cert.Raw)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
case 1:
|
||||
h.Write(cert.RawSubjectPublicKeyInfo)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
}
|
||||
return "", errors.New("dns: bad MatchingType or Selector")
|
||||
}
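A hedged sketch of using CertificateToDANE to compute the association data a TLSA record would carry for a server certificate, using selector 1 (SubjectPublicKeyInfo) and matching type 1 (SHA-256); the host and port are assumptions:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{}) // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	// selector=1 (SPKI), matchingType=1 (SHA-256), a common TLSA combination.
	hash, err := dns.CertificateToDANE(1, 1, cert)
	if err != nil {
		panic(err)
	}
	fmt.Println(hash) // compare against the Certificate field of the TLSA record
}
```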
|
||||
vendor/github.com/miekg/dns/defaults.go (288 lines, generated, vendored, new file)
@@ -0,0 +1,288 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const hexDigit = "0123456789abcdef"
|
||||
|
||||
// Everything is assumed in ClassINET.
|
||||
|
||||
// SetReply creates a reply message from a request message.
|
||||
func (dns *Msg) SetReply(request *Msg) *Msg {
|
||||
dns.Id = request.Id
|
||||
dns.Response = true
|
||||
dns.Opcode = request.Opcode
|
||||
if dns.Opcode == OpcodeQuery {
|
||||
dns.RecursionDesired = request.RecursionDesired // Copy rd bit
|
||||
dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
|
||||
}
|
||||
dns.Rcode = RcodeSuccess
|
||||
if len(request.Question) > 0 {
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = request.Question[0]
|
||||
}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetQuestion creates a question message, it sets the Question
|
||||
// section, generates an Id and sets the RecursionDesired (RD)
|
||||
// bit to true.
|
||||
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.RecursionDesired = true
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, t, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetNotify creates a notify message, it sets the Question
|
||||
// section, generates an Id and sets the Authoritative (AA)
|
||||
// bit to true.
|
||||
func (dns *Msg) SetNotify(z string) *Msg {
|
||||
dns.Opcode = OpcodeNotify
|
||||
dns.Authoritative = true
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeSOA, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetRcode creates an error message suitable for the request.
|
||||
func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
|
||||
dns.SetReply(request)
|
||||
dns.Rcode = rcode
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetRcodeFormatError creates a message with FormError set.
|
||||
func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
|
||||
dns.Rcode = RcodeFormatError
|
||||
dns.Opcode = OpcodeQuery
|
||||
dns.Response = true
|
||||
dns.Authoritative = false
|
||||
dns.Id = request.Id
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetUpdate makes the message a dynamic update message. It
|
||||
// sets the ZONE section to: z, TypeSOA, ClassINET.
|
||||
func (dns *Msg) SetUpdate(z string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Response = false
|
||||
dns.Opcode = OpcodeUpdate
|
||||
dns.Compress = false // BIND9 cannot handle compression
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeSOA, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetIxfr creates message for requesting an IXFR.
|
||||
func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Ns = make([]RR, 1)
|
||||
s := new(SOA)
|
||||
s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
|
||||
s.Serial = serial
|
||||
s.Ns = ns
|
||||
s.Mbox = mbox
|
||||
dns.Question[0] = Question{z, TypeIXFR, ClassINET}
|
||||
dns.Ns[0] = s
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetAxfr creates message for requesting an AXFR.
|
||||
func (dns *Msg) SetAxfr(z string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeAXFR, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetTsig appends a TSIG RR to the message.
|
||||
// This is only a skeleton TSIG RR that is added as the last RR in the
|
||||
// additional section. The TSIG is calculated when the message is being sent.
|
||||
func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
|
||||
t := new(TSIG)
|
||||
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
|
||||
t.Algorithm = algo
|
||||
t.Fudge = fudge
|
||||
t.TimeSigned = uint64(timesigned)
|
||||
t.OrigId = dns.Id
|
||||
dns.Extra = append(dns.Extra, t)
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetEdns0 appends an EDNS0 OPT RR to the message.
// TSIG should always be the last RR in a message.
|
||||
func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
|
||||
e := new(OPT)
|
||||
e.Hdr.Name = "."
|
||||
e.Hdr.Rrtype = TypeOPT
|
||||
e.SetUDPSize(udpsize)
|
||||
if do {
|
||||
e.SetDo()
|
||||
}
|
||||
dns.Extra = append(dns.Extra, e)
|
||||
return dns
|
||||
}
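A brief, hedged sketch combining SetQuestion and SetEdns0 to request DNSSEC records (DO bit) with a larger UDP buffer:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeTLSA)
	m.SetEdns0(4096, true) // 4096-byte UDP buffer, DO bit set

	fmt.Println(m.String())
}
```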
|
||||
|
||||
// IsTsig checks if the message has a TSIG record as the last record
|
||||
// in the additional section. It returns the TSIG record found or nil.
|
||||
func (dns *Msg) IsTsig() *TSIG {
|
||||
if len(dns.Extra) > 0 {
|
||||
if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
|
||||
return dns.Extra[len(dns.Extra)-1].(*TSIG)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEdns0 checks if the message has an EDNS0 (OPT) record; any EDNS0
|
||||
// record in the additional section will do. It returns the OPT record
|
||||
// found or nil.
|
||||
func (dns *Msg) IsEdns0() *OPT {
|
||||
// EDNS0 is at the end of the additional section, start there.
|
||||
// We might want to change this to *only* look at the last two
|
||||
// records. So we see TSIG and/or OPT - this is a slightly bigger
|
||||
// change though.
|
||||
for i := len(dns.Extra) - 1; i >= 0; i-- {
|
||||
if dns.Extra[i].Header().Rrtype == TypeOPT {
|
||||
return dns.Extra[i].(*OPT)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDomainName checks if s is a valid domain name. It returns the number of
// labels and true when the domain name is valid. Note that a non fully
// qualified domain name is considered valid; in this case the last label is
// counted in the number of labels. When false is returned the number of labels
// is not defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name as DNS is an 8 bit protocol. It checks that each
// label fits in 63 characters, but there is no length check for the entire
// string s, i.e. a domain name longer than 255 characters is considered valid.
|
||||
func IsDomainName(s string) (labels int, ok bool) {
|
||||
_, labels, err := packDomainName(s, nil, 0, nil, false)
|
||||
return labels, err == nil
|
||||
}
|
||||
|
||||
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
|
||||
// are the same domain true is returned as well.
|
||||
func IsSubDomain(parent, child string) bool {
|
||||
// Entire child is contained in parent
|
||||
return CompareDomainName(parent, child) == CountLabel(parent)
|
||||
}
|
||||
|
||||
// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
|
||||
// The checking is performed on the binary payload.
|
||||
func IsMsg(buf []byte) error {
|
||||
// Header
|
||||
if len(buf) < 12 {
|
||||
return errors.New("dns: bad message header")
|
||||
}
|
||||
// Header: Opcode
|
||||
// TODO(miek): more checks here, e.g. check all header bits.
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsFqdn checks if a domain name is fully qualified.
|
||||
func IsFqdn(s string) bool {
|
||||
l := len(s)
|
||||
if l == 0 {
|
||||
return false
|
||||
}
|
||||
return s[l-1] == '.'
|
||||
}
|
||||
|
||||
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
|
||||
// This means the RRs need to have the same type, name, and class. Returns true
|
||||
// if the RR set is valid, otherwise false.
|
||||
func IsRRset(rrset []RR) bool {
|
||||
if len(rrset) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(rrset) == 1 {
|
||||
return true
|
||||
}
|
||||
rrHeader := rrset[0].Header()
|
||||
rrType := rrHeader.Rrtype
|
||||
rrClass := rrHeader.Class
|
||||
rrName := rrHeader.Name
|
||||
|
||||
for _, rr := range rrset[1:] {
|
||||
curRRHeader := rr.Header()
|
||||
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
|
||||
// Mismatch between the records, so this is not a valid rrset for
// signing/verifying
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Fqdn returns the fully qualified domain name from s.
|
||||
// If s is already fully qualified, it behaves as the identity function.
|
||||
func Fqdn(s string) string {
|
||||
if IsFqdn(s) {
|
||||
return s
|
||||
}
|
||||
return s + "."
|
||||
}
|
||||
|
||||
// Copied from the official Go code.
|
||||
|
||||
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
|
||||
// address suitable for reverse DNS (PTR) record lookups or an error if it fails
|
||||
// to parse the IP address.
|
||||
func ReverseAddr(addr string) (arpa string, err error) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
return "", &Error{err: "unrecognized address: " + addr}
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
|
||||
strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
|
||||
}
|
||||
// Must be IPv6
|
||||
buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
|
||||
// Add it, in reverse, to the buffer
|
||||
for i := len(ip) - 1; i >= 0; i-- {
|
||||
v := ip[i]
|
||||
buf = append(buf, hexDigit[v&0xF])
|
||||
buf = append(buf, '.')
|
||||
buf = append(buf, hexDigit[v>>4])
|
||||
buf = append(buf, '.')
|
||||
}
|
||||
// Append "ip6.arpa." and return (buf already has the final .)
|
||||
buf = append(buf, "ip6.arpa."...)
|
||||
return string(buf), nil
|
||||
}
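A hedged illustration of ReverseAddr with a documentation-range address:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	arpa, err := dns.ReverseAddr("192.0.2.10")
	if err != nil {
		panic(err)
	}
	fmt.Println(arpa) // 10.2.0.192.in-addr.arpa.
}
```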
|
||||
|
||||
// String returns the string representation for the type t.
|
||||
func (t Type) String() string {
|
||||
if t1, ok := TypeToString[uint16(t)]; ok {
|
||||
return t1
|
||||
}
|
||||
return "TYPE" + strconv.Itoa(int(t))
|
||||
}
|
||||
|
||||
// String returns the string representation for the class c.
|
||||
func (c Class) String() string {
|
||||
if s, ok := ClassToString[uint16(c)]; ok {
|
||||
// Only emit mnemonics when they are unambiguous, specifically ANY is in both.
|
||||
if _, ok := StringToType[s]; !ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return "CLASS" + strconv.Itoa(int(c))
|
||||
}
|
||||
|
||||
// String returns the string representation for the name n.
|
||||
func (n Name) String() string {
|
||||
return sprintName(string(n))
|
||||
}
|
||||
vendor/github.com/miekg/dns/dns.go (97 lines, generated, vendored, new file)
@@ -0,0 +1,97 @@
|
||||
package dns
|
||||
|
||||
import "strconv"
|
||||
|
||||
const (
|
||||
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
|
||||
defaultTtl = 3600 // Default internal TTL.
|
||||
|
||||
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
|
||||
DefaultMsgSize = 4096
|
||||
// MinMsgSize is the minimal size of a DNS packet.
|
||||
MinMsgSize = 512
|
||||
// MaxMsgSize is the largest possible DNS packet.
|
||||
MaxMsgSize = 65535
|
||||
)
|
||||
|
||||
// Error represents a DNS error.
|
||||
type Error struct{ err string }
|
||||
|
||||
func (e *Error) Error() string {
|
||||
if e == nil {
|
||||
return "dns: <nil>"
|
||||
}
|
||||
return "dns: " + e.err
|
||||
}
|
||||
|
||||
// An RR represents a resource record.
|
||||
type RR interface {
|
||||
// Header returns the header of a resource record. The header contains
|
||||
// everything up to the rdata.
|
||||
Header() *RR_Header
|
||||
// String returns the text representation of the resource record.
|
||||
String() string
|
||||
|
||||
// copy returns a copy of the RR
|
||||
copy() RR
|
||||
// len returns the length (in octets) of the uncompressed RR in wire format.
|
||||
len() int
|
||||
// pack packs an RR into wire format.
|
||||
pack([]byte, int, map[string]int, bool) (int, error)
|
||||
}
|
||||
|
||||
// RR_Header is the header all DNS resource records share.
|
||||
type RR_Header struct {
|
||||
Name string `dns:"cdomain-name"`
|
||||
Rrtype uint16
|
||||
Class uint16
|
||||
Ttl uint32
|
||||
Rdlength uint16 // Length of data after header.
|
||||
}
|
||||
|
||||
// Header returns itself. This is here to make RR_Header implement the RR interface.
|
||||
func (h *RR_Header) Header() *RR_Header { return h }
|
||||
|
||||
// Just to implement the RR interface.
|
||||
func (h *RR_Header) copy() RR { return nil }
|
||||
|
||||
func (h *RR_Header) String() string {
|
||||
var s string
|
||||
|
||||
if h.Rrtype == TypeOPT {
|
||||
s = ";"
|
||||
// and maybe other things
|
||||
}
|
||||
|
||||
s += sprintName(h.Name) + "\t"
|
||||
s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
|
||||
s += Class(h.Class).String() + "\t"
|
||||
s += Type(h.Rrtype).String() + "\t"
|
||||
return s
|
||||
}
|
||||
|
||||
func (h *RR_Header) len() int {
|
||||
l := len(h.Name) + 1
|
||||
l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
|
||||
return l
|
||||
}
|
||||
|
||||
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
|
||||
func (rr *RFC3597) ToRFC3597(r RR) error {
|
||||
buf := make([]byte, r.len()*2)
|
||||
off, err := PackRR(r, buf, 0, nil, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:off]
|
||||
if int(r.Header().Rdlength) > off {
|
||||
return ErrBuf
|
||||
}
|
||||
|
||||
rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*rr = *rfc3597.(*RFC3597)
|
||||
return nil
|
||||
}
|
||||
vendor/github.com/miekg/dns/dnssec.go (801 lines, generated, vendored, new file)
@@ -0,0 +1,801 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
_ "crypto/md5"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
_ "crypto/sha1"
|
||||
_ "crypto/sha256"
|
||||
_ "crypto/sha512"
|
||||
"encoding/asn1"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// DNSSEC encryption algorithm codes.
|
||||
const (
|
||||
_ uint8 = iota
|
||||
RSAMD5
|
||||
DH
|
||||
DSA
|
||||
_ // Skip 4, RFC 6725, section 2.1
|
||||
RSASHA1
|
||||
DSANSEC3SHA1
|
||||
RSASHA1NSEC3SHA1
|
||||
RSASHA256
|
||||
_ // Skip 9, RFC 6725, section 2.1
|
||||
RSASHA512
|
||||
_ // Skip 11, RFC 6725, section 2.1
|
||||
ECCGOST
|
||||
ECDSAP256SHA256
|
||||
ECDSAP384SHA384
|
||||
ED25519
|
||||
ED448
|
||||
INDIRECT uint8 = 252
|
||||
PRIVATEDNS uint8 = 253 // Private (experimental keys)
|
||||
PRIVATEOID uint8 = 254
|
||||
)
|
||||
|
||||
// AlgorithmToString is a map of algorithm IDs to algorithm names.
|
||||
var AlgorithmToString = map[uint8]string{
|
||||
RSAMD5: "RSAMD5",
|
||||
DH: "DH",
|
||||
DSA: "DSA",
|
||||
RSASHA1: "RSASHA1",
|
||||
DSANSEC3SHA1: "DSA-NSEC3-SHA1",
|
||||
RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
|
||||
RSASHA256: "RSASHA256",
|
||||
RSASHA512: "RSASHA512",
|
||||
ECCGOST: "ECC-GOST",
|
||||
ECDSAP256SHA256: "ECDSAP256SHA256",
|
||||
ECDSAP384SHA384: "ECDSAP384SHA384",
|
||||
ED25519: "ED25519",
|
||||
ED448: "ED448",
|
||||
INDIRECT: "INDIRECT",
|
||||
PRIVATEDNS: "PRIVATEDNS",
|
||||
PRIVATEOID: "PRIVATEOID",
|
||||
}
|
||||
|
||||
// StringToAlgorithm is the reverse of AlgorithmToString.
|
||||
var StringToAlgorithm = reverseInt8(AlgorithmToString)
|
||||
|
||||
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
|
||||
var AlgorithmToHash = map[uint8]crypto.Hash{
|
||||
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
|
||||
DSA: crypto.SHA1,
|
||||
RSASHA1: crypto.SHA1,
|
||||
RSASHA1NSEC3SHA1: crypto.SHA1,
|
||||
RSASHA256: crypto.SHA256,
|
||||
ECDSAP256SHA256: crypto.SHA256,
|
||||
ECDSAP384SHA384: crypto.SHA384,
|
||||
RSASHA512: crypto.SHA512,
|
||||
ED25519: crypto.Hash(0),
|
||||
}
|
||||
|
||||
// DNSSEC hashing algorithm codes.
|
||||
const (
|
||||
_ uint8 = iota
|
||||
SHA1 // RFC 4034
|
||||
SHA256 // RFC 4509
|
||||
GOST94 // RFC 5933
|
||||
SHA384 // Experimental
|
||||
SHA512 // Experimental
|
||||
)
|
||||
|
||||
// HashToString is a map of hash IDs to names.
|
||||
var HashToString = map[uint8]string{
|
||||
SHA1: "SHA1",
|
||||
SHA256: "SHA256",
|
||||
GOST94: "GOST94",
|
||||
SHA384: "SHA384",
|
||||
SHA512: "SHA512",
|
||||
}
|
||||
|
||||
// StringToHash is a map of names to hash IDs.
|
||||
var StringToHash = reverseInt8(HashToString)
|
||||
|
||||
// DNSKEY flag values.
|
||||
const (
|
||||
SEP = 1
|
||||
REVOKE = 1 << 7
|
||||
ZONE = 1 << 8
|
||||
)
|
||||
|
||||
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
|
||||
type rrsigWireFmt struct {
|
||||
TypeCovered uint16
|
||||
Algorithm uint8
|
||||
Labels uint8
|
||||
OrigTtl uint32
|
||||
Expiration uint32
|
||||
Inception uint32
|
||||
KeyTag uint16
|
||||
SignerName string `dns:"domain-name"`
|
||||
/* No Signature */
|
||||
}
|
||||
|
||||
// Used for converting DNSKEY's rdata to wirefmt.
|
||||
type dnskeyWireFmt struct {
|
||||
Flags uint16
|
||||
Protocol uint8
|
||||
Algorithm uint8
|
||||
PublicKey string `dns:"base64"`
|
||||
/* Nothing is left out */
|
||||
}
|
||||
|
||||
func divRoundUp(a, b int) int {
|
||||
return (a + b - 1) / b
|
||||
}
|
||||
|
||||
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
|
||||
func (k *DNSKEY) KeyTag() uint16 {
|
||||
if k == nil {
|
||||
return 0
|
||||
}
|
||||
var keytag int
|
||||
switch k.Algorithm {
|
||||
case RSAMD5:
|
||||
// Look at the bottom two bytes of the modulus, which is the last
// item in the pubkey. We could do this faster by looking directly
|
||||
// at the base64 values. But I'm lazy.
|
||||
modulus, _ := fromBase64([]byte(k.PublicKey))
|
||||
if len(modulus) > 1 {
|
||||
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
|
||||
keytag = int(x)
|
||||
}
|
||||
default:
|
||||
keywire := new(dnskeyWireFmt)
|
||||
keywire.Flags = k.Flags
|
||||
keywire.Protocol = k.Protocol
|
||||
keywire.Algorithm = k.Algorithm
|
||||
keywire.PublicKey = k.PublicKey
|
||||
wire := make([]byte, DefaultMsgSize)
|
||||
n, err := packKeyWire(keywire, wire)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
wire = wire[:n]
|
||||
for i, v := range wire {
|
||||
if i&1 != 0 {
|
||||
keytag += int(v) // must be larger than uint32
|
||||
} else {
|
||||
keytag += int(v) << 8
|
||||
}
|
||||
}
|
||||
keytag += keytag >> 16 & 0xFFFF
|
||||
keytag &= 0xFFFF
|
||||
}
|
||||
return uint16(keytag)
|
||||
}
|
||||
|
||||
// ToDS converts a DNSKEY record to a DS record.
|
||||
func (k *DNSKEY) ToDS(h uint8) *DS {
|
||||
if k == nil {
|
||||
return nil
|
||||
}
|
||||
ds := new(DS)
|
||||
ds.Hdr.Name = k.Hdr.Name
|
||||
ds.Hdr.Class = k.Hdr.Class
|
||||
ds.Hdr.Rrtype = TypeDS
|
||||
ds.Hdr.Ttl = k.Hdr.Ttl
|
||||
ds.Algorithm = k.Algorithm
|
||||
ds.DigestType = h
|
||||
ds.KeyTag = k.KeyTag()
|
||||
|
||||
keywire := new(dnskeyWireFmt)
|
||||
keywire.Flags = k.Flags
|
||||
keywire.Protocol = k.Protocol
|
||||
keywire.Algorithm = k.Algorithm
|
||||
keywire.PublicKey = k.PublicKey
|
||||
wire := make([]byte, DefaultMsgSize)
|
||||
n, err := packKeyWire(keywire, wire)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
wire = wire[:n]
|
||||
|
||||
owner := make([]byte, 255)
|
||||
off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
|
||||
if err1 != nil {
|
||||
return nil
|
||||
}
|
||||
owner = owner[:off]
|
||||
// RFC4034:
|
||||
// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
|
||||
// "|" denotes concatenation
|
||||
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
|
||||
|
||||
var hash crypto.Hash
|
||||
switch h {
|
||||
case SHA1:
|
||||
hash = crypto.SHA1
|
||||
case SHA256:
|
||||
hash = crypto.SHA256
|
||||
case SHA384:
|
||||
hash = crypto.SHA384
|
||||
case SHA512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
s := hash.New()
|
||||
s.Write(owner)
|
||||
s.Write(wire)
|
||||
ds.Digest = hex.EncodeToString(s.Sum(nil))
|
||||
return ds
|
||||
}
|
||||
|
||||
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
|
||||
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
|
||||
c := &CDNSKEY{DNSKEY: *k}
|
||||
c.Hdr = k.Hdr
|
||||
c.Hdr.Rrtype = TypeCDNSKEY
|
||||
return c
|
||||
}
|
||||
|
||||
// ToCDS converts a DS record to a CDS record.
|
||||
func (d *DS) ToCDS() *CDS {
|
||||
c := &CDS{DS: *d}
|
||||
c.Hdr = d.Hdr
|
||||
c.Hdr.Rrtype = TypeCDS
|
||||
return c
|
||||
}
|
||||
|
||||
// Sign signs an RRSet. The signature needs to be filled in with the values:
|
||||
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
|
||||
// from the RRset. Sign returns a nil error when the signing went OK.
|
||||
// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
|
||||
// zero, it is used as-is, otherwise the TTL of the RRset is used as the
|
||||
// OrigTTL.
|
||||
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
||||
if k == nil {
|
||||
return ErrPrivKey
|
||||
}
|
||||
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
|
||||
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
rr.Hdr.Rrtype = TypeRRSIG
|
||||
rr.Hdr.Name = rrset[0].Header().Name
|
||||
rr.Hdr.Class = rrset[0].Header().Class
|
||||
if rr.OrigTtl == 0 { // If set don't override
|
||||
rr.OrigTtl = rrset[0].Header().Ttl
|
||||
}
|
||||
rr.TypeCovered = rrset[0].Header().Rrtype
|
||||
rr.Labels = uint8(CountLabel(rrset[0].Header().Name))
|
||||
|
||||
if strings.HasPrefix(rrset[0].Header().Name, "*") {
|
||||
rr.Labels-- // wildcard, remove from label count
|
||||
}
|
||||
|
||||
sigwire := new(rrsigWireFmt)
|
||||
sigwire.TypeCovered = rr.TypeCovered
|
||||
sigwire.Algorithm = rr.Algorithm
|
||||
sigwire.Labels = rr.Labels
|
||||
sigwire.OrigTtl = rr.OrigTtl
|
||||
sigwire.Expiration = rr.Expiration
|
||||
sigwire.Inception = rr.Inception
|
||||
sigwire.KeyTag = rr.KeyTag
|
||||
// For signing, lowercase this name
|
||||
sigwire.SignerName = strings.ToLower(rr.SignerName)
|
||||
|
||||
// Create the desired binary blob
|
||||
signdata := make([]byte, DefaultMsgSize)
|
||||
n, err := packSigWire(sigwire, signdata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signdata = signdata[:n]
|
||||
wire, err := rawSignatureData(rrset, rr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
return ErrAlg
|
||||
}
|
||||
|
||||
switch rr.Algorithm {
|
||||
case ED25519:
|
||||
// ed25519 signs the raw message and performs hashing internally.
|
||||
// All other supported signature schemes operate over the pre-hashed
|
||||
// message, and thus ed25519 must be handled separately here.
|
||||
//
|
||||
// The raw message is passed directly into sign and crypto.Hash(0) is
|
||||
// used to signal to the crypto.Signer that the data has not been hashed.
|
||||
signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
default:
|
||||
h := hash.New()
|
||||
h.Write(signdata)
|
||||
h.Write(wire)
|
||||
|
||||
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
|
||||
signature, err := k.Sign(rand.Reader, hashed, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch alg {
|
||||
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
|
||||
return signature, nil
|
||||
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
ecdsaSignature := &struct {
|
||||
R, S *big.Int
|
||||
}{}
|
||||
if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var intlen int
|
||||
switch alg {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
|
||||
signature := intToBytes(ecdsaSignature.R, intlen)
|
||||
signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
|
||||
return signature, nil
|
||||
|
||||
// There is no defined interface for what a DSA backed crypto.Signer returns
|
||||
case DSA, DSANSEC3SHA1:
|
||||
// t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
|
||||
// signature := []byte{byte(t)}
|
||||
// signature = append(signature, intToBytes(r1, 20)...)
|
||||
// signature = append(signature, intToBytes(s1, 20)...)
|
||||
// rr.Signature = signature
|
||||
|
||||
case ED25519:
|
||||
return signature, nil
|
||||
}
|
||||
|
||||
return nil, ErrAlg
|
||||
}
|
||||
|
||||
// Verify validates an RRSet with the signature and key. This is only the
|
||||
// cryptographic test, the signature validity period must be checked separately.
|
||||
// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
|
||||
func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||
// First the easy checks
|
||||
if !IsRRset(rrset) {
|
||||
return ErrRRset
|
||||
}
|
||||
if rr.KeyTag != k.KeyTag() {
|
||||
return ErrKey
|
||||
}
|
||||
if rr.Hdr.Class != k.Hdr.Class {
|
||||
return ErrKey
|
||||
}
|
||||
if rr.Algorithm != k.Algorithm {
|
||||
return ErrKey
|
||||
}
|
||||
if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) {
|
||||
return ErrKey
|
||||
}
|
||||
if k.Protocol != 3 {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
// IsRRset checked that we have at least one RR and that the RRs in
|
||||
// the set have consistent type, class, and name. Also check that type and
|
||||
// class matches the RRSIG record.
|
||||
if rrset[0].Header().Class != rr.Hdr.Class {
|
||||
return ErrRRset
|
||||
}
|
||||
if rrset[0].Header().Rrtype != rr.TypeCovered {
|
||||
return ErrRRset
|
||||
}
|
||||
|
||||
// RFC 4035 5.3.2. Reconstructing the Signed Data
|
||||
// Copy the sig, except the rrsig data
|
||||
sigwire := new(rrsigWireFmt)
|
||||
sigwire.TypeCovered = rr.TypeCovered
|
||||
sigwire.Algorithm = rr.Algorithm
|
||||
sigwire.Labels = rr.Labels
|
||||
sigwire.OrigTtl = rr.OrigTtl
|
||||
sigwire.Expiration = rr.Expiration
|
||||
sigwire.Inception = rr.Inception
|
||||
sigwire.KeyTag = rr.KeyTag
|
||||
sigwire.SignerName = strings.ToLower(rr.SignerName)
|
||||
// Create the desired binary blob
|
||||
signeddata := make([]byte, DefaultMsgSize)
|
||||
n, err := packSigWire(sigwire, signeddata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signeddata = signeddata[:n]
|
||||
wire, err := rawSignatureData(rrset, rr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sigbuf := rr.sigBuf() // Get the binary signature data
|
||||
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
|
||||
// TODO(miek)
|
||||
// remove the domain name and assume its ours?
|
||||
}
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
return ErrAlg
|
||||
}
|
||||
|
||||
switch rr.Algorithm {
|
||||
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
|
||||
// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
|
||||
pubkey := k.publicKeyRSA() // Get the key
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signeddata)
|
||||
h.Write(wire)
|
||||
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
|
||||
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
pubkey := k.publicKeyECDSA()
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
// Split sigbuf into the r and s coordinates
|
||||
r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
|
||||
s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signeddata)
|
||||
h.Write(wire)
|
||||
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
|
||||
case ED25519:
|
||||
pubkey := k.publicKeyED25519()
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
|
||||
default:
|
||||
return ErrAlg
|
||||
}
|
||||
}
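A hedged end-to-end sketch of Sign and Verify: generate an ECDSA P-256 DNSKEY, sign a single-record RRset, and verify it with the public key. The zone, address and validity window are placeholder assumptions:

```go
package main

import (
	"crypto/ecdsa"
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     256, // ZSK
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	priv, err := key.Generate(256)
	if err != nil {
		panic(err)
	}

	a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		panic(err)
	}

	sig := &dns.RRSIG{
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  key.Algorithm,
		Inception:  uint32(time.Now().Add(-time.Hour).UTC().Unix()),
		Expiration: uint32(time.Now().Add(time.Hour).UTC().Unix()),
	}
	if err := sig.Sign(priv.(*ecdsa.PrivateKey), []dns.RR{a}); err != nil {
		panic(err)
	}
	fmt.Println("verify:", sig.Verify(key, []dns.RR{a})) // nil on success
}
```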
|
||||
|
||||
// ValidityPeriod uses RFC1982 serial arithmetic to calculate
|
||||
// if a signature period is valid. If t is the zero time, the
|
||||
// current time is taken, otherwise t is used. Returns true if the signature
|
||||
// is valid at the given time, otherwise returns false.
|
||||
func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
|
||||
var utc int64
|
||||
if t.IsZero() {
|
||||
utc = time.Now().UTC().Unix()
|
||||
} else {
|
||||
utc = t.UTC().Unix()
|
||||
}
|
||||
modi := (int64(rr.Inception) - utc) / year68
|
||||
mode := (int64(rr.Expiration) - utc) / year68
|
||||
ti := int64(rr.Inception) + modi*year68
|
||||
te := int64(rr.Expiration) + mode*year68
|
||||
return ti <= utc && utc <= te
|
||||
}
|
||||
|
||||
// Return the signature's sigdata, decoded from base64, as a byte slice.
|
||||
func (rr *RRSIG) sigBuf() []byte {
|
||||
sigbuf, err := fromBase64([]byte(rr.Signature))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return sigbuf
|
||||
}
|
||||
|
||||
// publicKeyRSA returns the RSA public key from a DNSKEY record.
|
||||
func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(keybuf) < 1+1+64 {
|
||||
// Exponent must be at least 1 byte and modulus at least 64
|
||||
return nil
|
||||
}
|
||||
|
||||
// RFC 2537/3110, section 2. RSA Public KEY Resource Records
|
||||
// Length is in the 0th byte, unless it is zero, then it
// is in bytes 1 and 2 and it is a 16 bit number
|
||||
explen := uint16(keybuf[0])
|
||||
keyoff := 1
|
||||
if explen == 0 {
|
||||
explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
|
||||
keyoff = 3
|
||||
}
|
||||
|
||||
if explen > 4 || explen == 0 || keybuf[keyoff] == 0 {
|
||||
// Exponent larger than supported by the crypto package,
|
||||
// empty, or contains prohibited leading zero.
|
||||
return nil
|
||||
}
|
||||
|
||||
modoff := keyoff + int(explen)
|
||||
modlen := len(keybuf) - modoff
|
||||
if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 {
|
||||
// Modulus is too small, large, or contains prohibited leading zero.
|
||||
return nil
|
||||
}
|
||||
|
||||
pubkey := new(rsa.PublicKey)
|
||||
|
||||
expo := uint64(0)
|
||||
for i := 0; i < int(explen); i++ {
|
||||
expo <<= 8
|
||||
expo |= uint64(keybuf[keyoff+i])
|
||||
}
|
||||
if expo > 1<<31-1 {
|
||||
// Larger exponent than supported by the crypto package.
|
||||
return nil
|
||||
}
|
||||
pubkey.E = int(expo)
|
||||
|
||||
pubkey.N = big.NewInt(0)
|
||||
pubkey.N.SetBytes(keybuf[modoff:])
|
||||
|
||||
return pubkey
|
||||
}
|
||||
|
||||
// publicKeyECDSA returns the Curve public key from the DNSKEY record.
|
||||
func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
pubkey := new(ecdsa.PublicKey)
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
pubkey.Curve = elliptic.P256()
|
||||
if len(keybuf) != 64 {
|
||||
// wrongly encoded key
|
||||
return nil
|
||||
}
|
||||
case ECDSAP384SHA384:
|
||||
pubkey.Curve = elliptic.P384()
|
||||
if len(keybuf) != 96 {
|
||||
// Wrongly encoded key
|
||||
return nil
|
||||
}
|
||||
}
|
||||
pubkey.X = big.NewInt(0)
|
||||
pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
|
||||
pubkey.Y = big.NewInt(0)
|
||||
pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
|
||||
return pubkey
|
||||
}
|
||||
|
||||
func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(keybuf) < 22 {
|
||||
return nil
|
||||
}
|
||||
t, keybuf := int(keybuf[0]), keybuf[1:]
|
||||
size := 64 + t*8
|
||||
q, keybuf := keybuf[:20], keybuf[20:]
|
||||
if len(keybuf) != 3*size {
|
||||
return nil
|
||||
}
|
||||
p, keybuf := keybuf[:size], keybuf[size:]
|
||||
g, y := keybuf[:size], keybuf[size:]
|
||||
pubkey := new(dsa.PublicKey)
|
||||
pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
|
||||
pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
|
||||
pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
|
||||
pubkey.Y = big.NewInt(0).SetBytes(y)
|
||||
return pubkey
|
||||
}
|
||||
|
||||
func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(keybuf) != ed25519.PublicKeySize {
|
||||
return nil
|
||||
}
|
||||
return keybuf
|
||||
}
|
||||
|
||||
type wireSlice [][]byte
|
||||
|
||||
func (p wireSlice) Len() int { return len(p) }
|
||||
func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p wireSlice) Less(i, j int) bool {
|
||||
_, ioff, _ := UnpackDomainName(p[i], 0)
|
||||
_, joff, _ := UnpackDomainName(p[j], 0)
|
||||
return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
|
||||
}
|
||||
|
||||
// Return the raw signature data.
|
||||
func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
|
||||
wires := make(wireSlice, len(rrset))
|
||||
for i, r := range rrset {
|
||||
r1 := r.copy()
|
||||
r1.Header().Ttl = s.OrigTtl
|
||||
labels := SplitDomainName(r1.Header().Name)
|
||||
// 6.2. Canonical RR Form. (4) - wildcards
|
||||
if len(labels) > int(s.Labels) {
|
||||
// Wildcard
|
||||
r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
|
||||
}
|
||||
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
|
||||
r1.Header().Name = strings.ToLower(r1.Header().Name)
|
||||
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
|
||||
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
|
||||
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
|
||||
// SRV, DNAME, A6
|
||||
//
|
||||
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
|
||||
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
|
||||
// that needs conversion to lowercase, and twice at that. Since HINFO
|
||||
// records contain no domain names, they are not subject to case
|
||||
// conversion.
|
||||
switch x := r1.(type) {
|
||||
case *NS:
|
||||
x.Ns = strings.ToLower(x.Ns)
|
||||
case *MD:
|
||||
x.Md = strings.ToLower(x.Md)
|
||||
case *MF:
|
||||
x.Mf = strings.ToLower(x.Mf)
|
||||
case *CNAME:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
case *SOA:
|
||||
x.Ns = strings.ToLower(x.Ns)
|
||||
x.Mbox = strings.ToLower(x.Mbox)
|
||||
case *MB:
|
||||
x.Mb = strings.ToLower(x.Mb)
|
||||
case *MG:
|
||||
x.Mg = strings.ToLower(x.Mg)
|
||||
case *MR:
|
||||
x.Mr = strings.ToLower(x.Mr)
|
||||
case *PTR:
|
||||
x.Ptr = strings.ToLower(x.Ptr)
|
||||
case *MINFO:
|
||||
x.Rmail = strings.ToLower(x.Rmail)
|
||||
x.Email = strings.ToLower(x.Email)
|
||||
case *MX:
|
||||
x.Mx = strings.ToLower(x.Mx)
|
||||
case *RP:
|
||||
x.Mbox = strings.ToLower(x.Mbox)
|
||||
x.Txt = strings.ToLower(x.Txt)
|
||||
case *AFSDB:
|
||||
x.Hostname = strings.ToLower(x.Hostname)
|
||||
case *RT:
|
||||
x.Host = strings.ToLower(x.Host)
|
||||
case *SIG:
|
||||
x.SignerName = strings.ToLower(x.SignerName)
|
||||
case *PX:
|
||||
x.Map822 = strings.ToLower(x.Map822)
|
||||
x.Mapx400 = strings.ToLower(x.Mapx400)
|
||||
case *NAPTR:
|
||||
x.Replacement = strings.ToLower(x.Replacement)
|
||||
case *KX:
|
||||
x.Exchanger = strings.ToLower(x.Exchanger)
|
||||
case *SRV:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
case *DNAME:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
}
|
||||
// 6.2. Canonical RR Form. (5) - origTTL
|
||||
wire := make([]byte, r1.len()+1) // +1 to be safe(r)
|
||||
off, err1 := PackRR(r1, wire, 0, nil, false)
|
||||
if err1 != nil {
|
||||
return nil, err1
|
||||
}
|
||||
wire = wire[:off]
|
||||
wires[i] = wire
|
||||
}
|
||||
sort.Sort(wires)
|
||||
for i, wire := range wires {
|
||||
if i > 0 && bytes.Equal(wire, wires[i-1]) {
|
||||
continue
|
||||
}
|
||||
buf = append(buf, wire...)
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
|
||||
// copied from zmsg.go RRSIG packing
|
||||
off, err := packUint16(sw.TypeCovered, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(sw.Algorithm, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(sw.Labels, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.OrigTtl, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.Expiration, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.Inception, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(sw.KeyTag, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
|
||||
// copied from zmsg.go DNSKEY packing
|
||||
off, err := packUint16(dw.Flags, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(dw.Protocol, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(dw.Algorithm, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packStringBase64(dw.PublicKey, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
vendor/github.com/miekg/dns/dnssec_keygen.go (178 lines, generated, vendored, new file)
@@ -0,0 +1,178 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// Generate generates a DNSKEY of the given bit size.
|
||||
// The public part is put inside the DNSKEY record.
|
||||
// The Algorithm in the key must be set as this will define
|
||||
// what kind of DNSKEY will be generated.
|
||||
// The ECDSA algorithms imply a fixed keysize, in that case
|
||||
// bits should be set to the size of the algorithm.
|
||||
func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
|
||||
switch k.Algorithm {
|
||||
case DSA, DSANSEC3SHA1:
|
||||
if bits != 1024 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
|
||||
if bits < 512 || bits > 4096 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case RSASHA512:
|
||||
if bits < 1024 || bits > 4096 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ECDSAP256SHA256:
|
||||
if bits != 256 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ECDSAP384SHA384:
|
||||
if bits != 384 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ED25519:
|
||||
if bits != 256 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
}
|
||||
|
||||
switch k.Algorithm {
|
||||
case DSA, DSANSEC3SHA1:
|
||||
params := new(dsa.Parameters)
|
||||
if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
priv := new(dsa.PrivateKey)
|
||||
priv.PublicKey.Parameters = *params
|
||||
err := dsa.GenerateKey(priv, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
|
||||
return priv, nil
|
||||
case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
|
||||
priv, err := rsa.GenerateKey(rand.Reader, bits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
|
||||
return priv, nil
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
var c elliptic.Curve
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
c = elliptic.P256()
|
||||
case ECDSAP384SHA384:
|
||||
c = elliptic.P384()
|
||||
}
|
||||
priv, err := ecdsa.GenerateKey(c, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
|
||||
return priv, nil
|
||||
case ED25519:
|
||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyED25519(pub)
|
||||
return priv, nil
|
||||
default:
|
||||
return nil, ErrAlg
|
||||
}
|
||||
}
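A hedged sketch of Generate followed by ToDS, producing the SHA-256 DS digest a parent zone would publish; the zone name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     257, // KSK: ZONE + SEP
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	if _, err := key.Generate(256); err != nil {
		panic(err)
	}

	ds := key.ToDS(dns.SHA256)
	fmt.Println(ds.KeyTag, dns.HashToString[ds.DigestType], ds.Digest)
}
```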
|
||||
|
||||
// Set the public key (the value E and N)
|
||||
func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
|
||||
if _E == 0 || _N == nil {
|
||||
return false
|
||||
}
|
||||
buf := exponentToBuf(_E)
|
||||
buf = append(buf, _N.Bytes()...)
|
||||
k.PublicKey = toBase64(buf)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for Elliptic Curves
|
||||
func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
|
||||
if _X == nil || _Y == nil {
|
||||
return false
|
||||
}
|
||||
var intlen int
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for DSA
|
||||
func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
|
||||
if _Q == nil || _P == nil || _G == nil || _Y == nil {
|
||||
return false
|
||||
}
|
||||
buf := dsaToBuf(_Q, _P, _G, _Y)
|
||||
k.PublicKey = toBase64(buf)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for Ed25519
|
||||
func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
|
||||
if _K == nil {
|
||||
return false
|
||||
}
|
||||
k.PublicKey = toBase64(_K)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key (the values E and N) for RSA
|
||||
// RFC 3110: Section 2. RSA Public KEY Resource Records
|
||||
func exponentToBuf(_E int) []byte {
|
||||
var buf []byte
|
||||
i := big.NewInt(int64(_E)).Bytes()
|
||||
if len(i) < 256 {
|
||||
buf = make([]byte, 1, 1+len(i))
|
||||
buf[0] = uint8(len(i))
|
||||
} else {
|
||||
buf = make([]byte, 3, 3+len(i))
|
||||
buf[0] = 0
|
||||
buf[1] = uint8(len(i) >> 8)
|
||||
buf[2] = uint8(len(i))
|
||||
}
|
||||
buf = append(buf, i...)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Set the public key for X and Y for Curve. The two
|
||||
// values are just concatenated.
|
||||
func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
|
||||
buf := intToBytes(_X, intlen)
|
||||
buf = append(buf, intToBytes(_Y, intlen)...)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Set the public key for DSA: Q, P, G and Y. The
// values are just concatenated.
|
||||
func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte {
|
||||
t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8)
|
||||
buf := []byte{byte(t)}
|
||||
buf = append(buf, intToBytes(_Q, 20)...)
|
||||
buf = append(buf, intToBytes(_P, 64+t*8)...)
|
||||
buf = append(buf, intToBytes(_G, 64+t*8)...)
|
||||
buf = append(buf, intToBytes(_Y, 64+t*8)...)
|
||||
return buf
|
||||
}
|
||||
vendor/github.com/miekg/dns/dnssec_keyscan.go (352 lines, generated, vendored, new file)
@@ -0,0 +1,352 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"io"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// NewPrivateKey returns a PrivateKey by parsing the string s.
|
||||
// s should be in the same form as the BIND private key files.
|
||||
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
|
||||
if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
|
||||
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
|
||||
}
|
||||
return k.ReadPrivateKey(strings.NewReader(s), "")
|
||||
}
|
||||
|
||||
// ReadPrivateKey reads a private key from the io.Reader q. The string file is
|
||||
// only used in error reporting.
|
||||
// The public key must be known, because some cryptographic algorithms embed
|
||||
// the public key inside the private key.
|
||||
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
|
||||
m, err := parseKey(q, file)
|
||||
if m == nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := m["private-key-format"]; !ok {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
// TODO(mg): check if the pubkey matches the private key
|
||||
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
|
||||
if err != nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
switch uint8(algo) {
|
||||
case DSA:
|
||||
priv, err := readPrivateKeyDSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyDSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case RSAMD5:
|
||||
fallthrough
|
||||
case RSASHA1:
|
||||
fallthrough
|
||||
case RSASHA1NSEC3SHA1:
|
||||
fallthrough
|
||||
case RSASHA256:
|
||||
fallthrough
|
||||
case RSASHA512:
|
||||
priv, err := readPrivateKeyRSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyRSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case ECCGOST:
|
||||
return nil, ErrPrivKey
|
||||
case ECDSAP256SHA256:
|
||||
fallthrough
|
||||
case ECDSAP384SHA384:
|
||||
priv, err := readPrivateKeyECDSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyECDSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case ED25519:
|
||||
return readPrivateKeyED25519(m)
|
||||
default:
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
}
|
||||
|
||||
// Read a private key (file) string and create a public key. Return the private key.
|
||||
func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
|
||||
p := new(rsa.PrivateKey)
|
||||
p.Primes = []*big.Int{nil, nil}
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch k {
|
||||
case "modulus":
|
||||
p.PublicKey.N = big.NewInt(0)
|
||||
p.PublicKey.N.SetBytes(v1)
|
||||
case "publicexponent":
|
||||
i := big.NewInt(0)
|
||||
i.SetBytes(v1)
|
||||
p.PublicKey.E = int(i.Int64()) // int64 should be large enough
|
||||
case "privateexponent":
|
||||
p.D = big.NewInt(0)
|
||||
p.D.SetBytes(v1)
|
||||
case "prime1":
|
||||
p.Primes[0] = big.NewInt(0)
|
||||
p.Primes[0].SetBytes(v1)
|
||||
case "prime2":
|
||||
p.Primes[1] = big.NewInt(0)
|
||||
p.Primes[1].SetBytes(v1)
|
||||
}
|
||||
case "exponent1", "exponent2", "coefficient":
|
||||
// not used in Go (yet)
|
||||
case "created", "publish", "activate":
|
||||
// not used in Go (yet)
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
|
||||
p := new(dsa.PrivateKey)
|
||||
p.X = big.NewInt(0)
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "private_value(x)":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.X.SetBytes(v1)
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
|
||||
p := new(ecdsa.PrivateKey)
|
||||
p.D = big.NewInt(0)
|
||||
// TODO: validate that the required flags are present
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "privatekey":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.D.SetBytes(v1)
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
|
||||
var p ed25519.PrivateKey
|
||||
// TODO: validate that the required flags are present
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "privatekey":
|
||||
p1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(p1) != ed25519.SeedSize {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
p = ed25519.NewKeyFromSeed(p1)
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// parseKey reads a private key from r. It returns a map[string]string,
|
||||
// with the key-value pairs, or an error when the file is not correct.
|
||||
func parseKey(r io.Reader, file string) (map[string]string, error) {
|
||||
m := make(map[string]string)
|
||||
var k string
|
||||
|
||||
c := newKLexer(r)
|
||||
|
||||
for l, ok := c.Next(); ok; l, ok = c.Next() {
|
||||
// It should alternate
|
||||
switch l.value {
|
||||
case zKey:
|
||||
k = l.token
|
||||
case zValue:
|
||||
if k == "" {
|
||||
return nil, &ParseError{file, "no private key seen", l}
|
||||
}
|
||||
|
||||
m[strings.ToLower(k)] = l.token
|
||||
k = ""
|
||||
}
|
||||
}
|
||||
|
||||
// Surface any read errors from r.
|
||||
if err := c.Err(); err != nil {
|
||||
return nil, &ParseError{file: file, err: err.Error()}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
type klexer struct {
|
||||
br io.ByteReader
|
||||
|
||||
readErr error
|
||||
|
||||
line int
|
||||
column int
|
||||
|
||||
key bool
|
||||
|
||||
eol bool // end-of-line
|
||||
}
|
||||
|
||||
func newKLexer(r io.Reader) *klexer {
|
||||
br, ok := r.(io.ByteReader)
|
||||
if !ok {
|
||||
br = bufio.NewReaderSize(r, 1024)
|
||||
}
|
||||
|
||||
return &klexer{
|
||||
br: br,
|
||||
|
||||
line: 1,
|
||||
|
||||
key: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (kl *klexer) Err() error {
|
||||
if kl.readErr == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return kl.readErr
|
||||
}
|
||||
|
||||
// readByte returns the next byte from the input
|
||||
func (kl *klexer) readByte() (byte, bool) {
|
||||
if kl.readErr != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
c, err := kl.br.ReadByte()
|
||||
if err != nil {
|
||||
kl.readErr = err
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// delay the newline handling until the next token is delivered,
|
||||
// fixes off-by-one errors when reporting a parse error.
|
||||
if kl.eol {
|
||||
kl.line++
|
||||
kl.column = 0
|
||||
kl.eol = false
|
||||
}
|
||||
|
||||
if c == '\n' {
|
||||
kl.eol = true
|
||||
} else {
|
||||
kl.column++
|
||||
}
|
||||
|
||||
return c, true
|
||||
}
|
||||
|
||||
func (kl *klexer) Next() (lex, bool) {
|
||||
var (
|
||||
l lex
|
||||
|
||||
str strings.Builder
|
||||
|
||||
commt bool
|
||||
)
|
||||
|
||||
for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
|
||||
l.line, l.column = kl.line, kl.column
|
||||
|
||||
switch x {
|
||||
case ':':
|
||||
if commt || !kl.key {
|
||||
break
|
||||
}
|
||||
|
||||
kl.key = false
|
||||
|
||||
// Next token is a space, eat it
|
||||
kl.readByte()
|
||||
|
||||
l.value = zKey
|
||||
l.token = str.String()
|
||||
return l, true
|
||||
case ';':
|
||||
commt = true
|
||||
case '\n':
|
||||
if commt {
|
||||
// Reset a comment
|
||||
commt = false
|
||||
}
|
||||
|
||||
kl.key = true
|
||||
|
||||
l.value = zValue
|
||||
l.token = str.String()
|
||||
return l, true
|
||||
default:
|
||||
if commt {
|
||||
break
|
||||
}
|
||||
|
||||
str.WriteByte(x)
|
||||
}
|
||||
}
|
||||
|
||||
if kl.readErr != nil && kl.readErr != io.EOF {
|
||||
// Don't return any tokens after a read error occurs.
|
||||
return lex{value: zEOF}, false
|
||||
}
|
||||
|
||||
if str.Len() > 0 {
|
||||
// Send remainder
|
||||
l.value = zValue
|
||||
l.token = str.String()
|
||||
return l, true
|
||||
}
|
||||
|
||||
return lex{value: zEOF}, false
|
||||
}
|
||||
93 vendor/github.com/miekg/dns/dnssec_privkey.go generated vendored Normal file
@@ -0,0 +1,93 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
const format = "Private-key-format: v1.3\n"
|
||||
|
||||
// PrivateKeyString converts a PrivateKey to a string. This string has the same
|
||||
// format as the private-key-file of BIND9 (Private-key-format: v1.3).
|
||||
// It needs some info from the key (the algorithm), so it's a method of the DNSKEY.
|
||||
// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey
|
||||
func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
|
||||
algorithm := strconv.Itoa(int(r.Algorithm))
|
||||
algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
|
||||
|
||||
switch p := p.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
modulus := toBase64(p.PublicKey.N.Bytes())
|
||||
e := big.NewInt(int64(p.PublicKey.E))
|
||||
publicExponent := toBase64(e.Bytes())
|
||||
privateExponent := toBase64(p.D.Bytes())
|
||||
prime1 := toBase64(p.Primes[0].Bytes())
|
||||
prime2 := toBase64(p.Primes[1].Bytes())
|
||||
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
|
||||
// and from: http://code.google.com/p/go/issues/detail?id=987
|
||||
one := big.NewInt(1)
|
||||
p1 := big.NewInt(0).Sub(p.Primes[0], one)
|
||||
q1 := big.NewInt(0).Sub(p.Primes[1], one)
|
||||
exp1 := big.NewInt(0).Mod(p.D, p1)
|
||||
exp2 := big.NewInt(0).Mod(p.D, q1)
|
||||
coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
|
||||
|
||||
exponent1 := toBase64(exp1.Bytes())
|
||||
exponent2 := toBase64(exp2.Bytes())
|
||||
coefficient := toBase64(coeff.Bytes())
|
||||
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"Modulus: " + modulus + "\n" +
|
||||
"PublicExponent: " + publicExponent + "\n" +
|
||||
"PrivateExponent: " + privateExponent + "\n" +
|
||||
"Prime1: " + prime1 + "\n" +
|
||||
"Prime2: " + prime2 + "\n" +
|
||||
"Exponent1: " + exponent1 + "\n" +
|
||||
"Exponent2: " + exponent2 + "\n" +
|
||||
"Coefficient: " + coefficient + "\n"
|
||||
|
||||
case *ecdsa.PrivateKey:
|
||||
var intlen int
|
||||
switch r.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
private := toBase64(intToBytes(p.D, intlen))
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"PrivateKey: " + private + "\n"
|
||||
|
||||
case *dsa.PrivateKey:
|
||||
T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
|
||||
prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
|
||||
subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
|
||||
base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
|
||||
priv := toBase64(intToBytes(p.X, 20))
|
||||
pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"Prime(p): " + prime + "\n" +
|
||||
"Subprime(q): " + subprime + "\n" +
|
||||
"Base(g): " + base + "\n" +
|
||||
"Private_value(x): " + priv + "\n" +
|
||||
"Public_value(y): " + pub + "\n"
|
||||
|
||||
case ed25519.PrivateKey:
|
||||
private := toBase64(p.Seed())
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"PrivateKey: " + private + "\n"
|
||||
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
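The two files above cover both directions of the BIND private-key format: `NewPrivateKey`/`ReadPrivateKey` parse it, and `PrivateKeyString` produces it. A minimal round-trip sketch follows (illustrative only, not part of the vendored code; the zone name and algorithm choice are arbitrary assumptions):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     256,
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	// Generate a key pair; 256 is the required bit size for ECDSAP256SHA256.
	priv, err := key.Generate(256)
	if err != nil {
		panic(err)
	}
	// Serialise to the BIND9 "Private-key-format: v1.3" text...
	s := key.PrivateKeyString(priv)
	// ...and parse it back with the scanner defined in dnssec_keyscan.go.
	restored, err := key.NewPrivateKey(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
	fmt.Printf("restored key type: %T\n", restored)
}
```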
272 vendor/github.com/miekg/dns/doc.go generated vendored Normal file
@@ -0,0 +1,272 @@
|
||||
/*
|
||||
Package dns implements a full featured interface to the Domain Name System.
|
||||
Server- and client-side programming is supported.
|
||||
The package allows complete control over what is sent out to the DNS. The package
|
||||
API follows the less-is-more principle, by presenting a small, clean interface.
|
||||
|
||||
The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
|
||||
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
|
||||
Note that domain names MUST be fully qualified before sending them; unqualified
|
||||
names in a message will result in a packing failure.
|
||||
|
||||
Resource records are native types. They are not stored in wire format.
|
||||
Basic usage pattern for creating a new resource record:
|
||||
|
||||
r := new(dns.MX)
|
||||
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
|
||||
Class: dns.ClassINET, Ttl: 3600}
|
||||
r.Preference = 10
|
||||
r.Mx = "mx.miek.nl."
|
||||
|
||||
Or directly from a string:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
|
||||
|
||||
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
|
||||
|
||||
Or even:
|
||||
|
||||
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
|
||||
|
||||
In the DNS, messages are exchanged; these messages contain resource
|
||||
records (sets). Basic use pattern for creating a message:
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
|
||||
Or when not certain if the domain name is fully qualified:
|
||||
|
||||
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
|
||||
|
||||
The message m is now a message with the question section set to ask
|
||||
the MX records for the miek.nl. zone.
|
||||
|
||||
The following is slightly more verbose, but more flexible:
|
||||
|
||||
m1 := new(dns.Msg)
|
||||
m1.Id = dns.Id()
|
||||
m1.RecursionDesired = true
|
||||
m1.Question = make([]dns.Question, 1)
|
||||
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
|
||||
|
||||
After creating a message it can be sent.
|
||||
Basic use pattern for synchronous querying the DNS at a
|
||||
server configured on 127.0.0.1 and port 53:
|
||||
|
||||
c := new(dns.Client)
|
||||
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
|
||||
|
||||
Suppressing multiple outstanding queries (with the same question, type and
|
||||
class) is as easy as setting:
|
||||
|
||||
c.SingleInflight = true
|
||||
|
||||
More advanced options are available using a net.Dialer and the corresponding API.
|
||||
For example it is possible to set a timeout, or to specify a source IP address
|
||||
and port to use for the connection:
|
||||
|
||||
c := new(dns.Client)
|
||||
laddr := net.UDPAddr{
|
||||
IP: net.ParseIP("[::1]"),
|
||||
Port: 12345,
|
||||
Zone: "",
|
||||
}
|
||||
c.Dialer := &net.Dialer{
|
||||
Timeout: 200 * time.Millisecond,
|
||||
LocalAddr: &laddr,
|
||||
}
|
||||
in, rtt, err := c.Exchange(m1, "8.8.8.8:53")
|
||||
|
||||
If these "advanced" features are not needed, a simple UDP query can be sent,
|
||||
with:
|
||||
|
||||
in, err := dns.Exchange(m1, "127.0.0.1:53")
|
||||
|
||||
When this function returns you will get a dns message. A dns message consists
|
||||
out of four sections.
|
||||
The question section: in.Question, the answer section: in.Answer,
|
||||
the authority section: in.Ns and the additional section: in.Extra.
|
||||
|
||||
Each of these sections (except the Question section) contain a []RR. Basic
|
||||
use pattern for accessing the rdata of a TXT RR as the first RR in
|
||||
the Answer section:
|
||||
|
||||
if t, ok := in.Answer[0].(*dns.TXT); ok {
|
||||
// do something with t.Txt
|
||||
}
|
||||
|
||||
Domain Name and TXT Character String Representations
|
||||
|
||||
Both domain names and TXT character strings are converted to presentation
|
||||
form both when unpacked and when converted to strings.
|
||||
|
||||
For TXT character strings, tabs, carriage returns and line feeds will be
|
||||
converted to \t, \r and \n respectively. Back slashes and quotations marks
|
||||
will be escaped. Bytes below 32 and above 127 will be converted to \DDD
|
||||
form.
|
||||
|
||||
For domain names, in addition to the above rules brackets, periods,
|
||||
spaces, semicolons and the at symbol are escaped.
|
||||
|
||||
DNSSEC
|
||||
|
||||
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
|
||||
uses public key cryptography to sign resource records. The
|
||||
public keys are stored in DNSKEY records and the signatures in RRSIG records.
|
||||
|
||||
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
|
||||
to a request.
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetEdns0(4096, true)
|
||||
|
||||
Signature generation, signature verification and key generation are all supported.
|
||||
|
||||
DYNAMIC UPDATES
|
||||
|
||||
Dynamic updates reuse the DNS message format, but rename three of
|
||||
the sections. Question is Zone, Answer is Prerequisite, Authority is
|
||||
Update; only the Additional section is not renamed. See RFC 2136 for the gory details.
|
||||
|
||||
You can set a rather complex set of rules for the existence or absence of
|
||||
certain resource records or names in a zone to specify if resource records
|
||||
should be added or removed. The table from RFC 2136 supplemented with the Go
|
||||
DNS function shows which functions exist to specify the prerequisites.
|
||||
|
||||
3.2.4 - Table Of Metavalues Used In Prerequisite Section
|
||||
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
--------------------------------------------------------------
|
||||
ANY ANY empty Name is in use dns.NameUsed
|
||||
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
|
||||
NONE ANY empty Name is not in use dns.NameNotUsed
|
||||
NONE rrset empty RRset does not exist dns.RRsetNotUsed
|
||||
zone rrset rr RRset exists (value dep) dns.Used
|
||||
|
||||
The prerequisite section can also be left empty.
|
||||
If you have decided on the prerequisites you can tell what RRs should
|
||||
be added or deleted. The next table shows the options you have and
|
||||
what functions to call.
|
||||
|
||||
3.4.2.6 - Table Of Metavalues Used In Update Section
|
||||
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
---------------------------------------------------------------
|
||||
ANY ANY empty Delete all RRsets from name dns.RemoveName
|
||||
ANY rrset empty Delete an RRset dns.RemoveRRset
|
||||
NONE rrset rr Delete an RR from RRset dns.Remove
|
||||
zone rrset rr Add to an RRset dns.Insert
|
||||
|
||||
TRANSACTION SIGNATURE
|
||||
|
||||
A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
|
||||
The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
|
||||
|
||||
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
|
||||
must be fully qualified - as they are domain names) and the base64 secret
|
||||
"so6ZGir4GPAqINNh9U5c3A==":
|
||||
|
||||
If an incoming message contains a TSIG record it MUST be the last record in
|
||||
the additional section (RFC2845 3.2). This means that you should make the
|
||||
call to SetTsig last, right before executing the query. If you make any
|
||||
changes to the RRset after calling SetTsig() the signature will be incorrect.
|
||||
|
||||
c := new(dns.Client)
|
||||
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
...
|
||||
// When sending the TSIG RR is calculated and filled in before sending
|
||||
|
||||
When requesting a zone transfer (almost all TSIG usage is when requesting zone transfers) with
|
||||
TSIG, this is the basic use pattern. In this example we request an AXFR for
|
||||
miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
|
||||
and using the server 176.58.119.54:
|
||||
|
||||
t := new(dns.Transfer)
|
||||
m := new(dns.Msg)
|
||||
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
m.SetAxfr("miek.nl.")
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
c, err := t.In(m, "176.58.119.54:53")
|
||||
for r := range c { ... }
|
||||
|
||||
You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
|
||||
If something is not correct an error is returned.
|
||||
|
||||
Basic use pattern validating and replying to a message that has TSIG set.
|
||||
|
||||
server := &dns.Server{Addr: ":53", Net: "udp"}
|
||||
server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
go server.ListenAndServe()
|
||||
dns.HandleFunc(".", handleRequest)
|
||||
|
||||
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
|
||||
m := new(dns.Msg)
|
||||
m.SetReply(r)
|
||||
if r.IsTsig() != nil {
|
||||
if w.TsigStatus() == nil {
|
||||
// *Msg r has a TSIG record and it was validated
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
} else {
|
||||
// *Msg r has a TSIG record and it was not validated
|
||||
}
|
||||
}
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
PRIVATE RRS
|
||||
|
||||
RFC 6895 sets aside a range of type codes for private use. This range
|
||||
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
|
||||
can be used, before requesting an official type code from IANA.
|
||||
|
||||
see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
|
||||
information.
|
||||
|
||||
EDNS0
|
||||
|
||||
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
|
||||
by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
|
||||
abused.
|
||||
Basic use pattern for creating an (empty) OPT RR:
|
||||
|
||||
o := new(dns.OPT)
|
||||
o.Hdr.Name = "." // MUST be the root zone, per definition.
|
||||
o.Hdr.Rrtype = dns.TypeOPT
|
||||
|
||||
The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891)
|
||||
interfaces. Currently only a few have been standardized: EDNS0_NSID
|
||||
(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
|
||||
that these options may be combined in an OPT RR.
|
||||
Basic use pattern for a server to check if (and which) options are set:
|
||||
|
||||
// o is a dns.OPT
|
||||
for _, s := range o.Option {
|
||||
switch e := s.(type) {
|
||||
case *dns.EDNS0_NSID:
|
||||
// do stuff with e.Nsid
|
||||
case *dns.EDNS0_SUBNET:
|
||||
// access e.Family, e.Address, etc.
|
||||
}
|
||||
}
|
||||
|
||||
SIG(0)
|
||||
|
||||
From RFC 2931:
|
||||
|
||||
SIG(0) provides protection for DNS transactions and requests ....
|
||||
... protection for glue records, DNS requests, protection for message headers
|
||||
on requests and responses, and protection of the overall integrity of a response.
|
||||
|
||||
It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
|
||||
secret approach in TSIG.
|
||||
Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
|
||||
RSASHA512.
|
||||
|
||||
Signing subsequent messages in multi-message sessions is not implemented.
|
||||
*/
|
||||
package dns
|
||||
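The usage patterns documented in doc.go above can be combined into one small, self-contained program. A hedged sketch (the resolver address and the query name are placeholders, not values mandated by the package):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Build the question, fully qualifying the name and asking for DNSSEC data.
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
	m.SetEdns0(4096, true) // sets the DO bit

	// Synchronous exchange against a local resolver.
	c := new(dns.Client)
	in, rtt, err := c.Exchange(m, "127.0.0.1:53")
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip time:", rtt)
	for _, rr := range in.Answer {
		if mx, ok := rr.(*dns.MX); ok {
			fmt.Println(mx.Preference, mx.Mx)
		}
	}
}
```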
25 vendor/github.com/miekg/dns/duplicate.go generated vendored Normal file
@@ -0,0 +1,25 @@
|
||||
package dns
|
||||
|
||||
//go:generate go run duplicate_generate.go
|
||||
|
||||
// IsDuplicate checks if r1 and r2 are duplicates of each other, excluding the TTL.
|
||||
// So this means the header data is equal *and* the RDATA is the same. Return true
|
||||
// if so, otherwise false.
|
||||
// It is a protocol violation to have identical RRs in a message.
|
||||
func IsDuplicate(r1, r2 RR) bool {
|
||||
if r1.Header().Class != r2.Header().Class {
|
||||
return false
|
||||
}
|
||||
if r1.Header().Rrtype != r2.Header().Rrtype {
|
||||
return false
|
||||
}
|
||||
if !isDulicateName(r1.Header().Name, r2.Header().Name) {
|
||||
return false
|
||||
}
|
||||
// ignore TTL
|
||||
|
||||
return isDuplicateRdata(r1, r2)
|
||||
}
|
||||
|
||||
// isDulicateName checks if the domain names s1 and s2 are equal.
|
||||
func isDulicateName(s1, s2 string) bool { return equal(s1, s2) }
|
||||
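A short, hypothetical usage sketch for IsDuplicate (not part of the vendored file): two A records that differ only in TTL are still considered duplicates.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	r1, _ := dns.NewRR("miek.nl. 3600 IN A 192.0.2.1")
	r2, _ := dns.NewRR("miek.nl. 1800 IN A 192.0.2.1") // only the TTL differs
	fmt.Println(dns.IsDuplicate(r1, r2))               // true: TTL is ignored
}
```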
630 vendor/github.com/miekg/dns/edns.go generated vendored Normal file
@@ -0,0 +1,630 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// EDNS0 Option codes.
|
||||
const (
|
||||
EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
|
||||
EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
|
||||
EDNS0NSID = 0x3 // nsid (See RFC 5001)
|
||||
EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
|
||||
EDNS0DHU = 0x6 // DS Hash Understood
|
||||
EDNS0N3U = 0x7 // NSEC3 Hash Understood
|
||||
EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871)
|
||||
EDNS0EXPIRE = 0x9 // EDNS0 expire
|
||||
EDNS0COOKIE = 0xa // EDNS0 Cookie
|
||||
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
|
||||
EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
|
||||
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
|
||||
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
|
||||
_DO = 1 << 15 // DNSSEC OK
|
||||
)
|
||||
|
||||
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
|
||||
// See RFC 6891.
|
||||
type OPT struct {
|
||||
Hdr RR_Header
|
||||
Option []EDNS0 `dns:"opt"`
|
||||
}
|
||||
|
||||
func (rr *OPT) String() string {
|
||||
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
|
||||
if rr.Do() {
|
||||
s += "flags: do; "
|
||||
} else {
|
||||
s += "flags: ; "
|
||||
}
|
||||
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
|
||||
|
||||
for _, o := range rr.Option {
|
||||
switch o.(type) {
|
||||
case *EDNS0_NSID:
|
||||
s += "\n; NSID: " + o.String()
|
||||
h, e := o.pack()
|
||||
var r string
|
||||
if e == nil {
|
||||
for _, c := range h {
|
||||
r += "(" + string(c) + ")"
|
||||
}
|
||||
s += " " + r
|
||||
}
|
||||
case *EDNS0_SUBNET:
|
||||
s += "\n; SUBNET: " + o.String()
|
||||
case *EDNS0_COOKIE:
|
||||
s += "\n; COOKIE: " + o.String()
|
||||
case *EDNS0_UL:
|
||||
s += "\n; UPDATE LEASE: " + o.String()
|
||||
case *EDNS0_LLQ:
|
||||
s += "\n; LONG LIVED QUERIES: " + o.String()
|
||||
case *EDNS0_DAU:
|
||||
s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_DHU:
|
||||
s += "\n; DS HASH UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_N3U:
|
||||
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_LOCAL:
|
||||
s += "\n; LOCAL OPT: " + o.String()
|
||||
case *EDNS0_PADDING:
|
||||
s += "\n; PADDING: " + o.String()
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (rr *OPT) len() int {
|
||||
l := rr.Hdr.len()
|
||||
for i := 0; i < len(rr.Option); i++ {
|
||||
l += 4 // Account for 2-byte option code and 2-byte option length.
|
||||
lo, _ := rr.Option[i].pack()
|
||||
l += len(lo)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// return the old value -> delete SetVersion?
|
||||
|
||||
// Version returns the EDNS version used. Only zero is defined.
|
||||
func (rr *OPT) Version() uint8 {
|
||||
return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
|
||||
}
|
||||
|
||||
// SetVersion sets the version of EDNS. This is usually zero.
|
||||
func (rr *OPT) SetVersion(v uint8) {
|
||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16
|
||||
}
|
||||
|
||||
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
|
||||
func (rr *OPT) ExtendedRcode() int {
|
||||
return int(rr.Hdr.Ttl&0xFF000000>>24) + 15
|
||||
}
|
||||
|
||||
// SetExtendedRcode sets the EDNS extended RCODE field.
|
||||
func (rr *OPT) SetExtendedRcode(v uint8) {
|
||||
if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have!
|
||||
return
|
||||
}
|
||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v-15)<<24
|
||||
}
|
||||
|
||||
// UDPSize returns the UDP buffer size.
|
||||
func (rr *OPT) UDPSize() uint16 {
|
||||
return rr.Hdr.Class
|
||||
}
|
||||
|
||||
// SetUDPSize sets the UDP buffer size.
|
||||
func (rr *OPT) SetUDPSize(size uint16) {
|
||||
rr.Hdr.Class = size
|
||||
}
|
||||
|
||||
// Do returns the value of the DO (DNSSEC OK) bit.
|
||||
func (rr *OPT) Do() bool {
|
||||
return rr.Hdr.Ttl&_DO == _DO
|
||||
}
|
||||
|
||||
// SetDo sets the DO (DNSSEC OK) bit.
|
||||
// If we pass an argument, set the DO bit to that value.
|
||||
// It is possible to pass 2 or more arguments. Any arguments after the 1st are silently ignored.
|
||||
func (rr *OPT) SetDo(do ...bool) {
|
||||
if len(do) == 1 {
|
||||
if do[0] {
|
||||
rr.Hdr.Ttl |= _DO
|
||||
} else {
|
||||
rr.Hdr.Ttl &^= _DO
|
||||
}
|
||||
} else {
|
||||
rr.Hdr.Ttl |= _DO
|
||||
}
|
||||
}
|
||||
|
||||
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
|
||||
type EDNS0 interface {
|
||||
// Option returns the option code for the option.
|
||||
Option() uint16
|
||||
// pack returns the bytes of the option data.
|
||||
pack() ([]byte, error)
|
||||
// unpack sets the data as found in the buffer. It also sets
|
||||
// the length of the slice as the length of the option data.
|
||||
unpack([]byte) error
|
||||
// String returns the string representation of the option.
|
||||
String() string
|
||||
}
|
||||
|
||||
// EDNS0_NSID option is used to retrieve a nameserver
|
||||
// identifier. When sending a request, Nsid must be set to the empty string.
|
||||
// The identifier is an opaque string encoded as hex.
|
||||
// Basic use pattern for creating an nsid option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_NSID)
|
||||
// e.Code = dns.EDNS0NSID
|
||||
// e.Nsid = "AA"
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_NSID struct {
|
||||
Code uint16 // Always EDNS0NSID
|
||||
Nsid string // This string needs to be hex encoded
|
||||
}
|
||||
|
||||
func (e *EDNS0_NSID) pack() ([]byte, error) {
|
||||
h, err := hex.DecodeString(e.Nsid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
|
||||
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
|
||||
func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
|
||||
|
||||
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
|
||||
// an idea of where the client lives. See RFC 7871. It can then give back a different
|
||||
// answer depending on the location or network topology.
|
||||
// Basic use pattern for creating a subnet option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_SUBNET)
|
||||
// e.Code = dns.EDNS0SUBNET
|
||||
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
|
||||
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
|
||||
// e.SourceScope = 0
|
||||
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
|
||||
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
|
||||
// o.Option = append(o.Option, e)
|
||||
//
|
||||
// This code will parse all the available bits when unpacking (up to optlen).
|
||||
// When packing it will apply SourceNetmask. If you need more advanced logic,
|
||||
// patches welcome and good luck.
|
||||
type EDNS0_SUBNET struct {
|
||||
Code uint16 // Always EDNS0SUBNET
|
||||
Family uint16 // 1 for IP, 2 for IP6
|
||||
SourceNetmask uint8
|
||||
SourceScope uint8
|
||||
Address net.IP
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
|
||||
|
||||
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Family)
|
||||
b[2] = e.SourceNetmask
|
||||
b[3] = e.SourceScope
|
||||
switch e.Family {
|
||||
case 0:
|
||||
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
|
||||
// We probably don't need to complain either
|
||||
if e.SourceNetmask != 0 {
|
||||
return nil, errors.New("dns: bad address family")
|
||||
}
|
||||
case 1:
|
||||
if e.SourceNetmask > net.IPv4len*8 {
|
||||
return nil, errors.New("dns: bad netmask")
|
||||
}
|
||||
if len(e.Address.To4()) != net.IPv4len {
|
||||
return nil, errors.New("dns: bad address")
|
||||
}
|
||||
ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
|
||||
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
|
||||
b = append(b, ip[:needLength]...)
|
||||
case 2:
|
||||
if e.SourceNetmask > net.IPv6len*8 {
|
||||
return nil, errors.New("dns: bad netmask")
|
||||
}
|
||||
if len(e.Address) != net.IPv6len {
|
||||
return nil, errors.New("dns: bad address")
|
||||
}
|
||||
ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
|
||||
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
|
||||
b = append(b, ip[:needLength]...)
|
||||
default:
|
||||
return nil, errors.New("dns: bad address family")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_SUBNET) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Family = binary.BigEndian.Uint16(b)
|
||||
e.SourceNetmask = b[2]
|
||||
e.SourceScope = b[3]
|
||||
switch e.Family {
|
||||
case 0:
|
||||
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
|
||||
// It's okay to accept such a packet
|
||||
if e.SourceNetmask != 0 {
|
||||
return errors.New("dns: bad address family")
|
||||
}
|
||||
e.Address = net.IPv4(0, 0, 0, 0)
|
||||
case 1:
|
||||
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
|
||||
return errors.New("dns: bad netmask")
|
||||
}
|
||||
addr := make([]byte, net.IPv4len)
|
||||
for i := 0; i < net.IPv4len && 4+i < len(b); i++ {
|
||||
addr[i] = b[4+i]
|
||||
}
|
||||
e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3])
|
||||
case 2:
|
||||
if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
|
||||
return errors.New("dns: bad netmask")
|
||||
}
|
||||
addr := make([]byte, net.IPv6len)
|
||||
for i := 0; i < net.IPv6len && 4+i < len(b); i++ {
|
||||
addr[i] = b[4+i]
|
||||
}
|
||||
e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4],
|
||||
addr[5], addr[6], addr[7], addr[8], addr[9], addr[10],
|
||||
addr[11], addr[12], addr[13], addr[14], addr[15]}
|
||||
default:
|
||||
return errors.New("dns: bad address family")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_SUBNET) String() (s string) {
|
||||
if e.Address == nil {
|
||||
s = "<nil>"
|
||||
} else if e.Address.To4() != nil {
|
||||
s = e.Address.String()
|
||||
} else {
|
||||
s = "[" + e.Address.String() + "]"
|
||||
}
|
||||
s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
|
||||
return
|
||||
}
|
||||
|
||||
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_COOKIE)
|
||||
// e.Code = dns.EDNS0COOKIE
|
||||
// e.Cookie = "24a5ac.."
|
||||
// o.Option = append(o.Option, e)
|
||||
//
|
||||
// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is
|
||||
// always 8 bytes. It may then optionally be followed by the server cookie. The server
|
||||
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
|
||||
//
|
||||
// cCookie := o.Cookie[:16]
|
||||
// sCookie := o.Cookie[16:]
|
||||
//
|
||||
// There is no guarantee that the Cookie string has a specific length.
|
||||
type EDNS0_COOKIE struct {
|
||||
Code uint16 // Always EDNS0COOKIE
|
||||
Cookie string // Hex-encoded cookie data
|
||||
}
|
||||
|
||||
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
|
||||
h, err := hex.DecodeString(e.Cookie)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
|
||||
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
|
||||
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
|
||||
|
||||
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
|
||||
// an expiration on an update RR. This is helpful for clients that cannot clean
|
||||
// up after themselves. This is a draft RFC and more information can be found at
|
||||
// http://files.dns-sd.org/draft-sekar-dns-ul.txt
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_UL)
|
||||
// e.Code = dns.EDNS0UL
|
||||
// e.Lease = 120 // in seconds
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_UL struct {
|
||||
Code uint16 // Always EDNS0UL
|
||||
Lease uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
|
||||
func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
|
||||
|
||||
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
|
||||
func (e *EDNS0_UL) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(b, e.Lease)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_UL) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Lease = binary.BigEndian.Uint32(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
|
||||
// Implemented for completeness, as the EDNS0 type code is assigned.
|
||||
type EDNS0_LLQ struct {
|
||||
Code uint16 // Always EDNS0LLQ
|
||||
Version uint16
|
||||
Opcode uint16
|
||||
Error uint16
|
||||
Id uint64
|
||||
LeaseLife uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
|
||||
|
||||
func (e *EDNS0_LLQ) pack() ([]byte, error) {
|
||||
b := make([]byte, 18)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Version)
|
||||
binary.BigEndian.PutUint16(b[2:], e.Opcode)
|
||||
binary.BigEndian.PutUint16(b[4:], e.Error)
|
||||
binary.BigEndian.PutUint64(b[6:], e.Id)
|
||||
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LLQ) unpack(b []byte) error {
|
||||
if len(b) < 18 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Version = binary.BigEndian.Uint16(b[0:])
|
||||
e.Opcode = binary.BigEndian.Uint16(b[2:])
|
||||
e.Error = binary.BigEndian.Uint16(b[4:])
|
||||
e.Id = binary.BigEndian.Uint64(b[6:])
|
||||
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LLQ) String() string {
|
||||
s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
|
||||
" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) +
|
||||
" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
|
||||
type EDNS0_DAU struct {
|
||||
Code uint16 // Always EDNS0DAU
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
|
||||
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_DAU) String() string {
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := AlgorithmToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
|
||||
type EDNS0_DHU struct {
|
||||
Code uint16 // Always EDNS0DHU
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
|
||||
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_DHU) String() string {
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := HashToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
|
||||
type EDNS0_N3U struct {
|
||||
Code uint16 // Always EDNS0N3U
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
|
||||
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_N3U) String() string {
|
||||
// Re-use the hash map
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := HashToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
|
||||
type EDNS0_EXPIRE struct {
|
||||
Code uint16 // Always EDNS0EXPIRE
|
||||
Expire uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
|
||||
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
|
||||
|
||||
func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
b[0] = byte(e.Expire >> 24)
|
||||
b[1] = byte(e.Expire >> 16)
|
||||
b[2] = byte(e.Expire >> 8)
|
||||
b[3] = byte(e.Expire)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_EXPIRE) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Expire = binary.BigEndian.Uint32(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// The EDNS0_LOCAL option is used for local/experimental purposes. The option
|
||||
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
|
||||
// (RFC6891), although any unassigned code can actually be used. The content of
|
||||
// the option is made available in Data, unaltered.
|
||||
// Basic use pattern for creating a local option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_LOCAL)
|
||||
// e.Code = dns.EDNS0LOCALSTART
|
||||
// e.Data = []byte{72, 82, 74}
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_LOCAL struct {
|
||||
Code uint16
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
|
||||
func (e *EDNS0_LOCAL) String() string {
|
||||
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
|
||||
b := make([]byte, len(e.Data))
|
||||
copied := copy(b, e.Data)
|
||||
if copied != len(e.Data) {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) unpack(b []byte) error {
|
||||
e.Data = make([]byte, len(b))
|
||||
copied := copy(e.Data, b)
|
||||
if copied != len(b) {
|
||||
return ErrBuf
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
|
||||
// the TCP connection alive. See RFC 7828.
|
||||
type EDNS0_TCP_KEEPALIVE struct {
|
||||
Code uint16 // Always EDNSTCPKEEPALIVE
|
||||
Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
|
||||
Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
|
||||
if e.Timeout != 0 && e.Length != 2 {
|
||||
return nil, errors.New("dns: timeout specified but length is not 2")
|
||||
}
|
||||
if e.Timeout == 0 && e.Length != 0 {
|
||||
return nil, errors.New("dns: timeout not specified but length is not 0")
|
||||
}
|
||||
b := make([]byte, 4+e.Length)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Code)
|
||||
binary.BigEndian.PutUint16(b[2:], e.Length)
|
||||
if e.Length == 2 {
|
||||
binary.BigEndian.PutUint16(b[4:], e.Timeout)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Length = binary.BigEndian.Uint16(b[2:4])
|
||||
if e.Length != 0 && e.Length != 2 {
|
||||
return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10))
|
||||
}
|
||||
if e.Length == 2 {
|
||||
if len(b) < 6 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Timeout = binary.BigEndian.Uint16(b[4:6])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
|
||||
s = "use tcp keep-alive"
|
||||
if e.Length == 0 {
|
||||
s += ", timeout omitted"
|
||||
} else {
|
||||
s += fmt.Sprintf(", timeout %dms", e.Timeout*100)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EDNS0_PADDING option is used to add padding to a request/response. The default
|
||||
// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
|
||||
// compression is applied before encryption which may break signatures.
|
||||
type EDNS0_PADDING struct {
|
||||
Padding []byte
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
|
||||
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
|
||||
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
|
||||
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
|
||||
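Putting the option types above together, here is a hedged sketch of attaching an OPT RR with a client-subnet option to a query (the address, netmask and query name are placeholder assumptions):

```go
package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	o := new(dns.OPT)
	o.Hdr.Name = "." // MUST be the root zone
	o.Hdr.Rrtype = dns.TypeOPT
	o.SetUDPSize(4096)
	o.SetDo() // DNSSEC OK

	e := new(dns.EDNS0_SUBNET)
	e.Code = dns.EDNS0SUBNET
	e.Family = 1 // IPv4
	e.SourceNetmask = 24
	e.Address = net.ParseIP("192.0.2.1").To4()
	o.Option = append(o.Option, e)

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	m.Extra = append(m.Extra, o) // the OPT RR lives in the additional section
	fmt.Println(m)
}
```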
87 vendor/github.com/miekg/dns/format.go generated vendored Normal file
@@ -0,0 +1,87 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// NumField returns the number of rdata fields r has.
|
||||
func NumField(r RR) int {
|
||||
return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
|
||||
}
|
||||
|
||||
// Field returns the rdata field i as a string. Fields are indexed starting from 1.
|
||||
// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
|
||||
// string where the types are concatenated using a space.
|
||||
// Accessing non-existing fields will cause a panic.
|
||||
func Field(r RR, i int) string {
|
||||
if i == 0 {
|
||||
return ""
|
||||
}
|
||||
d := reflect.ValueOf(r).Elem().Field(i)
|
||||
switch k := d.Kind(); k {
|
||||
case reflect.String:
|
||||
return d.String()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(d.Int(), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return strconv.FormatUint(d.Uint(), 10)
|
||||
case reflect.Slice:
|
||||
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
|
||||
case `dns:"a"`:
|
||||
// TODO(miek): Hmm store this as 16 bytes
|
||||
if d.Len() < net.IPv6len {
|
||||
return net.IPv4(byte(d.Index(0).Uint()),
|
||||
byte(d.Index(1).Uint()),
|
||||
byte(d.Index(2).Uint()),
|
||||
byte(d.Index(3).Uint())).String()
|
||||
}
|
||||
return net.IPv4(byte(d.Index(12).Uint()),
|
||||
byte(d.Index(13).Uint()),
|
||||
byte(d.Index(14).Uint()),
|
||||
byte(d.Index(15).Uint())).String()
|
||||
case `dns:"aaaa"`:
|
||||
return net.IP{
|
||||
byte(d.Index(0).Uint()),
|
||||
byte(d.Index(1).Uint()),
|
||||
byte(d.Index(2).Uint()),
|
||||
byte(d.Index(3).Uint()),
|
||||
byte(d.Index(4).Uint()),
|
||||
byte(d.Index(5).Uint()),
|
||||
byte(d.Index(6).Uint()),
|
||||
byte(d.Index(7).Uint()),
|
||||
byte(d.Index(8).Uint()),
|
||||
byte(d.Index(9).Uint()),
|
||||
byte(d.Index(10).Uint()),
|
||||
byte(d.Index(11).Uint()),
|
||||
byte(d.Index(12).Uint()),
|
||||
byte(d.Index(13).Uint()),
|
||||
byte(d.Index(14).Uint()),
|
||||
byte(d.Index(15).Uint()),
|
||||
}.String()
|
||||
case `dns:"nsec"`:
|
||||
if d.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
s := Type(d.Index(0).Uint()).String()
|
||||
for i := 1; i < d.Len(); i++ {
|
||||
s += " " + Type(d.Index(i).Uint()).String()
|
||||
}
|
||||
return s
|
||||
default:
|
||||
// if it does not have a tag it's a string slice
|
||||
fallthrough
|
||||
case `dns:"txt"`:
|
||||
if d.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
s := d.Index(0).String()
|
||||
for i := 1; i < d.Len(); i++ {
|
||||
s += " " + d.Index(i).String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
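A minimal sketch of the reflection helpers above (illustrative only): Field indexes rdata fields starting at 1, so an MX record yields the preference followed by the exchange name.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
	if err != nil {
		panic(err)
	}
	for i := 1; i <= dns.NumField(rr); i++ {
		fmt.Println(i, dns.Field(rr, i)) // 1 "10", 2 "mx.miek.nl."
	}
}
```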
23 vendor/github.com/miekg/dns/fuzz.go generated vendored Normal file
@@ -0,0 +1,23 @@
|
||||
// +build fuzz
|
||||
|
||||
package dns
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
msg := new(Msg)
|
||||
|
||||
if err := msg.Unpack(data); err != nil {
|
||||
return 0
|
||||
}
|
||||
if _, err := msg.Pack(); err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
func FuzzNewRR(data []byte) int {
|
||||
if _, err := NewRR(string(data)); err != nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
242 vendor/github.com/miekg/dns/generate.go generated vendored Normal file
@@ -0,0 +1,242 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Parse the $GENERATE statement as used in BIND9 zones.
|
||||
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
|
||||
// We are called after '$GENERATE '. After which we expect:
|
||||
// * the range (12-24/2)
|
||||
// * lhs (ownername)
|
||||
// * [[ttl][class]]
|
||||
// * type
|
||||
// * rhs (rdata)
|
||||
// But we are lazy here, only the range is parsed; *all* occurrences
|
||||
// of $ after that are interpreted.
|
||||
func (zp *ZoneParser) generate(l lex) (RR, bool) {
|
||||
token := l.token
|
||||
step := 1
|
||||
if i := strings.IndexByte(token, '/'); i >= 0 {
|
||||
if i+1 == len(token) {
|
||||
return zp.setParseError("bad step in $GENERATE range", l)
|
||||
}
|
||||
|
||||
s, err := strconv.Atoi(token[i+1:])
|
||||
if err != nil || s <= 0 {
|
||||
return zp.setParseError("bad step in $GENERATE range", l)
|
||||
}
|
||||
|
||||
step = s
|
||||
token = token[:i]
|
||||
}
|
||||
|
||||
sx := strings.SplitN(token, "-", 2)
|
||||
if len(sx) != 2 {
|
||||
return zp.setParseError("bad start-stop in $GENERATE range", l)
|
||||
}
|
||||
|
||||
start, err := strconv.Atoi(sx[0])
|
||||
if err != nil {
|
||||
return zp.setParseError("bad start in $GENERATE range", l)
|
||||
}
|
||||
|
||||
end, err := strconv.Atoi(sx[1])
|
||||
if err != nil {
|
||||
return zp.setParseError("bad stop in $GENERATE range", l)
|
||||
}
|
||||
if end < 0 || start < 0 || end < start {
|
||||
return zp.setParseError("bad range in $GENERATE range", l)
|
||||
}
|
||||
|
||||
zp.c.Next() // _BLANK
|
||||
|
||||
// Create a complete new string, which we then parse again.
|
||||
var s string
|
||||
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
|
||||
if l.err {
|
||||
return zp.setParseError("bad data in $GENERATE directive", l)
|
||||
}
|
||||
if l.value == zNewline {
|
||||
break
|
||||
}
|
||||
|
||||
s += l.token
|
||||
}
|
||||
|
||||
r := &generateReader{
|
||||
s: s,
|
||||
|
||||
cur: start,
|
||||
start: start,
|
||||
end: end,
|
||||
step: step,
|
||||
|
||||
file: zp.file,
|
||||
lex: &l,
|
||||
}
|
||||
zp.sub = NewZoneParser(r, zp.origin, zp.file)
|
||||
zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
|
||||
zp.sub.SetDefaultTTL(defaultTtl)
|
||||
return zp.subNext()
|
||||
}
|
||||
|
||||
type generateReader struct {
|
||||
s string
|
||||
si int
|
||||
|
||||
cur int
|
||||
start int
|
||||
end int
|
||||
step int
|
||||
|
||||
mod bytes.Buffer
|
||||
|
||||
escape bool
|
||||
|
||||
eof bool
|
||||
|
||||
file string
|
||||
lex *lex
|
||||
}
|
||||
|
||||
func (r *generateReader) parseError(msg string, end int) *ParseError {
|
||||
r.eof = true // Make errors sticky.
|
||||
|
||||
l := *r.lex
|
||||
l.token = r.s[r.si-1 : end]
|
||||
l.column += r.si // l.column starts one zBLANK before r.s
|
||||
|
||||
return &ParseError{r.file, msg, l}
|
||||
}
|
||||
|
||||
func (r *generateReader) Read(p []byte) (int, error) {
|
||||
// NewZLexer, through NewZoneParser, should use ReadByte and
|
||||
// not end up here.
|
||||
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (r *generateReader) ReadByte() (byte, error) {
|
||||
if r.eof {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if r.mod.Len() > 0 {
|
||||
return r.mod.ReadByte()
|
||||
}
|
||||
|
||||
if r.si >= len(r.s) {
|
||||
r.si = 0
|
||||
r.cur += r.step
|
||||
|
||||
r.eof = r.cur > r.end || r.cur < 0
|
||||
return '\n', nil
|
||||
}
|
||||
|
||||
si := r.si
|
||||
r.si++
|
||||
|
||||
switch r.s[si] {
|
||||
case '\\':
|
||||
if r.escape {
|
||||
r.escape = false
|
||||
return '\\', nil
|
||||
}
|
||||
|
||||
r.escape = true
|
||||
return r.ReadByte()
|
||||
case '$':
|
||||
if r.escape {
|
||||
r.escape = false
|
||||
return '$', nil
|
||||
}
|
||||
|
||||
mod := "%d"
|
||||
|
||||
if si >= len(r.s)-1 {
|
||||
// End of the string
|
||||
fmt.Fprintf(&r.mod, mod, r.cur)
|
||||
return r.mod.ReadByte()
|
||||
}
|
||||
|
||||
if r.s[si+1] == '$' {
|
||||
r.si++
|
||||
return '$', nil
|
||||
}
|
||||
|
||||
var offset int
|
||||
|
||||
// Search for { and }
|
||||
if r.s[si+1] == '{' {
|
||||
// Modifier block
|
||||
sep := strings.Index(r.s[si+2:], "}")
|
||||
if sep < 0 {
|
||||
return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
|
||||
}
|
||||
|
||||
var errMsg string
|
||||
mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
|
||||
if errMsg != "" {
|
||||
return 0, r.parseError(errMsg, si+3+sep)
|
||||
}
|
||||
if r.start+offset < 0 || r.end+offset > 1<<31-1 {
|
||||
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
|
||||
}
|
||||
|
||||
r.si += 2 + sep // Jump to it
|
||||
}
|
||||
|
||||
fmt.Fprintf(&r.mod, mod, r.cur+offset)
|
||||
return r.mod.ReadByte()
|
||||
default:
|
||||
if r.escape { // Pretty useless here
|
||||
r.escape = false
|
||||
return r.ReadByte()
|
||||
}
|
||||
|
||||
return r.s[si], nil
|
||||
}
|
||||
}
|
||||
|
||||
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
|
||||
func modToPrintf(s string) (string, int, string) {
|
||||
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
|
||||
// values for optional width and type, if necessary.
|
||||
var offStr, widthStr, base string
|
||||
switch xs := strings.Split(s, ","); len(xs) {
|
||||
case 1:
|
||||
offStr, widthStr, base = xs[0], "0", "d"
|
||||
case 2:
|
||||
offStr, widthStr, base = xs[0], xs[1], "d"
|
||||
case 3:
|
||||
offStr, widthStr, base = xs[0], xs[1], xs[2]
|
||||
default:
|
||||
return "", 0, "bad modifier in $GENERATE"
|
||||
}
|
||||
|
||||
switch base {
|
||||
case "o", "d", "x", "X":
|
||||
default:
|
||||
return "", 0, "bad base in $GENERATE"
|
||||
}
|
||||
|
||||
offset, err := strconv.Atoi(offStr)
|
||||
if err != nil {
|
||||
return "", 0, "bad offset in $GENERATE"
|
||||
}
|
||||
|
||||
width, err := strconv.Atoi(widthStr)
|
||||
if err != nil || width < 0 || width > 255 {
|
||||
return "", 0, "bad width in $GENERATE"
|
||||
}
|
||||
|
||||
if width == 0 {
|
||||
return "%" + base, offset, ""
|
||||
}
|
||||
|
||||
return "%0" + widthStr + base, offset, ""
|
||||
}
|
||||
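A hedged sketch of the $GENERATE handling above, driven through the public zone parser (the zone content is made up purely for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := "$ORIGIN example.org.\n$TTL 3600\n$GENERATE 1-3 host-$ A 192.0.2.$\n"
	zp := dns.NewZoneParser(strings.NewReader(zone), "", "")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr) // host-1.example.org. ... A 192.0.2.1, and so on
	}
	if err := zp.Err(); err != nil {
		fmt.Println("parse error:", err)
	}
}
```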
191 vendor/github.com/miekg/dns/labels.go generated vendored Normal file
@@ -0,0 +1,191 @@
|
||||
package dns
|
||||
|
||||
// Holds a bunch of helper functions for dealing with labels.
|
||||
|
||||
// SplitDomainName splits a name string into its labels.
|
||||
// www.miek.nl. returns []string{"www", "miek", "nl"}
|
||||
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
|
||||
// The root label (.) returns nil. Note that using
|
||||
// strings.Split(s, ".") will work in most cases, but does not handle
|
||||
// escaped dots (\.) for instance.
|
||||
// s must be a syntactically valid domain name, see IsDomainName.
|
||||
func SplitDomainName(s string) (labels []string) {
|
||||
if len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
fqdnEnd := 0 // offset of the final '.' or the length of the name
|
||||
idx := Split(s)
|
||||
begin := 0
|
||||
if s[len(s)-1] == '.' {
|
||||
fqdnEnd = len(s) - 1
|
||||
} else {
|
||||
fqdnEnd = len(s)
|
||||
}
|
||||
|
||||
switch len(idx) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
// no-op
|
||||
default:
|
||||
end := 0
|
||||
for i := 1; i < len(idx); i++ {
|
||||
end = idx[i]
|
||||
labels = append(labels, s[begin:end-1])
|
||||
begin = end
|
||||
}
|
||||
}
|
||||
|
||||
labels = append(labels, s[begin:fqdnEnd])
|
||||
return labels
|
||||
}
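// Example (illustrative comment, not part of the vendored source):
//
//	SplitDomainName("www.miek.nl.")  // []string{"www", "miek", "nl"}
//	SplitDomainName(".www.miek.nl.") // []string{"", "www", "miek", "nl"}
//	SplitDomainName(".")             // nil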
|
||||
|
||||
// CompareDomainName compares the names s1 and s2 and
|
||||
// returns how many labels they have in common starting from the *right*.
|
||||
// The comparison stops at the first inequality. The names are downcased
|
||||
// before the comparison.
|
||||
//
|
||||
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
|
||||
// www.miek.nl. and www.bla.nl. have one label in common: nl
|
||||
//
|
||||
// s1 and s2 must be syntactically valid domain names.
|
||||
func CompareDomainName(s1, s2 string) (n int) {
|
||||
// the first check: root label
|
||||
if s1 == "." || s2 == "." {
|
||||
return 0
|
||||
}
|
||||
|
||||
l1 := Split(s1)
|
||||
l2 := Split(s2)
|
||||
|
||||
j1 := len(l1) - 1 // end
|
||||
i1 := len(l1) - 2 // start
|
||||
j2 := len(l2) - 1
|
||||
i2 := len(l2) - 2
|
||||
// the second check can be done here: last/only label
|
||||
// before we fall through into the for-loop below
|
||||
if equal(s1[l1[j1]:], s2[l2[j2]:]) {
|
||||
n++
|
||||
} else {
|
||||
return
|
||||
}
|
||||
for {
|
||||
if i1 < 0 || i2 < 0 {
|
||||
break
|
||||
}
|
||||
if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
|
||||
n++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
j1--
|
||||
i1--
|
||||
j2--
|
||||
i2--
|
||||
}
|
||||
return
|
||||
}
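// Example (illustrative comment, taken from the documentation above):
//
//	CompareDomainName("www.miek.nl.", "miek.nl.")    // 2 ("miek" and "nl")
//	CompareDomainName("www.miek.nl.", "www.bla.nl.") // 1 ("nl")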
|
||||
|
||||
// CountLabel counts the number of labels in the string s.
|
||||
// s must be a syntactically valid domain name.
|
||||
func CountLabel(s string) (labels int) {
|
||||
if s == "." {
|
||||
return
|
||||
}
|
||||
off := 0
|
||||
end := false
|
||||
for {
|
||||
off, end = NextLabel(s, off)
|
||||
labels++
|
||||
if end {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Split splits a name s into its label indexes.
|
||||
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
|
||||
// The root name (.) returns nil. Also see SplitDomainName.
|
||||
// s must be a syntactically valid domain name.
|
||||
func Split(s string) []int {
|
||||
if s == "." {
|
||||
return nil
|
||||
}
|
||||
idx := make([]int, 1, 3)
|
||||
off := 0
|
||||
end := false
|
||||
|
||||
for {
|
||||
off, end = NextLabel(s, off)
|
||||
if end {
|
||||
return idx
|
||||
}
|
||||
idx = append(idx, off)
|
||||
}
|
||||
}
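// Example (illustrative comment): Split("www.miek.nl.") and Split("www.miek.nl")
// both return []int{0, 4, 9}, the byte offset at which each label starts.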
|
||||
|
||||
// NextLabel returns the index of the start of the next label in the
|
||||
// string s starting at offset.
|
||||
// The bool end is true when the end of the string has been reached.
|
||||
// Also see PrevLabel.
|
||||
func NextLabel(s string, offset int) (i int, end bool) {
|
||||
quote := false
|
||||
for i = offset; i < len(s)-1; i++ {
|
||||
switch s[i] {
|
||||
case '\\':
|
||||
quote = !quote
|
||||
default:
|
||||
quote = false
|
||||
case '.':
|
||||
if quote {
|
||||
quote = !quote
|
||||
continue
|
||||
}
|
||||
return i + 1, false
|
||||
}
|
||||
}
|
||||
return i + 1, true
|
||||
}
|
||||
|
||||
// PrevLabel returns the index of the label when starting from the right and
|
||||
// jumping n labels to the left.
|
||||
// The bool start is true when the start of the string has been overshot.
|
||||
// Also see NextLabel.
|
||||
func PrevLabel(s string, n int) (i int, start bool) {
|
||||
if n == 0 {
|
||||
return len(s), false
|
||||
}
|
||||
lab := Split(s)
|
||||
if lab == nil {
|
||||
return 0, true
|
||||
}
|
||||
if n > len(lab) {
|
||||
return 0, true
|
||||
}
|
||||
return lab[len(lab)-n], false
|
||||
}
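// Example (illustrative comment): for s = "www.miek.nl.", PrevLabel(s, 1)
// returns (9, false), the offset of the final label "nl."; PrevLabel(s, 4)
// overshoots the start of the name and returns (0, true).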
|
||||
|
||||
// equal compares a and b while ignoring case. It returns true when they are equal, otherwise false.
|
||||
func equal(a, b string) bool {
|
||||
// might be lifted into API function.
|
||||
la := len(a)
|
||||
lb := len(b)
|
||||
if la != lb {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := la - 1; i >= 0; i-- {
|
||||
ai := a[i]
|
||||
bi := b[i]
|
||||
if ai >= 'A' && ai <= 'Z' {
|
||||
ai |= 'a' - 'A'
|
||||
}
|
||||
if bi >= 'A' && bi <= 'Z' {
|
||||
bi |= 'a' - 'A'
|
||||
}
|
||||
if ai != bi {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
44
vendor/github.com/miekg/dns/listen_go111.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// +build go1.11
|
||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const supportsReusePort = true
|
||||
|
||||
func reuseportControl(network, address string, c syscall.RawConn) error {
|
||||
var opErr error
|
||||
err := c.Control(func(fd uintptr) {
|
||||
opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return opErr
|
||||
}
|
||||
|
||||
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
|
||||
var lc net.ListenConfig
|
||||
if reuseport {
|
||||
lc.Control = reuseportControl
|
||||
}
|
||||
|
||||
return lc.Listen(context.Background(), network, addr)
|
||||
}
|
||||
|
||||
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
|
||||
var lc net.ListenConfig
|
||||
if reuseport {
|
||||
lc.Control = reuseportControl
|
||||
}
|
||||
|
||||
return lc.ListenPacket(context.Background(), network, addr)
|
||||
}
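// Usage sketch (illustrative comment; these helpers are package-internal):
// listenUDP("udp", ":8053", true) binds a UDP socket with SO_REUSEPORT set,
// so several listeners in separate processes can share the same port.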
|
||||
23
vendor/github.com/miekg/dns/listen_go_not111.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
|
||||
|
||||
package dns
|
||||
|
||||
import "net"
|
||||
|
||||
const supportsReusePort = false
|
||||
|
||||
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
|
||||
if reuseport {
|
||||
// TODO(tmthrgd): return an error?
|
||||
}
|
||||
|
||||
return net.Listen(network, addr)
|
||||
}
|
||||
|
||||
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
|
||||
if reuseport {
|
||||
// TODO(tmthrgd): return an error?
|
||||
}
|
||||
|
||||
return net.ListenPacket(network, addr)
|
||||
}
|
||||
1181
vendor/github.com/miekg/dns/msg.go
generated
vendored
Normal file
File diff suppressed because it is too large
633
vendor/github.com/miekg/dns/msg_helpers.go
generated
vendored
Normal file
@@ -0,0 +1,633 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// helper functions called from the generated zmsg.go
|
||||
|
||||
// These functions are named after the tag they help pack/unpack; if there is no tag it is the name
|
||||
// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or
|
||||
// packDataDomainName.
|
||||
|
||||
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
|
||||
if off+net.IPv4len > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking a"}
|
||||
}
|
||||
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
|
||||
off += net.IPv4len
|
||||
return a, off, nil
|
||||
}
|
||||
|
||||
func packDataA(a net.IP, msg []byte, off int) (int, error) {
|
||||
// The address must be a 4-byte slice; if it is 16 bytes, only the IPv4 part (To4) is encoded
|
||||
if off+net.IPv4len > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing a"}
|
||||
}
|
||||
switch len(a) {
|
||||
case net.IPv4len, net.IPv6len:
|
||||
copy(msg[off:], a.To4())
|
||||
off += net.IPv4len
|
||||
case 0:
|
||||
// Allowed, for dynamic updates.
|
||||
default:
|
||||
return len(msg), &Error{err: "overflow packing a"}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
|
||||
if off+net.IPv6len > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
|
||||
}
|
||||
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
|
||||
off += net.IPv6len
|
||||
return aaaa, off, nil
|
||||
}
|
||||
|
||||
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
|
||||
if off+net.IPv6len > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing aaaa"}
|
||||
}
|
||||
|
||||
switch len(aaaa) {
|
||||
case net.IPv6len:
|
||||
copy(msg[off:], aaaa)
|
||||
off += net.IPv6len
|
||||
case 0:
|
||||
// Allowed, dynamic updates.
|
||||
default:
|
||||
return len(msg), &Error{err: "overflow packing aaaa"}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
|
||||
// re-sliced msg according to the expected length of the RR.
|
||||
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
|
||||
hdr := RR_Header{}
|
||||
if off == len(msg) {
|
||||
return hdr, off, msg, nil
|
||||
}
|
||||
|
||||
hdr.Name, off, err = UnpackDomainName(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Rrtype, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Class, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Ttl, off, err = unpackUint32(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Rdlength, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
|
||||
return hdr, off, msg, err
|
||||
}
|
||||
|
||||
// pack packs an RR header, returning the offset to the end of the header.
|
||||
// See PackDomainName for documentation about the compression.
|
||||
func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
|
||||
if off == len(msg) {
|
||||
return off, nil
|
||||
}
|
||||
|
||||
off, err = PackDomainName(hdr.Name, msg, off, compression, compress)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(hdr.Rrtype, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(hdr.Class, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint32(hdr.Ttl, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(hdr.Rdlength, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
// helper helper functions.
|
||||
|
||||
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
|
||||
// Returns an error if msg is smaller than the expected size.
|
||||
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
|
||||
lenrd := off + int(rdlength)
|
||||
if lenrd > len(msg) {
|
||||
return msg, &Error{err: "overflowing header size"}
|
||||
}
|
||||
return msg[:lenrd], nil
|
||||
}
|
||||
|
||||
var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
|
||||
|
||||
func fromBase32(s []byte) (buf []byte, err error) {
|
||||
for i, b := range s {
|
||||
if b >= 'a' && b <= 'z' {
|
||||
s[i] = b - 32
|
||||
}
|
||||
}
|
||||
buflen := base32HexNoPadEncoding.DecodedLen(len(s))
|
||||
buf = make([]byte, buflen)
|
||||
n, err := base32HexNoPadEncoding.Decode(buf, s)
|
||||
buf = buf[:n]
|
||||
return
|
||||
}
|
||||
|
||||
func toBase32(b []byte) string {
|
||||
return base32HexNoPadEncoding.EncodeToString(b)
|
||||
}
|
||||
|
||||
func fromBase64(s []byte) (buf []byte, err error) {
|
||||
buflen := base64.StdEncoding.DecodedLen(len(s))
|
||||
buf = make([]byte, buflen)
|
||||
n, err := base64.StdEncoding.Decode(buf, s)
|
||||
buf = buf[:n]
|
||||
return
|
||||
}
|
||||
|
||||
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
|
||||
|
||||
// noRdata returns true if the Rdlength is zero (no rdata, as used in dynamic updates).
|
||||
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
|
||||
|
||||
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
|
||||
if off+1 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
|
||||
}
|
||||
return uint8(msg[off]), off + 1, nil
|
||||
}
|
||||
|
||||
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
|
||||
if off+1 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint8"}
|
||||
}
|
||||
msg[off] = byte(i)
|
||||
return off + 1, nil
|
||||
}
|
||||
|
||||
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
|
||||
if off+2 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
|
||||
}
|
||||
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
|
||||
}
|
||||
|
||||
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
|
||||
if off+2 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint16"}
|
||||
}
|
||||
binary.BigEndian.PutUint16(msg[off:], i)
|
||||
return off + 2, nil
|
||||
}
|
||||
|
||||
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
|
||||
if off+4 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
|
||||
}
|
||||
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
|
||||
}
|
||||
|
||||
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
|
||||
if off+4 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint32"}
|
||||
}
|
||||
binary.BigEndian.PutUint32(msg[off:], i)
|
||||
return off + 4, nil
|
||||
}
|
||||
|
||||
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
|
||||
if off+6 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
|
||||
}
|
||||
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
|
||||
i = uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
|
||||
uint64(msg[off+4])<<8 | uint64(msg[off+5]))
|
||||
off += 6
|
||||
return i, off, nil
|
||||
}
|
||||
|
||||
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
|
||||
if off+6 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
|
||||
}
|
||||
msg[off] = byte(i >> 40)
|
||||
msg[off+1] = byte(i >> 32)
|
||||
msg[off+2] = byte(i >> 24)
|
||||
msg[off+3] = byte(i >> 16)
|
||||
msg[off+4] = byte(i >> 8)
|
||||
msg[off+5] = byte(i)
|
||||
off += 6
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
|
||||
if off+8 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
|
||||
}
|
||||
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
|
||||
}
|
||||
|
||||
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
|
||||
if off+8 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint64"}
|
||||
}
|
||||
binary.BigEndian.PutUint64(msg[off:], i)
|
||||
off += 8
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackString(msg []byte, off int) (string, int, error) {
|
||||
if off+1 > len(msg) {
|
||||
return "", off, &Error{err: "overflow unpacking txt"}
|
||||
}
|
||||
l := int(msg[off])
|
||||
if off+l+1 > len(msg) {
|
||||
return "", off, &Error{err: "overflow unpacking txt"}
|
||||
}
|
||||
var s strings.Builder
|
||||
s.Grow(l)
|
||||
for _, b := range msg[off+1 : off+1+l] {
|
||||
switch {
|
||||
case b == '"' || b == '\\':
|
||||
s.WriteByte('\\')
|
||||
s.WriteByte(b)
|
||||
case b < ' ' || b > '~': // unprintable
|
||||
writeEscapedByte(&s, b)
|
||||
default:
|
||||
s.WriteByte(b)
|
||||
}
|
||||
}
|
||||
off += 1 + l
|
||||
return s.String(), off, nil
|
||||
}
|
||||
|
||||
func packString(s string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1)
|
||||
off, err := packTxtString(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking base32"}
|
||||
}
|
||||
s := toBase32(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringBase32(s string, msg []byte, off int) (int, error) {
|
||||
b32, err := fromBase32([]byte(s))
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+len(b32) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing base32"}
|
||||
}
|
||||
copy(msg[off:off+len(b32)], b32)
|
||||
off += len(b32)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
|
||||
// Rest of the RR is base64 encoded value, so we don't need an explicit length
|
||||
// to be set. Thus far all RR's that have base64 encoded fields have those as their
|
||||
// last one. What we do need is the end of the RR!
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking base64"}
|
||||
}
|
||||
s := toBase64(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringBase64(s string, msg []byte, off int) (int, error) {
|
||||
b64, err := fromBase64([]byte(s))
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+len(b64) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing base64"}
|
||||
}
|
||||
copy(msg[off:off+len(b64)], b64)
|
||||
off += len(b64)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
|
||||
// Rest of the RR is hex encoded value, so we don't need an explicit length
|
||||
// to be set. NSEC and TSIG have hex fields with a length field.
|
||||
// What we do need is the end of the RR!
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking hex"}
|
||||
}
|
||||
|
||||
s := hex.EncodeToString(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringHex(s string, msg []byte, off int) (int, error) {
|
||||
h, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+len(h) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing hex"}
|
||||
}
|
||||
copy(msg[off:off+len(h)], h)
|
||||
off += len(h)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
|
||||
txt, off, err := unpackTxt(msg, off)
|
||||
if err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
return txt, off, nil
|
||||
}
|
||||
|
||||
func packStringTxt(s []string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1) // If the whole string consists of \DDD escapes we need this many.
|
||||
off, err := packTxt(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
|
||||
var edns []EDNS0
|
||||
Option:
|
||||
code := uint16(0)
|
||||
if off+4 > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
code = binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
optlen := binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
if off+int(optlen) > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
switch code {
|
||||
case EDNS0NSID:
|
||||
e := new(EDNS0_NSID)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0SUBNET:
|
||||
e := new(EDNS0_SUBNET)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0COOKIE:
|
||||
e := new(EDNS0_COOKIE)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0UL:
|
||||
e := new(EDNS0_UL)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0LLQ:
|
||||
e := new(EDNS0_LLQ)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0DAU:
|
||||
e := new(EDNS0_DAU)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0DHU:
|
||||
e := new(EDNS0_DHU)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0N3U:
|
||||
e := new(EDNS0_N3U)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0PADDING:
|
||||
e := new(EDNS0_PADDING)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
default:
|
||||
e := new(EDNS0_LOCAL)
|
||||
e.Code = code
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
}
|
||||
|
||||
if off < len(msg) {
|
||||
goto Option
|
||||
}
|
||||
|
||||
return edns, off, nil
|
||||
}
|
||||
|
||||
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
|
||||
for _, el := range options {
|
||||
b, err := el.pack()
|
||||
if err != nil || off+3 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing opt"}
|
||||
}
|
||||
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
|
||||
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
|
||||
off += 4
|
||||
if off+len(b) > len(msg) {
|
||||
copy(msg[off:], b)
|
||||
off = len(msg)
|
||||
continue
|
||||
}
|
||||
// Actual data
|
||||
copy(msg[off:off+len(b)], b)
|
||||
off += len(b)
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringOctet(msg []byte, off int) (string, int, error) {
|
||||
s := string(msg[off:])
|
||||
return s, len(msg), nil
|
||||
}
|
||||
|
||||
func packStringOctet(s string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1)
|
||||
off, err := packOctetString(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
|
||||
var nsec []uint16
|
||||
length, window, lastwindow := 0, 0, -1
|
||||
for off < len(msg) {
|
||||
if off+2 > len(msg) {
|
||||
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
|
||||
}
|
||||
window = int(msg[off])
|
||||
length = int(msg[off+1])
|
||||
off += 2
|
||||
if window <= lastwindow {
|
||||
// RFC 4034: Blocks are present in the NSEC RR RDATA in
|
||||
// increasing numerical order.
|
||||
return nsec, len(msg), &Error{err: "out of order NSEC block"}
|
||||
}
|
||||
if length == 0 {
|
||||
// RFC 4034: Blocks with no types present MUST NOT be included.
|
||||
return nsec, len(msg), &Error{err: "empty NSEC block"}
|
||||
}
|
||||
if length > 32 {
|
||||
return nsec, len(msg), &Error{err: "NSEC block too long"}
|
||||
}
|
||||
if off+length > len(msg) {
|
||||
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
|
||||
}
|
||||
|
||||
// Walk the bytes in the window and extract the type bits
|
||||
for j := 0; j < length; j++ {
|
||||
b := msg[off+j]
|
||||
// Check the bits one by one, and set the type
|
||||
if b&0x80 == 0x80 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+0))
|
||||
}
|
||||
if b&0x40 == 0x40 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+1))
|
||||
}
|
||||
if b&0x20 == 0x20 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+2))
|
||||
}
|
||||
if b&0x10 == 0x10 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+3))
|
||||
}
|
||||
if b&0x8 == 0x8 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+4))
|
||||
}
|
||||
if b&0x4 == 0x4 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+5))
|
||||
}
|
||||
if b&0x2 == 0x2 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+6))
|
||||
}
|
||||
if b&0x1 == 0x1 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+7))
|
||||
}
|
||||
}
|
||||
off += length
|
||||
lastwindow = window
|
||||
}
|
||||
return nsec, off, nil
|
||||
}
|
||||
|
||||
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
|
||||
if len(bitmap) == 0 {
|
||||
return off, nil
|
||||
}
|
||||
var lastwindow, lastlength uint16
|
||||
for j := 0; j < len(bitmap); j++ {
|
||||
t := bitmap[j]
|
||||
window := t / 256
|
||||
length := (t-window*256)/8 + 1
|
||||
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
|
||||
off += int(lastlength) + 2
|
||||
lastlength = 0
|
||||
}
|
||||
if window < lastwindow || length < lastlength {
|
||||
return len(msg), &Error{err: "nsec bits out of order"}
|
||||
}
|
||||
if off+2+int(length) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing nsec"}
|
||||
}
|
||||
// Setting the window #
|
||||
msg[off] = byte(window)
|
||||
// Setting the octets length
|
||||
msg[off+1] = byte(length)
|
||||
// Setting the bit value for the type in the right octet
|
||||
msg[off+1+int(length)] |= byte(1 << (7 - t%8))
|
||||
lastwindow, lastlength = window, length
|
||||
}
|
||||
off += int(lastlength) + 2
|
||||
return off, nil
|
||||
}
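// Illustrative comment (added): a bitmap entry for TypeA (1) lands in window 0,
// octet 0, bit 6, so packDataNsec([]uint16{1}, msg, off) writes the three bytes
// {0x00, 0x01, 0x40} for window number, octet count and bitmap.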
|
||||
|
||||
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
|
||||
var (
|
||||
servers []string
|
||||
s string
|
||||
err error
|
||||
)
|
||||
if end > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
|
||||
}
|
||||
for off < end {
|
||||
s, off, err = UnpackDomainName(msg, off)
|
||||
if err != nil {
|
||||
return servers, len(msg), err
|
||||
}
|
||||
servers = append(servers, s)
|
||||
}
|
||||
return servers, off, nil
|
||||
}
|
||||
|
||||
func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) {
|
||||
var err error
|
||||
for j := 0; j < len(names); j++ {
|
||||
off, err = PackDomainName(names[j], msg, off, compression, false && compress)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
106
vendor/github.com/miekg/dns/nsecx.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"hash"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type saltWireFmt struct {
|
||||
Salt string `dns:"size-hex"`
|
||||
}
|
||||
|
||||
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
|
||||
func HashName(label string, ha uint8, iter uint16, salt string) string {
|
||||
saltwire := new(saltWireFmt)
|
||||
saltwire.Salt = salt
|
||||
wire := make([]byte, DefaultMsgSize)
|
||||
n, err := packSaltWire(saltwire, wire)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
wire = wire[:n]
|
||||
name := make([]byte, 255)
|
||||
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
name = name[:off]
|
||||
var s hash.Hash
|
||||
switch ha {
|
||||
case SHA1:
|
||||
s = sha1.New()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
||||
// k = 0
|
||||
s.Write(name)
|
||||
s.Write(wire)
|
||||
nsec3 := s.Sum(nil)
|
||||
// k > 0
|
||||
for k := uint16(0); k < iter; k++ {
|
||||
s.Reset()
|
||||
s.Write(nsec3)
|
||||
s.Write(wire)
|
||||
nsec3 = s.Sum(nsec3[:0])
|
||||
}
|
||||
return toBase32(nsec3)
|
||||
}
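// Usage sketch (illustrative comment, values hypothetical):
// HashName("example.com.", SHA1, 12, "AABBCCDD") returns the base32hex NSEC3
// hash of the owner name, as used for the owner labels of NSEC3 records.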
|
||||
|
||||
// Cover returns true if a name is covered by the NSEC3 record
|
||||
func (rr *NSEC3) Cover(name string) bool {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
labelIndices := Split(owner)
|
||||
if len(labelIndices) < 2 {
|
||||
return false
|
||||
}
|
||||
ownerHash := owner[:labelIndices[1]-1]
|
||||
ownerZone := owner[labelIndices[1]:]
|
||||
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
|
||||
return false
|
||||
}
|
||||
|
||||
nextHash := rr.NextDomain
|
||||
if ownerHash == nextHash { // empty interval
|
||||
return false
|
||||
}
|
||||
if ownerHash > nextHash { // end of zone
|
||||
if nameHash > ownerHash { // covered since there is nothing after ownerHash
|
||||
return true
|
||||
}
|
||||
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
|
||||
}
|
||||
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
|
||||
return false
|
||||
}
|
||||
return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash)
|
||||
}
|
||||
|
||||
// Match returns true if a name matches the NSEC3 record
|
||||
func (rr *NSEC3) Match(name string) bool {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
labelIndices := Split(owner)
|
||||
if len(labelIndices) < 2 {
|
||||
return false
|
||||
}
|
||||
ownerHash := owner[:labelIndices[1]-1]
|
||||
ownerZone := owner[labelIndices[1]:]
|
||||
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
|
||||
return false
|
||||
}
|
||||
if ownerHash == nameHash {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
|
||||
off, err := packStringHex(sw.Salt, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
147
vendor/github.com/miekg/dns/privaterr.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
|
||||
// RFC 6895. This allows one to experiment with new RR types, without requesting an
|
||||
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
|
||||
type PrivateRdata interface {
|
||||
// String returns the text presentation of the Rdata of the Private RR.
|
||||
String() string
|
||||
// Parse parses the Rdata of the private RR.
|
||||
Parse([]string) error
|
||||
// Pack is used when packing a private RR into a buffer.
|
||||
Pack([]byte) (int, error)
|
||||
// Unpack is used when unpacking a private RR from a buffer.
|
||||
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
|
||||
Unpack([]byte) (int, error)
|
||||
// Copy copies the Rdata.
|
||||
Copy(PrivateRdata) error
|
||||
// Len returns the length in octets of the Rdata.
|
||||
Len() int
|
||||
}
|
||||
|
||||
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
|
||||
// It mocks normal RRs and implements dns.RR interface.
|
||||
type PrivateRR struct {
|
||||
Hdr RR_Header
|
||||
Data PrivateRdata
|
||||
}
|
||||
|
||||
func mkPrivateRR(rrtype uint16) *PrivateRR {
|
||||
// Panics if RR is not an instance of PrivateRR.
|
||||
rrfunc, ok := TypeToRR[rrtype]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
|
||||
}
|
||||
|
||||
anyrr := rrfunc()
|
||||
switch rr := anyrr.(type) {
|
||||
case *PrivateRR:
|
||||
return rr
|
||||
}
|
||||
panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
|
||||
}
|
||||
|
||||
// Header return the RR header of r.
|
||||
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
|
||||
|
||||
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
|
||||
|
||||
// Private len and copy parts to satisfy RR interface.
|
||||
func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
|
||||
func (r *PrivateRR) copy() RR {
|
||||
// make new RR like this:
|
||||
rr := mkPrivateRR(r.Hdr.Rrtype)
|
||||
rr.Hdr = r.Hdr
|
||||
|
||||
err := r.Data.Copy(rr.Data)
|
||||
if err != nil {
|
||||
panic("dns: got value that could not be used to copy Private rdata")
|
||||
}
|
||||
return rr
|
||||
}
|
||||
func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
|
||||
off, err := r.Hdr.pack(msg, off, compression, compress)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
headerEnd := off
|
||||
n, err := r.Data.Pack(msg[off:])
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off += n
|
||||
r.Header().Rdlength = uint16(off - headerEnd)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
// PrivateHandle registers a private resource record type. It requires
|
||||
// string and numeric representation of private RR type and generator function as argument.
|
||||
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
|
||||
rtypestr = strings.ToUpper(rtypestr)
|
||||
|
||||
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
|
||||
TypeToString[rtype] = rtypestr
|
||||
StringToType[rtypestr] = rtype
|
||||
|
||||
typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
|
||||
if noRdata(h) {
|
||||
return &h, off, nil
|
||||
}
|
||||
var err error
|
||||
|
||||
rr := mkPrivateRR(h.Rrtype)
|
||||
rr.Hdr = h
|
||||
|
||||
off1, err := rr.Data.Unpack(msg[off:])
|
||||
off += off1
|
||||
if err != nil {
|
||||
return rr, off, err
|
||||
}
|
||||
return rr, off, err
|
||||
}
|
||||
|
||||
setPrivateRR := func(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) {
|
||||
rr := mkPrivateRR(h.Rrtype)
|
||||
rr.Hdr = h
|
||||
|
||||
var l lex
|
||||
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
|
||||
Fetch:
|
||||
for {
|
||||
// TODO(miek): we could also be returning _QUOTE, this might or might not
|
||||
// be an issue (basically parsing TXT becomes hard)
|
||||
switch l, _ = c.Next(); l.value {
|
||||
case zNewline, zEOF:
|
||||
break Fetch
|
||||
case zString:
|
||||
text = append(text, l.token)
|
||||
}
|
||||
}
|
||||
|
||||
err := rr.Data.Parse(text)
|
||||
if err != nil {
|
||||
return nil, &ParseError{f, err.Error(), l}, ""
|
||||
}
|
||||
|
||||
return rr, nil, ""
|
||||
}
|
||||
|
||||
typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
|
||||
}
|
||||
|
||||
// PrivateHandleRemove removes definitions required to support private RR type.
|
||||
func PrivateHandleRemove(rtype uint16) {
|
||||
rtypestr, ok := TypeToString[rtype]
|
||||
if ok {
|
||||
delete(TypeToRR, rtype)
|
||||
delete(TypeToString, rtype)
|
||||
delete(typeToparserFunc, rtype)
|
||||
delete(StringToType, rtypestr)
|
||||
delete(typeToUnpack, rtype)
|
||||
}
|
||||
}
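// Usage sketch (illustrative comment, names and type code hypothetical):
//
//	dns.PrivateHandle("MYRR", 0xFF01, func() dns.PrivateRdata { return new(myRdata) })
//
// registers a private RR type; the numeric code should fall in the RFC 6895
// private-use range (0xFF00-0xFFFE). dns.PrivateHandleRemove(0xFF01) undoes it.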
|
||||
49
vendor/github.com/miekg/dns/rawmsg.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
package dns
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// rawSetRdlength sets the rdlength in the header of
|
||||
// the RR. The offset 'off' must be positioned at the
|
||||
// start of the header of the RR, 'end' must be the
|
||||
// end of the RR.
|
||||
func rawSetRdlength(msg []byte, off, end int) bool {
|
||||
l := len(msg)
|
||||
Loop:
|
||||
for {
|
||||
if off+1 > l {
|
||||
return false
|
||||
}
|
||||
c := int(msg[off])
|
||||
off++
|
||||
switch c & 0xC0 {
|
||||
case 0x00:
|
||||
if c == 0x00 {
|
||||
// End of the domainname
|
||||
break Loop
|
||||
}
|
||||
if off+c > l {
|
||||
return false
|
||||
}
|
||||
off += c
|
||||
|
||||
case 0xC0:
|
||||
// pointer, next byte included, ends domainname
|
||||
off++
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
// The domain name has been seen; we are at the start of the fixed part of the header.
|
||||
// Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
|
||||
off += 2 + 2 + 4
|
||||
if off+2 > l {
|
||||
return false
|
||||
}
|
||||
// off+1 is the end of the header, 'end' is the end of the rr
|
||||
// so 'end' - 'off+2' is the length of the rdata
|
||||
rdatalen := end - (off + 2)
|
||||
if rdatalen > 0xFFFF {
|
||||
return false
|
||||
}
|
||||
binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen))
|
||||
return true
|
||||
}
|
||||
38
vendor/github.com/miekg/dns/reverse.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
package dns
|
||||
|
||||
// StringToType is the reverse of TypeToString, needed for string parsing.
|
||||
var StringToType = reverseInt16(TypeToString)
|
||||
|
||||
// StringToClass is the reverse of ClassToString, needed for string parsing.
|
||||
var StringToClass = reverseInt16(ClassToString)
|
||||
|
||||
// StringToOpcode is a map of opcodes to strings.
|
||||
var StringToOpcode = reverseInt(OpcodeToString)
|
||||
|
||||
// StringToRcode is a map of rcodes to strings.
|
||||
var StringToRcode = reverseInt(RcodeToString)
|
||||
|
||||
// Reverse a map
|
||||
func reverseInt8(m map[uint8]string) map[string]uint8 {
|
||||
n := make(map[string]uint8, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func reverseInt16(m map[uint16]string) map[string]uint16 {
|
||||
n := make(map[string]uint16, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func reverseInt(m map[int]string) map[string]int {
|
||||
n := make(map[string]int, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
85
vendor/github.com/miekg/dns/sanitize.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
package dns
|
||||
|
||||
// Dedup removes identical RRs from rrs. It preserves the original ordering.
|
||||
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
|
||||
// rrs.
|
||||
// m is used to store the RRs temporarily. If it is nil, a new map will be allocated.
|
||||
func Dedup(rrs []RR, m map[string]RR) []RR {
|
||||
|
||||
if m == nil {
|
||||
m = make(map[string]RR)
|
||||
}
|
||||
// Save the keys, so we don't have to call normalizedString twice.
|
||||
keys := make([]*string, 0, len(rrs))
|
||||
|
||||
for _, r := range rrs {
|
||||
key := normalizedString(r)
|
||||
keys = append(keys, &key)
|
||||
if _, ok := m[key]; ok {
|
||||
// Shortest TTL wins.
|
||||
if m[key].Header().Ttl > r.Header().Ttl {
|
||||
m[key].Header().Ttl = r.Header().Ttl
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
m[key] = r
|
||||
}
|
||||
// If the length of the result map equals the amount of RRs we got,
|
||||
// it means they were all different. We can then just return the original rrset.
|
||||
if len(m) == len(rrs) {
|
||||
return rrs
|
||||
}
|
||||
|
||||
j := 0
|
||||
for i, r := range rrs {
|
||||
// If keys[i] lives in the map, we should copy and remove it.
|
||||
if _, ok := m[*keys[i]]; ok {
|
||||
delete(m, *keys[i])
|
||||
rrs[j] = r
|
||||
j++
|
||||
}
|
||||
|
||||
if len(m) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return rrs[:j]
|
||||
}
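// Example (illustrative comment): given two identical A records for
// "foo.example." with TTLs 300 and 60, Dedup keeps the first record and
// lowers its TTL to 60.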
|
||||
|
||||
// normalizedString returns a normalized string from r. The TTL
|
||||
// is removed and the domain name is lowercased. We go from this:
|
||||
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
|
||||
// lowercasename<TAB>CLASS<TAB>TYPE...
|
||||
func normalizedString(r RR) string {
|
||||
// A string Go DNS makes has: domainname<TAB>TTL<TAB>...
|
||||
b := []byte(r.String())
|
||||
|
||||
// find the first non-escaped tab, then another, so we capture where the TTL lives.
|
||||
esc := false
|
||||
ttlStart, ttlEnd := 0, 0
|
||||
for i := 0; i < len(b) && ttlEnd == 0; i++ {
|
||||
switch {
|
||||
case b[i] == '\\':
|
||||
esc = !esc
|
||||
case b[i] == '\t' && !esc:
|
||||
if ttlStart == 0 {
|
||||
ttlStart = i
|
||||
continue
|
||||
}
|
||||
if ttlEnd == 0 {
|
||||
ttlEnd = i
|
||||
}
|
||||
case b[i] >= 'A' && b[i] <= 'Z' && !esc:
|
||||
b[i] += 32
|
||||
default:
|
||||
esc = false
|
||||
}
|
||||
}
|
||||
|
||||
// remove TTL.
|
||||
copy(b[ttlStart:], b[ttlEnd:])
|
||||
cut := ttlEnd - ttlStart
|
||||
return string(b[:len(b)-cut])
|
||||
}
|
||||
1331
vendor/github.com/miekg/dns/scan.go
generated
vendored
Normal file
File diff suppressed because it is too large
2209
vendor/github.com/miekg/dns/scan_rr.go
generated
vendored
Normal file
File diff suppressed because it is too large
147
vendor/github.com/miekg/dns/serve_mux.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ServeMux is a DNS request multiplexer. It matches the zone name of
// each incoming request against a list of registered patterns and calls
// the handler for the pattern that most closely matches the zone name.
|
||||
//
|
||||
// ServeMux is DNSSEC aware, meaning that queries for the DS record are
|
||||
// redirected to the parent zone (if that is also registered), otherwise
|
||||
// the child gets the query.
|
||||
//
|
||||
// ServeMux is also safe for concurrent access from multiple goroutines.
|
||||
//
|
||||
// The zero ServeMux is empty and ready for use.
|
||||
type ServeMux struct {
|
||||
z map[string]Handler
|
||||
m sync.RWMutex
|
||||
}
|
||||
|
||||
// NewServeMux allocates and returns a new ServeMux.
|
||||
func NewServeMux() *ServeMux {
|
||||
return new(ServeMux)
|
||||
}
|
||||
|
||||
// DefaultServeMux is the default ServeMux used by Serve.
|
||||
var DefaultServeMux = NewServeMux()
|
||||
|
||||
func (mux *ServeMux) match(q string, t uint16) Handler {
|
||||
mux.m.RLock()
|
||||
defer mux.m.RUnlock()
|
||||
if mux.z == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var handler Handler
|
||||
|
||||
// TODO(tmthrgd): Once https://go-review.googlesource.com/c/go/+/137575
|
||||
// lands in a go release, replace the following with strings.ToLower.
|
||||
var sb strings.Builder
|
||||
for i := 0; i < len(q); i++ {
|
||||
c := q[i]
|
||||
if !(c >= 'A' && c <= 'Z') {
|
||||
continue
|
||||
}
|
||||
|
||||
sb.Grow(len(q))
|
||||
sb.WriteString(q[:i])
|
||||
|
||||
for ; i < len(q); i++ {
|
||||
c := q[i]
|
||||
if c >= 'A' && c <= 'Z' {
|
||||
c += 'a' - 'A'
|
||||
}
|
||||
|
||||
sb.WriteByte(c)
|
||||
}
|
||||
|
||||
q = sb.String()
|
||||
break
|
||||
}
|
||||
|
||||
for off, end := 0, false; !end; off, end = NextLabel(q, off) {
|
||||
if h, ok := mux.z[q[off:]]; ok {
|
||||
if t != TypeDS {
|
||||
return h
|
||||
}
|
||||
// Continue for DS to see if we have a parent too, if so delegate to the parent
|
||||
handler = h
|
||||
}
|
||||
}
|
||||
|
||||
// Wildcard match, if we have found nothing try the root zone as a last resort.
|
||||
if h, ok := mux.z["."]; ok {
|
||||
return h
|
||||
}
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// Handle adds a handler to the ServeMux for pattern.
|
||||
func (mux *ServeMux) Handle(pattern string, handler Handler) {
|
||||
if pattern == "" {
|
||||
panic("dns: invalid pattern " + pattern)
|
||||
}
|
||||
mux.m.Lock()
|
||||
if mux.z == nil {
|
||||
mux.z = make(map[string]Handler)
|
||||
}
|
||||
mux.z[Fqdn(pattern)] = handler
|
||||
mux.m.Unlock()
|
||||
}
|
||||
|
||||
// HandleFunc adds a handler function to the ServeMux for pattern.
|
||||
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
|
||||
mux.Handle(pattern, HandlerFunc(handler))
|
||||
}
|
||||
|
||||
// HandleRemove deregisters the handler specific for pattern from the ServeMux.
|
||||
func (mux *ServeMux) HandleRemove(pattern string) {
|
||||
if pattern == "" {
|
||||
panic("dns: invalid pattern " + pattern)
|
||||
}
|
||||
mux.m.Lock()
|
||||
delete(mux.z, Fqdn(pattern))
|
||||
mux.m.Unlock()
|
||||
}
|
||||
|
||||
// ServeDNS dispatches the request to the handler whose pattern most
|
||||
// closely matches the request message.
|
||||
//
|
||||
// ServeDNS is DNSSEC aware, meaning that queries for the DS record
|
||||
// are redirected to the parent zone (if that is also registered),
|
||||
// otherwise the child gets the query.
|
||||
//
|
||||
// If no handler is found, or there is no question, a standard SERVFAIL
|
||||
// message is returned
|
||||
func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
|
||||
var h Handler
|
||||
if len(req.Question) >= 1 { // allow more than one question
|
||||
h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
|
||||
}
|
||||
|
||||
if h != nil {
|
||||
h.ServeDNS(w, req)
|
||||
} else {
|
||||
HandleFailed(w, req)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle registers the handler with the given pattern
|
||||
// in the DefaultServeMux. The documentation for
|
||||
// ServeMux explains how patterns are matched.
|
||||
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
|
||||
|
||||
// HandleRemove deregisters the handle with the given pattern
|
||||
// in the DefaultServeMux.
|
||||
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
|
||||
|
||||
// HandleFunc registers the handler function with the given pattern
|
||||
// in the DefaultServeMux.
|
||||
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
|
||||
DefaultServeMux.HandleFunc(pattern, handler)
|
||||
}
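// Usage sketch (illustrative comment, zone name hypothetical):
// dns.HandleFunc("example.org.", handler) registers handler on the
// DefaultServeMux for example.org. and every name below it.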
|
||||
829
vendor/github.com/miekg/dns/server.go
generated
vendored
Normal file
@@ -0,0 +1,829 @@
|
||||
// DNS server implementation.
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Default maximum number of TCP queries before we close the socket.
|
||||
const maxTCPQueries = 128
|
||||
|
||||
// The maximum number of idle workers.
|
||||
//
|
||||
// This controls the maximum number of workers that are allowed to stay
|
||||
// idle waiting for incoming requests before being torn down.
|
||||
//
|
||||
// If this limit is reached, the server will just keep spawning new
|
||||
// workers (goroutines) for each incoming request. In this case, each
|
||||
// worker will only be used for a single request.
|
||||
const maxIdleWorkersCount = 10000
|
||||
|
||||
// The maximum length of time a worker may idle for before being destroyed.
|
||||
const idleWorkerTimeout = 10 * time.Second
|
||||
|
||||
// aLongTimeAgo is a non-zero time, far in the past, used for
|
||||
// immediate cancelation of network operations.
|
||||
var aLongTimeAgo = time.Unix(1, 0)
|
||||
|
||||
// Handler is implemented by any value that implements ServeDNS.
|
||||
type Handler interface {
|
||||
ServeDNS(w ResponseWriter, r *Msg)
|
||||
}
|
||||
|
||||
// The HandlerFunc type is an adapter to allow the use of
|
||||
// ordinary functions as DNS handlers. If f is a function
|
||||
// with the appropriate signature, HandlerFunc(f) is a
|
||||
// Handler object that calls f.
|
||||
type HandlerFunc func(ResponseWriter, *Msg)
|
||||
|
||||
// ServeDNS calls f(w, r).
|
||||
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
|
||||
f(w, r)
|
||||
}
|
||||
|
||||
// A ResponseWriter interface is used by a DNS handler to
// construct a DNS response.
|
||||
type ResponseWriter interface {
|
||||
// LocalAddr returns the net.Addr of the server
|
||||
LocalAddr() net.Addr
|
||||
// RemoteAddr returns the net.Addr of the client that sent the current request.
|
||||
RemoteAddr() net.Addr
|
||||
// WriteMsg writes a reply back to the client.
|
||||
WriteMsg(*Msg) error
|
||||
// Write writes a raw buffer back to the client.
|
||||
Write([]byte) (int, error)
|
||||
// Close closes the connection.
|
||||
Close() error
|
||||
// TsigStatus returns the status of the Tsig.
|
||||
TsigStatus() error
|
||||
// TsigTimersOnly sets the tsig timers only boolean.
|
||||
TsigTimersOnly(bool)
|
||||
// Hijack lets the caller take over the connection.
|
||||
// After a call to Hijack(), the DNS package will not do anything with the connection.
|
||||
Hijack()
|
||||
}
|
||||
|
||||
// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
|
||||
// when available.
|
||||
type ConnectionStater interface {
|
||||
ConnectionState() *tls.ConnectionState
|
||||
}
|
||||
|
||||
type response struct {
|
||||
msg []byte
|
||||
hijacked bool // connection has been hijacked by handler
|
||||
tsigTimersOnly bool
|
||||
tsigStatus error
|
||||
tsigRequestMAC string
|
||||
tsigSecret map[string]string // the tsig secrets
|
||||
udp *net.UDPConn // i/o connection if UDP was used
|
||||
tcp net.Conn // i/o connection if TCP was used
|
||||
udpSession *SessionUDP // oob data to get egress interface right
|
||||
writer Writer // writer to output the raw DNS bits
|
||||
wg *sync.WaitGroup // for graceful shutdown
|
||||
}
|
||||
|
||||
// HandleFailed is a HandlerFunc that replies with SERVFAIL to every request it gets.
|
||||
func HandleFailed(w ResponseWriter, r *Msg) {
|
||||
m := new(Msg)
|
||||
m.SetRcode(r, RcodeServerFailure)
|
||||
// does not matter if this write fails
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
// ListenAndServe starts a server on the specified address and network and
// invokes handler for incoming queries.
|
||||
func ListenAndServe(addr string, network string, handler Handler) error {
|
||||
server := &Server{Addr: addr, Net: network, Handler: handler}
|
||||
return server.ListenAndServe()
|
||||
}
|
||||
|
||||
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
|
||||
// http://golang.org/pkg/net/http/#ListenAndServeTLS
|
||||
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config := tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
}
|
||||
|
||||
server := &Server{
|
||||
Addr: addr,
|
||||
Net: "tcp-tls",
|
||||
TLSConfig: &config,
|
||||
Handler: handler,
|
||||
}
|
||||
|
||||
return server.ListenAndServe()
|
||||
}
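// Usage sketch (illustrative comment, file names hypothetical):
// dns.ListenAndServeTLS(":853", "cert.pem", "key.pem", handler) serves
// DNS over TLS (RFC 7858) on port 853 using the given certificate pair.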
|
||||
|
||||
// ActivateAndServe activates a server with a listener from systemd.
// l and p should not both be non-nil; if both are non-nil, only p is used.
// handler is invoked for incoming queries.
|
||||
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
|
||||
server := &Server{Listener: l, PacketConn: p, Handler: handler}
|
||||
return server.ActivateAndServe()
|
||||
}
|
||||
|
||||
// Writer writes raw DNS messages; each call to Write should send an entire message.
|
||||
type Writer interface {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
|
||||
type Reader interface {
|
||||
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
|
||||
// connection properties, for example the read-deadline.
|
||||
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
|
||||
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
|
||||
// connection properties, for example the read-deadline.
|
||||
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
|
||||
}
|
||||
|
||||
// defaultReader is an adapter for the Server struct that implements the Reader interface
|
||||
// using the readTCP and readUDP func of the embedded Server.
|
||||
type defaultReader struct {
|
||||
*Server
|
||||
}
|
||||
|
||||
func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
|
||||
return dr.readTCP(conn, timeout)
|
||||
}
|
||||
|
||||
func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
|
||||
return dr.readUDP(conn, timeout)
|
||||
}
|
||||
|
||||
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
|
||||
// Implementations should never return a nil Reader.
|
||||
type DecorateReader func(Reader) Reader
|
||||
|
||||
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
|
||||
// Implementations should never return a nil Writer.
|
||||
type DecorateWriter func(Writer) Writer
|
||||
|
||||
// A Server defines parameters for running a DNS server.
|
||||
type Server struct {
|
||||
// Address to listen on, ":dns" if empty.
|
||||
Addr string
|
||||
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
|
||||
Net string
|
||||
// TCP Listener to use, this is to aid in systemd's socket activation.
|
||||
Listener net.Listener
|
||||
// TLS connection configuration
|
||||
TLSConfig *tls.Config
|
||||
// UDP "Listener" to use, this is to aid in systemd's socket activation.
|
||||
PacketConn net.PacketConn
|
||||
// Handler to invoke, dns.DefaultServeMux if nil.
|
||||
Handler Handler
|
||||
// Default buffer size to use to read incoming UDP messages. If not set
|
||||
// it defaults to MinMsgSize (512 B).
|
||||
UDPSize int
|
||||
// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
|
||||
ReadTimeout time.Duration
|
||||
// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
|
||||
WriteTimeout time.Duration
|
||||
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
|
||||
IdleTimeout func() time.Duration
|
||||
// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
|
||||
TsigSecret map[string]string
|
||||
// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
|
||||
// the handler. It will specifically not check if the query has the QR bit not set.
|
||||
Unsafe bool
|
||||
// If NotifyStartedFunc is set it is called once the server has started listening.
|
||||
NotifyStartedFunc func()
|
||||
// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
|
||||
DecorateReader DecorateReader
|
||||
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
|
||||
DecorateWriter DecorateWriter
|
||||
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
|
||||
MaxTCPQueries int
|
||||
// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
|
||||
// It is only supported on go1.11+ and when using ListenAndServe.
|
||||
ReusePort bool
|
||||
|
||||
// UDP packet or TCP connection queue
|
||||
queue chan *response
|
||||
// Workers count
|
||||
workersCount int32
|
||||
|
||||
// Shutdown handling
|
||||
lock sync.RWMutex
|
||||
started bool
|
||||
shutdown chan struct{}
|
||||
conns map[net.Conn]struct{}
|
||||
|
||||
// A pool for UDP message buffers.
|
||||
udpPool sync.Pool
|
||||
}
|
||||
|
||||
func (srv *Server) isStarted() bool {
	srv.lock.RLock()
	started := srv.started
	srv.lock.RUnlock()
	return started
}

func (srv *Server) worker(w *response) {
	srv.serve(w)

	for {
		count := atomic.LoadInt32(&srv.workersCount)
		if count > maxIdleWorkersCount {
			return
		}
		if atomic.CompareAndSwapInt32(&srv.workersCount, count, count+1) {
			break
		}
	}

	defer atomic.AddInt32(&srv.workersCount, -1)

	inUse := false
	timeout := time.NewTimer(idleWorkerTimeout)
	defer timeout.Stop()
LOOP:
	for {
		select {
		case w, ok := <-srv.queue:
			if !ok {
				break LOOP
			}
			inUse = true
			srv.serve(w)
		case <-timeout.C:
			if !inUse {
				break LOOP
			}
			inUse = false
			timeout.Reset(idleWorkerTimeout)
		}
	}
}

func (srv *Server) spawnWorker(w *response) {
	select {
	case srv.queue <- w:
	default:
		go srv.worker(w)
	}
}

func makeUDPBuffer(size int) func() interface{} {
	return func() interface{} {
		return make([]byte, size)
	}
}

func (srv *Server) init() {
	srv.queue = make(chan *response)

	srv.shutdown = make(chan struct{})
	srv.conns = make(map[net.Conn]struct{})

	if srv.UDPSize == 0 {
		srv.UDPSize = MinMsgSize
	}

	srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
}

func unlockOnce(l sync.Locker) func() {
	var once sync.Once
	return func() { once.Do(l.Unlock) }
}

// ListenAndServe starts a nameserver on the configured address in *Server.
func (srv *Server) ListenAndServe() error {
	unlock := unlockOnce(&srv.lock)
	srv.lock.Lock()
	defer unlock()

	if srv.started {
		return &Error{err: "server already started"}
	}

	addr := srv.Addr
	if addr == "" {
		addr = ":domain"
	}

	srv.init()
	defer close(srv.queue)

	switch srv.Net {
	case "tcp", "tcp4", "tcp6":
		l, err := listenTCP(srv.Net, addr, srv.ReusePort)
		if err != nil {
			return err
		}
		srv.Listener = l
		srv.started = true
		unlock()
		return srv.serveTCP(l)
	case "tcp-tls", "tcp4-tls", "tcp6-tls":
		if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
			return errors.New("dns: neither Certificates nor GetCertificate set in Config")
		}
		network := strings.TrimSuffix(srv.Net, "-tls")
		l, err := listenTCP(network, addr, srv.ReusePort)
		if err != nil {
			return err
		}
		l = tls.NewListener(l, srv.TLSConfig)
		srv.Listener = l
		srv.started = true
		unlock()
		return srv.serveTCP(l)
	case "udp", "udp4", "udp6":
		l, err := listenUDP(srv.Net, addr, srv.ReusePort)
		if err != nil {
			return err
		}
		u := l.(*net.UDPConn)
		if e := setUDPSocketOptions(u); e != nil {
			return e
		}
		srv.PacketConn = l
		srv.started = true
		unlock()
		return srv.serveUDP(u)
	}
	return &Error{err: "bad network"}
}

// ActivateAndServe starts a nameserver with the PacketConn or Listener
// configured in *Server. Its main use is to start a server from systemd.
func (srv *Server) ActivateAndServe() error {
	unlock := unlockOnce(&srv.lock)
	srv.lock.Lock()
	defer unlock()

	if srv.started {
		return &Error{err: "server already started"}
	}

	srv.init()
	defer close(srv.queue)

	pConn := srv.PacketConn
	l := srv.Listener
	if pConn != nil {
		// Check PacketConn interface's type is valid and value
		// is not nil
		if t, ok := pConn.(*net.UDPConn); ok && t != nil {
			if e := setUDPSocketOptions(t); e != nil {
				return e
			}
			srv.started = true
			unlock()
			return srv.serveUDP(t)
		}
	}
	if l != nil {
		srv.started = true
		unlock()
		return srv.serveTCP(l)
	}
	return &Error{err: "bad listeners"}
}

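// A minimal sketch of socket activation, assuming a *net.UDPConn obtained
// elsewhere (for example via systemd file-descriptor passing) is handed to the
// server instead of letting it open its own socket; the address is a placeholder:
//
//	pc, err := net.ListenPacket("udp", ":8053")
//	if err != nil {
//		log.Fatal(err)
//	}
//	srv := &Server{PacketConn: pc}
//	if err := srv.ActivateAndServe(); err != nil {
//		log.Fatal(err)
//	}
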
// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
// ActivateAndServe will return.
func (srv *Server) Shutdown() error {
	return srv.ShutdownContext(context.Background())
}

// ShutdownContext shuts down a server. After a call to ShutdownContext,
// ListenAndServe and ActivateAndServe will return.
//
// A context.Context may be passed to limit how long to wait for connections
// to terminate.
func (srv *Server) ShutdownContext(ctx context.Context) error {
	srv.lock.Lock()
	if !srv.started {
		srv.lock.Unlock()
		return &Error{err: "server not started"}
	}

	srv.started = false

	if srv.PacketConn != nil {
		srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
	}

	if srv.Listener != nil {
		srv.Listener.Close()
	}

	for rw := range srv.conns {
		rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
	}

	srv.lock.Unlock()

	if testShutdownNotify != nil {
		testShutdownNotify.Broadcast()
	}

	var ctxErr error
	select {
	case <-srv.shutdown:
	case <-ctx.Done():
		ctxErr = ctx.Err()
	}

	if srv.PacketConn != nil {
		srv.PacketConn.Close()
	}

	return ctxErr
}

var testShutdownNotify *sync.Cond

// getReadTimeout is a helper func to use system timeout if server did not intend to change it.
func (srv *Server) getReadTimeout() time.Duration {
	rtimeout := dnsTimeout
	if srv.ReadTimeout != 0 {
		rtimeout = srv.ReadTimeout
	}
	return rtimeout
}

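// A minimal sketch of a bounded shutdown, assuming srv is a started *Server;
// the five second budget is an arbitrary example value:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := srv.ShutdownContext(ctx); err != nil {
//		log.Printf("shutdown did not complete cleanly: %v", err)
//	}
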
// serveTCP starts a TCP listener for the server.
func (srv *Server) serveTCP(l net.Listener) error {
	defer l.Close()

	if srv.NotifyStartedFunc != nil {
		srv.NotifyStartedFunc()
	}

	var wg sync.WaitGroup
	defer func() {
		wg.Wait()
		close(srv.shutdown)
	}()

	for srv.isStarted() {
		rw, err := l.Accept()
		if err != nil {
			if !srv.isStarted() {
				return nil
			}
			if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
				continue
			}
			return err
		}
		srv.lock.Lock()
		// Track the connection to allow unblocking reads on shutdown.
		srv.conns[rw] = struct{}{}
		srv.lock.Unlock()
		wg.Add(1)
		srv.spawnWorker(&response{
			tsigSecret: srv.TsigSecret,
			tcp:        rw,
			wg:         &wg,
		})
	}

	return nil
}

// serveUDP starts a UDP listener for the server.
func (srv *Server) serveUDP(l *net.UDPConn) error {
	defer l.Close()

	if srv.NotifyStartedFunc != nil {
		srv.NotifyStartedFunc()
	}

	reader := Reader(&defaultReader{srv})
	if srv.DecorateReader != nil {
		reader = srv.DecorateReader(reader)
	}

	var wg sync.WaitGroup
	defer func() {
		wg.Wait()
		close(srv.shutdown)
	}()

	rtimeout := srv.getReadTimeout()
	// deadline is not used here
	for srv.isStarted() {
		m, s, err := reader.ReadUDP(l, rtimeout)
		if err != nil {
			if !srv.isStarted() {
				return nil
			}
			if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
				continue
			}
			return err
		}
		if len(m) < headerSize {
			if cap(m) == srv.UDPSize {
				srv.udpPool.Put(m[:srv.UDPSize])
			}
			continue
		}
		wg.Add(1)
		srv.spawnWorker(&response{
			msg:        m,
			tsigSecret: srv.TsigSecret,
			udp:        l,
			udpSession: s,
			wg:         &wg,
		})
	}

	return nil
}

func (srv *Server) serve(w *response) {
	if srv.DecorateWriter != nil {
		w.writer = srv.DecorateWriter(w)
	} else {
		w.writer = w
	}

	if w.udp != nil {
		// serve UDP
		srv.serveDNS(w)

		w.wg.Done()
		return
	}

	defer func() {
		if !w.hijacked {
			w.Close()
		}

		srv.lock.Lock()
		delete(srv.conns, w.tcp)
		srv.lock.Unlock()

		w.wg.Done()
	}()

	reader := Reader(&defaultReader{srv})
	if srv.DecorateReader != nil {
		reader = srv.DecorateReader(reader)
	}

	idleTimeout := tcpIdleTimeout
	if srv.IdleTimeout != nil {
		idleTimeout = srv.IdleTimeout()
	}

	timeout := srv.getReadTimeout()

	limit := srv.MaxTCPQueries
	if limit == 0 {
		limit = maxTCPQueries
	}

	for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
		var err error
		w.msg, err = reader.ReadTCP(w.tcp, timeout)
		if err != nil {
			// TODO(tmthrgd): handle error
			break
		}
		srv.serveDNS(w)
		if w.tcp == nil {
			break // Close() was called
		}
		if w.hijacked {
			break // client will call Close() themselves
		}
		// The first read uses the read timeout, the rest use the
		// idle timeout.
		timeout = idleTimeout
	}
}

func (srv *Server) disposeBuffer(w *response) {
	if w.udp != nil && cap(w.msg) == srv.UDPSize {
		srv.udpPool.Put(w.msg[:srv.UDPSize])
	}
	w.msg = nil
}

func (srv *Server) serveDNS(w *response) {
	req := new(Msg)
	err := req.Unpack(w.msg)
	if err != nil { // Send a FormatError back
		x := new(Msg)
		x.SetRcodeFormatError(req)
		w.WriteMsg(x)
	}
	if err != nil || !srv.Unsafe && req.Response {
		srv.disposeBuffer(w)
		return
	}

	w.tsigStatus = nil
	if w.tsigSecret != nil {
		if t := req.IsTsig(); t != nil {
			if secret, ok := w.tsigSecret[t.Hdr.Name]; ok {
				w.tsigStatus = TsigVerify(w.msg, secret, "", false)
			} else {
				w.tsigStatus = ErrSecret
			}
			w.tsigTimersOnly = false
			w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
		}
	}

	srv.disposeBuffer(w)

	handler := srv.Handler
	if handler == nil {
		handler = DefaultServeMux
	}

	handler.ServeDNS(w, req) // Writes back to the client
}

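// A minimal sketch of enabling TSIG verification, assuming a key named "axfr."
// with a base64 secret (both values are placeholders); the zone name used as
// the map key must be in canonical form, as noted on TsigSecret above:
//
//	srv := &Server{
//		Addr:       ":8053",
//		Net:        "udp",
//		TsigSecret: map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="},
//	}
//
// A handler can then inspect the verification result via the ResponseWriter:
//
//	func handle(w ResponseWriter, r *Msg) {
//		if r.IsTsig() != nil && w.TsigStatus() == nil {
//			// TSIG verified; sign the reply with the same key.
//		}
//	}
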
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
	// If we race with ShutdownContext, the read deadline may
	// have been set in the distant past to unblock the read
	// below. We must not override it, otherwise we may block
	// ShutdownContext.
	srv.lock.RLock()
	if srv.started {
		conn.SetReadDeadline(time.Now().Add(timeout))
	}
	srv.lock.RUnlock()

	l := make([]byte, 2)
	n, err := conn.Read(l)
	if err != nil || n != 2 {
		if err != nil {
			return nil, err
		}
		return nil, ErrShortRead
	}
	length := binary.BigEndian.Uint16(l)
	if length == 0 {
		return nil, ErrShortRead
	}
	m := make([]byte, int(length))
	n, err = conn.Read(m[:int(length)])
	if err != nil || n == 0 {
		if err != nil {
			return nil, err
		}
		return nil, ErrShortRead
	}
	i := n
	for i < int(length) {
		j, err := conn.Read(m[i:int(length)])
		if err != nil {
			return nil, err
		}
		i += j
	}
	n = i
	m = m[:n]
	return m, nil
}

func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
	srv.lock.RLock()
	if srv.started {
		// See the comment in readTCP above.
		conn.SetReadDeadline(time.Now().Add(timeout))
	}
	srv.lock.RUnlock()

	m := srv.udpPool.Get().([]byte)
	n, s, err := ReadFromSessionUDP(conn, m)
	if err != nil {
		srv.udpPool.Put(m)
		return nil, nil, err
	}
	m = m[:n]
	return m, s, nil
}

// WriteMsg implements the ResponseWriter.WriteMsg method.
func (w *response) WriteMsg(m *Msg) (err error) {
	var data []byte
	if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
		if t := m.IsTsig(); t != nil {
			data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
			if err != nil {
				return err
			}
			_, err = w.writer.Write(data)
			return err
		}
	}
	data, err = m.Pack()
	if err != nil {
		return err
	}
	_, err = w.writer.Write(data)
	return err
}

// Write implements the ResponseWriter.Write method.
func (w *response) Write(m []byte) (int, error) {
	switch {
	case w.udp != nil:
		n, err := WriteToSessionUDP(w.udp, m, w.udpSession)
		return n, err
	case w.tcp != nil:
		lm := len(m)
		if lm < 2 {
			return 0, io.ErrShortBuffer
		}
		if lm > MaxMsgSize {
			return 0, &Error{err: "message too large"}
		}
		l := make([]byte, 2, 2+lm)
		binary.BigEndian.PutUint16(l, uint16(lm))
		m = append(l, m...)

		n, err := io.Copy(w.tcp, bytes.NewReader(m))
		return int(n), err
	default:
		panic("dns: Write called after Close")
	}
}

// LocalAddr implements the ResponseWriter.LocalAddr method.
func (w *response) LocalAddr() net.Addr {
	switch {
	case w.udp != nil:
		return w.udp.LocalAddr()
	case w.tcp != nil:
		return w.tcp.LocalAddr()
	default:
		panic("dns: LocalAddr called after Close")
	}
}

// RemoteAddr implements the ResponseWriter.RemoteAddr method.
func (w *response) RemoteAddr() net.Addr {
	switch {
	case w.udpSession != nil:
		return w.udpSession.RemoteAddr()
	case w.tcp != nil:
		return w.tcp.RemoteAddr()
	default:
		panic("dns: RemoteAddr called after Close")
	}
}

// TsigStatus implements the ResponseWriter.TsigStatus method.
func (w *response) TsigStatus() error { return w.tsigStatus }

// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }

// Hijack implements the ResponseWriter.Hijack method.
func (w *response) Hijack() { w.hijacked = true }

// Close implements the ResponseWriter.Close method.
func (w *response) Close() error {
	// Can't close the udp conn, as that is actually the listener.
	if w.tcp != nil {
		e := w.tcp.Close()
		w.tcp = nil
		return e
	}
	return nil
}

// ConnectionState() implements the ConnectionStater.ConnectionState() interface.
func (w *response) ConnectionState() *tls.ConnectionState {
	type tlsConnectionStater interface {
		ConnectionState() tls.ConnectionState
	}
	if v, ok := w.tcp.(tlsConnectionStater); ok {
		t := v.ConnectionState()
		return &t
	}
	return nil
}

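// A minimal sketch of inspecting TLS state from a handler, assuming the server
// was started with Net: "tcp-tls" and that the ResponseWriter satisfies the
// ConnectionStater interface referenced above; handleTLS is illustrative:
//
//	func handleTLS(w ResponseWriter, r *Msg) {
//		if cs, ok := w.(ConnectionStater); ok {
//			if state := cs.ConnectionState(); state != nil {
//				log.Printf("query received over TLS, version 0x%x", state.Version)
//			}
//		}
//		m := new(Msg)
//		m.SetReply(r)
//		w.WriteMsg(m)
//	}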