mirror of
https://github.com/weaveworks/scope.git
synced 2026-05-13 04:37:36 +00:00
Use NATS for shortcut reports in the service. (#1568)
* Vendor nats-io/nats * Use NATS for shortcut reports. * Review feedback. * Rejig shortcut subscriptions, so they work. * Review feedback
This commit is contained in:
22
app/multitenant/common.go
Normal file
22
app/multitenant/common.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package multitenant
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func errorCode(err error) string {
|
||||
if err == nil {
|
||||
return "200"
|
||||
}
|
||||
return "500"
|
||||
}
|
||||
|
||||
func timeRequest(method string, metric *prometheus.SummaryVec, f func() error) error {
|
||||
startTime := time.Now()
|
||||
err := f()
|
||||
duration := time.Now().Sub(startTime)
|
||||
metric.WithLabelValues(method, errorCode(err)).Observe(float64(duration.Nanoseconds()))
|
||||
return err
|
||||
}
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/bluele/gcache"
|
||||
"github.com/nats-io/nats"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/ugorji/go/codec"
|
||||
"golang.org/x/net/context"
|
||||
@@ -24,11 +26,12 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
hourField = "hour"
|
||||
tsField = "ts"
|
||||
reportField = "report"
|
||||
cacheSize = (15 / 3) * 10 * 5 // (window size * report rate) * number of hosts per user * number of users
|
||||
cacheExpiration = 15 * time.Second
|
||||
hourField = "hour"
|
||||
tsField = "ts"
|
||||
reportField = "report"
|
||||
reportCacheSize = (15 / 3) * 10 * 5 // (window size * report rate) * number of hosts per user * number of users
|
||||
reportCacheExpiration = 15 * time.Second
|
||||
natsTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -69,6 +72,12 @@ var (
|
||||
Name: "s3_request_duration_nanoseconds",
|
||||
Help: "Time spent doing S3 requests.",
|
||||
}, []string{"method", "status_code"})
|
||||
|
||||
natsRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "scope",
|
||||
Name: "nats_requests",
|
||||
Help: "Number of NATS requests.",
|
||||
}, []string{"method", "status_code"})
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -79,6 +88,7 @@ func init() {
|
||||
prometheus.MustRegister(dynamoValueSize)
|
||||
prometheus.MustRegister(reportSize)
|
||||
prometheus.MustRegister(s3RequestDuration)
|
||||
prometheus.MustRegister(natsRequests)
|
||||
}
|
||||
|
||||
// DynamoDBCollector is a Collector which can also CreateTables
|
||||
@@ -95,11 +105,42 @@ type dynamoDBCollector struct {
|
||||
bucketName string
|
||||
merger app.Merger
|
||||
cache gcache.Cache
|
||||
|
||||
nats *nats.Conn
|
||||
waitersLock sync.Mutex
|
||||
waiters map[watchKey]*nats.Subscription
|
||||
}
|
||||
|
||||
// Shortcut reports:
|
||||
// When the UI connects a WS to the query service, a goroutine periodically
|
||||
// published rendered reports to that ws. This process can be interrupted by
|
||||
// "shortcut" reports, causing the query service to push a render report
|
||||
// immediately. This whole process is controlled by the aforementioned
|
||||
// goroutine registering a channel with the collector. We store these
|
||||
// registered channels in a map keyed by the userid and the channel itself,
|
||||
// which in go is hashable. We then listen on a NATS topic for any shortcut
|
||||
// reports coming from the collection service.
|
||||
type watchKey struct {
|
||||
userid string
|
||||
c chan struct{}
|
||||
}
|
||||
|
||||
// NewDynamoDBCollector the reaper of souls
|
||||
// https://github.com/aws/aws-sdk-go/wiki/common-examples
|
||||
func NewDynamoDBCollector(dynamoDBConfig, s3Config *aws.Config, userIDer UserIDer, tableName, bucketName string) DynamoDBCollector {
|
||||
func NewDynamoDBCollector(
|
||||
userIDer UserIDer,
|
||||
dynamoDBConfig, s3Config *aws.Config,
|
||||
tableName, bucketName, natsHost string,
|
||||
) (DynamoDBCollector, error) {
|
||||
var nc *nats.Conn
|
||||
if natsHost != "" {
|
||||
var err error
|
||||
nc, err = nats.Connect(natsHost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &dynamoDBCollector{
|
||||
db: dynamodb.New(session.New(dynamoDBConfig)),
|
||||
s3: s3.New(session.New(s3Config)),
|
||||
@@ -107,8 +148,10 @@ func NewDynamoDBCollector(dynamoDBConfig, s3Config *aws.Config, userIDer UserIDe
|
||||
tableName: tableName,
|
||||
bucketName: bucketName,
|
||||
merger: app.NewSmartMerger(),
|
||||
cache: gcache.New(cacheSize).LRU().Expiration(cacheExpiration).Build(),
|
||||
}
|
||||
cache: gcache.New(reportCacheSize).LRU().Expiration(reportCacheExpiration).Build(),
|
||||
nats: nc,
|
||||
waiters: map[watchKey]*nats.Subscription{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateDynamoDBTables creates the required tables in dynamodb
|
||||
@@ -163,21 +206,6 @@ func (c *dynamoDBCollector) CreateTables() error {
|
||||
return err
|
||||
}
|
||||
|
||||
func errorCode(err error) string {
|
||||
if err == nil {
|
||||
return "200"
|
||||
}
|
||||
return "500"
|
||||
}
|
||||
|
||||
func timeRequest(method string, metric *prometheus.SummaryVec, f func() error) error {
|
||||
startTime := time.Now()
|
||||
err := f()
|
||||
duration := time.Now().Sub(startTime)
|
||||
metric.WithLabelValues(method, errorCode(err)).Observe(float64(duration.Nanoseconds()))
|
||||
return err
|
||||
}
|
||||
|
||||
// getReportKeys gets the s3 keys for reports in this range
|
||||
func (c *dynamoDBCollector) getReportKeys(rowKey string, start, end time.Time) ([]string, error) {
|
||||
var resp *dynamodb.QueryOutput
|
||||
@@ -419,9 +447,82 @@ func (c *dynamoDBCollector) Add(ctx context.Context, rep report.Report) error {
|
||||
dynamoConsumedCapacity.WithLabelValues("PutItem").
|
||||
Add(float64(*resp.ConsumedCapacity.CapacityUnits))
|
||||
}
|
||||
return err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rep.Shortcut && c.nats != nil {
|
||||
err := c.nats.Publish(userid, []byte(s3Key))
|
||||
natsRequests.WithLabelValues("Publish", errorCode(err)).Add(1)
|
||||
if err != nil {
|
||||
log.Errorf("Error sending shortcut report: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dynamoDBCollector) WaitOn(context.Context, chan struct{}) {}
|
||||
func (c *dynamoDBCollector) WaitOn(ctx context.Context, waiter chan struct{}) {
|
||||
userid, err := c.userIDer(ctx)
|
||||
if err != nil {
|
||||
log.Errorf("Error getting user id in WaitOn: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *dynamoDBCollector) UnWait(context.Context, chan struct{}) {}
|
||||
if c.nats == nil {
|
||||
return
|
||||
}
|
||||
|
||||
sub, err := c.nats.SubscribeSync(userid)
|
||||
natsRequests.WithLabelValues("SubscribeSync", errorCode(err)).Add(1)
|
||||
if err != nil {
|
||||
log.Errorf("Error subscribing for shortcuts: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.waitersLock.Lock()
|
||||
c.waiters[watchKey{userid, waiter}] = sub
|
||||
c.waitersLock.Unlock()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
_, err := sub.NextMsg(natsTimeout)
|
||||
if err == nats.ErrTimeout {
|
||||
continue
|
||||
}
|
||||
natsRequests.WithLabelValues("NextMsg", errorCode(err)).Add(1)
|
||||
if err != nil {
|
||||
log.Debugf("NextMsg error: %v", err)
|
||||
return
|
||||
}
|
||||
select {
|
||||
case waiter <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (c *dynamoDBCollector) UnWait(ctx context.Context, waiter chan struct{}) {
|
||||
userid, err := c.userIDer(ctx)
|
||||
if err != nil {
|
||||
log.Errorf("Error getting user id in WaitOn: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if c.nats == nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.waitersLock.Lock()
|
||||
key := watchKey{userid, waiter}
|
||||
sub := c.waiters[key]
|
||||
delete(c.waiters, key)
|
||||
c.waitersLock.Unlock()
|
||||
|
||||
err = sub.Unsubscribe()
|
||||
natsRequests.WithLabelValues("Unsubscribe", errorCode(err)).Add(1)
|
||||
if err != nil {
|
||||
log.Errorf("Error on unsubscribe: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
26
prog/app.go
26
prog/app.go
@@ -76,7 +76,7 @@ func awsConfigFromURL(url *url.URL) (*aws.Config, error) {
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func collectorFactory(userIDer multitenant.UserIDer, collectorURL, s3URL string, window time.Duration, createTables bool) (app.Collector, error) {
|
||||
func collectorFactory(userIDer multitenant.UserIDer, collectorURL, s3URL, natsHostname string, window time.Duration, createTables bool) (app.Collector, error) {
|
||||
if collectorURL == "local" {
|
||||
return app.NewCollector(window), nil
|
||||
}
|
||||
@@ -101,8 +101,11 @@ func collectorFactory(userIDer multitenant.UserIDer, collectorURL, s3URL string,
|
||||
}
|
||||
tableName := strings.TrimPrefix(parsed.Path, "/")
|
||||
bucketName := strings.TrimPrefix(s3.Path, "/")
|
||||
dynamoCollector := multitenant.NewDynamoDBCollector(
|
||||
dynamoDBConfig, s3Config, userIDer, tableName, bucketName)
|
||||
dynamoCollector, err := multitenant.NewDynamoDBCollector(
|
||||
userIDer, dynamoDBConfig, s3Config, tableName, bucketName, natsHostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if createTables {
|
||||
if err := dynamoCollector.CreateTables(); err != nil {
|
||||
return nil, err
|
||||
@@ -167,12 +170,20 @@ func appMain(flags appFlags) {
|
||||
setLogLevel(flags.logLevel)
|
||||
setLogFormatter(flags.logPrefix)
|
||||
|
||||
defer log.Info("app exiting")
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
app.UniqueID = strconv.FormatInt(rand.Int63(), 16)
|
||||
app.Version = version
|
||||
log.Infof("app starting, version %s, ID %s", app.Version, app.UniqueID)
|
||||
log.Infof("command line: %v", os.Args)
|
||||
|
||||
userIDer := multitenant.NoopUserIDer
|
||||
if flags.userIDHeader != "" {
|
||||
userIDer = multitenant.UserIDHeader(flags.userIDHeader)
|
||||
}
|
||||
|
||||
collector, err := collectorFactory(userIDer, flags.collectorURL, flags.s3URL, flags.window, flags.awsCreateTables)
|
||||
collector, err := collectorFactory(
|
||||
userIDer, flags.collectorURL, flags.s3URL, flags.natsHostname, flags.window, flags.awsCreateTables)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating collector: %v", err)
|
||||
return
|
||||
@@ -190,13 +201,6 @@ func appMain(flags appFlags) {
|
||||
return
|
||||
}
|
||||
|
||||
defer log.Info("app exiting")
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
app.UniqueID = strconv.FormatInt(rand.Int63(), 16)
|
||||
app.Version = version
|
||||
log.Infof("app starting, version %s, ID %s", app.Version, app.UniqueID)
|
||||
log.Infof("command line: %v", os.Args)
|
||||
|
||||
// Start background version checking
|
||||
checkpoint.CheckInterval(&checkpoint.CheckParams{
|
||||
Product: "scope-app",
|
||||
|
||||
@@ -100,6 +100,7 @@ type appFlags struct {
|
||||
s3URL string
|
||||
controlRouterURL string
|
||||
pipeRouterURL string
|
||||
natsHostname string
|
||||
userIDHeader string
|
||||
|
||||
awsCreateTables bool
|
||||
@@ -176,6 +177,7 @@ func main() {
|
||||
flag.StringVar(&flags.app.s3URL, "app.collector.s3", "local", "S3 URL to use (when collector is dynamodb)")
|
||||
flag.StringVar(&flags.app.controlRouterURL, "app.control.router", "local", "Control router to use (local or sqs)")
|
||||
flag.StringVar(&flags.app.pipeRouterURL, "app.pipe.router", "local", "Pipe router to use (local)")
|
||||
flag.StringVar(&flags.app.natsHostname, "app.nats", "", "Hostname for NATS service to use for shortcut reports. If empty, shortcut reporting will be disabled.")
|
||||
flag.StringVar(&flags.app.userIDHeader, "app.userid.header", "", "HTTP header to use as userid")
|
||||
|
||||
flag.BoolVar(&flags.app.awsCreateTables, "app.aws.create.tables", false, "Create the tables in DynamoDB")
|
||||
|
||||
20
vendor/github.com/nats-io/gnatsd/auth/LICENSE
generated
vendored
Normal file
20
vendor/github.com/nats-io/gnatsd/auth/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
41
vendor/github.com/nats-io/gnatsd/auth/multiuser.go
generated
vendored
Normal file
41
vendor/github.com/nats-io/gnatsd/auth/multiuser.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
)
|
||||
|
||||
// Plain authentication is a basic username and password
|
||||
type MultiUser struct {
|
||||
users map[string]string
|
||||
}
|
||||
|
||||
// Create a new multi-user
|
||||
func NewMultiUser(users []server.User) *MultiUser {
|
||||
m := &MultiUser{users: make(map[string]string)}
|
||||
for _, u := range users {
|
||||
m.users[u.Username] = u.Password
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Check authenticates the client using a username and password against a list of multiple users.
|
||||
func (m *MultiUser) Check(c server.ClientAuth) bool {
|
||||
opts := c.GetOpts()
|
||||
pass, ok := m.users[opts.Username]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// Check to see if the password is a bcrypt hash
|
||||
if isBcrypt(pass) {
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(pass), []byte(opts.Password)); err != nil {
|
||||
return false
|
||||
}
|
||||
} else if pass != opts.Password {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
40
vendor/github.com/nats-io/gnatsd/auth/plain.go
generated
vendored
Normal file
40
vendor/github.com/nats-io/gnatsd/auth/plain.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
// Copyright 2014-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
const bcryptPrefix = "$2a$"
|
||||
|
||||
func isBcrypt(password string) bool {
|
||||
return strings.HasPrefix(password, bcryptPrefix)
|
||||
}
|
||||
|
||||
// Plain authentication is a basic username and password
|
||||
type Plain struct {
|
||||
Username string
|
||||
Password string
|
||||
}
|
||||
|
||||
// Check authenticates the client using a username and password
|
||||
func (p *Plain) Check(c server.ClientAuth) bool {
|
||||
opts := c.GetOpts()
|
||||
if p.Username != opts.Username {
|
||||
return false
|
||||
}
|
||||
// Check to see if the password is a bcrypt hash
|
||||
if isBcrypt(p.Password) {
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(p.Password), []byte(opts.Password)); err != nil {
|
||||
return false
|
||||
}
|
||||
} else if p.Password != opts.Password {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
26
vendor/github.com/nats-io/gnatsd/auth/token.go
generated
vendored
Normal file
26
vendor/github.com/nats-io/gnatsd/auth/token.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
// Token holds a string token used for authentication
|
||||
type Token struct {
|
||||
Token string
|
||||
}
|
||||
|
||||
// Check authenticates a client from a token
|
||||
func (p *Token) Check(c server.ClientAuth) bool {
|
||||
opts := c.GetOpts()
|
||||
// Check to see if the token is a bcrypt hash
|
||||
if isBcrypt(p.Token) {
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(p.Token), []byte(opts.Authorization)); err != nil {
|
||||
return false
|
||||
}
|
||||
} else if p.Token != opts.Authorization {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
20
vendor/github.com/nats-io/gnatsd/conf/LICENSE
generated
vendored
Normal file
20
vendor/github.com/nats-io/gnatsd/conf/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
952
vendor/github.com/nats-io/gnatsd/conf/lex.go
generated
vendored
Normal file
952
vendor/github.com/nats-io/gnatsd/conf/lex.go
generated
vendored
Normal file
@@ -0,0 +1,952 @@
|
||||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
// Customized heavily from
|
||||
// https://github.com/BurntSushi/toml/blob/master/lex.go, which is based on
|
||||
// Rob Pike's talk: http://cuddle.googlecode.com/hg/talk/lex.html
|
||||
|
||||
// The format supported is less restrictive than today's formats.
|
||||
// Supports mixed Arrays [], nested Maps {}, multiple comment types (# and //)
|
||||
// Also supports key value assigments using '=' or ':' or whiteSpace()
|
||||
// e.g. foo = 2, foo : 2, foo 2
|
||||
// maps can be assigned with no key separator as well
|
||||
// semicolons as value terminators in key/value assignments are optional
|
||||
//
|
||||
// see lex_test.go for more examples.
|
||||
|
||||
package conf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota
|
||||
itemNIL // used in the parser to indicate no type
|
||||
itemEOF
|
||||
itemKey
|
||||
itemText
|
||||
itemString
|
||||
itemBool
|
||||
itemInteger
|
||||
itemFloat
|
||||
itemDatetime
|
||||
itemArrayStart
|
||||
itemArrayEnd
|
||||
itemMapStart
|
||||
itemMapEnd
|
||||
itemCommentStart
|
||||
itemVariable
|
||||
)
|
||||
|
||||
const (
|
||||
eof = 0
|
||||
mapStart = '{'
|
||||
mapEnd = '}'
|
||||
keySepEqual = '='
|
||||
keySepColon = ':'
|
||||
arrayStart = '['
|
||||
arrayEnd = ']'
|
||||
arrayValTerm = ','
|
||||
mapValTerm = ','
|
||||
commentHashStart = '#'
|
||||
commentSlashStart = '/'
|
||||
dqStringStart = '"'
|
||||
dqStringEnd = '"'
|
||||
sqStringStart = '\''
|
||||
sqStringEnd = '\''
|
||||
optValTerm = ';'
|
||||
blockStart = '('
|
||||
blockEnd = ')'
|
||||
)
|
||||
|
||||
type stateFn func(lx *lexer) stateFn
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
width int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
|
||||
// A stack of state functions used to maintain context.
|
||||
// The idea is to reuse parts of the state machine in various places.
|
||||
// For example, values can appear at the top level or within arbitrarily
|
||||
// nested arrays. The last state on the stack is used after a value has
|
||||
// been lexed. Similarly for comments.
|
||||
stack []stateFn
|
||||
}
|
||||
|
||||
type item struct {
|
||||
typ itemType
|
||||
val string
|
||||
line int
|
||||
}
|
||||
|
||||
func (lx *lexer) nextItem() item {
|
||||
for {
|
||||
select {
|
||||
case item := <-lx.items:
|
||||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input,
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
||||
func (lx *lexer) push(state stateFn) {
|
||||
lx.stack = append(lx.stack, state)
|
||||
}
|
||||
|
||||
func (lx *lexer) pop() stateFn {
|
||||
if len(lx.stack) == 0 {
|
||||
return lx.errorf("BUG in lexer: no states to pop.")
|
||||
}
|
||||
li := len(lx.stack) - 1
|
||||
last := lx.stack[li]
|
||||
lx.stack = lx.stack[0:li]
|
||||
return last
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.input[lx.start:lx.pos], lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) next() (r rune) {
|
||||
if lx.pos >= len(lx.input) {
|
||||
lx.width = 0
|
||||
return eof
|
||||
}
|
||||
|
||||
if lx.input[lx.pos] == '\n' {
|
||||
lx.line++
|
||||
}
|
||||
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
lx.pos += lx.width
|
||||
return r
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (lx *lexer) ignore() {
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can be called only once per call of next.
|
||||
func (lx *lexer) backup() {
|
||||
lx.pos -= lx.width
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
}
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (lx *lexer) peek() rune {
|
||||
r := lx.next()
|
||||
lx.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (new lines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
for i, value := range values {
|
||||
if v, ok := value.(rune); ok {
|
||||
values[i] = escapeSpecial(v)
|
||||
}
|
||||
}
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of data structure.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
|
||||
switch r {
|
||||
case commentHashStart:
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case commentSlashStart:
|
||||
rn := lx.next()
|
||||
if rn == commentSlashStart {
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
}
|
||||
lx.backup()
|
||||
fallthrough
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
return lx.errorf("Unexpected EOF.")
|
||||
}
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point, the only valid item can be a key, so we back up
|
||||
// and let the key lexer do the rest.
|
||||
lx.backup()
|
||||
lx.push(lexTopValueEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexTopValueEnd is entered whenever a top-level value has been consumed.
|
||||
// It must see only whitespace, and will turn back to lexTop upon a new line.
|
||||
// If it sees EOF, it will quit the lexer successfully.
|
||||
func lexTopValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentHashStart:
|
||||
// a comment will read to a new line for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case r == commentSlashStart:
|
||||
rn := lx.next()
|
||||
if rn == commentSlashStart {
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
}
|
||||
lx.backup()
|
||||
fallthrough
|
||||
case isWhitespace(r):
|
||||
return lexTopValueEnd
|
||||
case isNL(r) || r == eof || r == optValTerm:
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
}
|
||||
return lx.errorf("Expected a top-level value to end with a new line, "+
|
||||
"comment or EOF, but got '%v' instead.", r)
|
||||
}
|
||||
|
||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
|
||||
// lexKeyStart will ignore whitespace. It will also eat enclosing quotes.
|
||||
func lexKeyStart(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
switch {
|
||||
case isKeySeparator(r):
|
||||
return lx.errorf("Unexpected key separator '%v'", r)
|
||||
case isWhitespace(r) || isNL(r):
|
||||
lx.next()
|
||||
return lexSkip(lx, lexKeyStart)
|
||||
case r == dqStringStart:
|
||||
lx.next()
|
||||
return lexSkip(lx, lexDubQuotedKey)
|
||||
case r == sqStringStart:
|
||||
lx.next()
|
||||
return lexSkip(lx, lexQuotedKey)
|
||||
}
|
||||
lx.ignore()
|
||||
lx.next()
|
||||
return lexKey
|
||||
}
|
||||
|
||||
// lexDubQuotedKey consumes the text of a key between quotes.
|
||||
func lexDubQuotedKey(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if r == dqStringEnd {
|
||||
lx.emit(itemKey)
|
||||
lx.next()
|
||||
return lexSkip(lx, lexKeyEnd)
|
||||
}
|
||||
lx.next()
|
||||
return lexDubQuotedKey
|
||||
}
|
||||
|
||||
// lexQuotedKey consumes the text of a key between quotes.
|
||||
func lexQuotedKey(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if r == sqStringEnd {
|
||||
lx.emit(itemKey)
|
||||
lx.next()
|
||||
return lexSkip(lx, lexKeyEnd)
|
||||
}
|
||||
lx.next()
|
||||
return lexQuotedKey
|
||||
}
|
||||
|
||||
// lexKey consumes the text of a key. Assumes that the first character (which
|
||||
// is not whitespace) has already been consumed.
|
||||
func lexKey(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isWhitespace(r) || isNL(r) || isKeySeparator(r) || r == eof {
|
||||
lx.emit(itemKey)
|
||||
return lexKeyEnd
|
||||
}
|
||||
lx.next()
|
||||
return lexKey
|
||||
}
|
||||
|
||||
// lexKeyEnd consumes the end of a key (up to the key separator).
|
||||
// Assumes that the first whitespace character after a key (or the '=' or ':'
|
||||
// separator) has NOT been consumed.
|
||||
func lexKeyEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexKeyEnd)
|
||||
case isKeySeparator(r):
|
||||
return lexSkip(lx, lexValue)
|
||||
case r == eof:
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
// We start the value here
|
||||
lx.backup()
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexValue starts the consumption of a value anywhere a value is expected.
|
||||
// lexValue will ignore whitespace.
|
||||
// After a value is lexed, the last state on the next is popped and returned.
|
||||
func lexValue(lx *lexer) stateFn {
|
||||
// We allow whitespace to precede a value, but NOT new lines.
|
||||
// In array syntax, the array states are responsible for ignoring new lines.
|
||||
r := lx.next()
|
||||
if isWhitespace(r) {
|
||||
return lexSkip(lx, lexValue)
|
||||
}
|
||||
|
||||
switch {
|
||||
case r == arrayStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemArrayStart)
|
||||
return lexArrayValue
|
||||
case r == mapStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemMapStart)
|
||||
return lexMapKeyStart
|
||||
case r == sqStringStart:
|
||||
lx.ignore() // ignore the " or '
|
||||
return lexQuotedString
|
||||
case r == dqStringStart:
|
||||
lx.ignore() // ignore the " or '
|
||||
return lexDubQuotedString
|
||||
case r == '-':
|
||||
return lexNumberStart
|
||||
case r == blockStart:
|
||||
lx.ignore()
|
||||
return lexBlock
|
||||
case isDigit(r):
|
||||
lx.backup() // avoid an extra state and use the same as above
|
||||
return lexNumberOrDateOrIPStart
|
||||
case r == '.': // special error case, be kind to users
|
||||
return lx.errorf("Floats must start with a digit")
|
||||
case isNL(r):
|
||||
return lx.errorf("Expected value but found new line")
|
||||
}
|
||||
lx.backup()
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
|
||||
// have already been consumed. All whitespace and new lines are ignored.
|
||||
func lexArrayValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValue)
|
||||
case r == commentHashStart:
|
||||
lx.push(lexArrayValue)
|
||||
return lexCommentStart
|
||||
case r == commentSlashStart:
|
||||
rn := lx.next()
|
||||
if rn == commentSlashStart {
|
||||
lx.push(lexArrayValue)
|
||||
return lexCommentStart
|
||||
}
|
||||
lx.backup()
|
||||
fallthrough
|
||||
case r == arrayValTerm:
|
||||
return lx.errorf("Unexpected array value terminator '%v'.", arrayValTerm)
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
|
||||
// it ignores whitespace and expects either a ',' or a ']'.
|
||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexArrayValueEnd)
|
||||
case r == commentHashStart:
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
case r == commentSlashStart:
|
||||
rn := lx.next()
|
||||
if rn == commentSlashStart {
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
}
|
||||
lx.backup()
|
||||
fallthrough
|
||||
case r == arrayValTerm || isNL(r):
|
||||
return lexSkip(lx, lexArrayValue) // Move onto next
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
return lx.errorf("Expected an array value terminator %q or an array "+
|
||||
"terminator %q, but got '%v' instead.", arrayValTerm, arrayEnd, r)
|
||||
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
|
||||
// just been consumed.
|
||||
func lexArrayEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemArrayEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexMapKeyStart consumes a key name up until the first non-whitespace
|
||||
// character.
|
||||
// lexMapKeyStart will ignore whitespace.
|
||||
func lexMapKeyStart(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
switch {
|
||||
case isKeySeparator(r):
|
||||
return lx.errorf("Unexpected key separator '%v'.", r)
|
||||
case isWhitespace(r) || isNL(r):
|
||||
lx.next()
|
||||
return lexSkip(lx, lexMapKeyStart)
|
||||
case r == mapEnd:
|
||||
lx.next()
|
||||
return lexSkip(lx, lexMapEnd)
|
||||
case r == commentHashStart:
|
||||
lx.next()
|
||||
lx.push(lexMapKeyStart)
|
||||
return lexCommentStart
|
||||
case r == commentSlashStart:
|
||||
lx.next()
|
||||
rn := lx.next()
|
||||
if rn == commentSlashStart {
|
||||
lx.push(lexMapKeyStart)
|
||||
return lexCommentStart
|
||||
}
|
||||
lx.backup()
|
||||
case r == sqStringStart:
|
||||
lx.next()
|
||||
return lexSkip(lx, lexMapQuotedKey)
|
||||
case r == dqStringStart:
|
||||
lx.next()
|
||||
return lexSkip(lx, lexMapDubQuotedKey)
|
||||
}
|
||||
lx.ignore()
|
||||
lx.next()
|
||||
return lexMapKey
|
||||
}
|
||||
|
||||
// lexMapQuotedKey consumes the text of a key between quotes.
|
||||
func lexMapQuotedKey(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if r == sqStringEnd {
|
||||
lx.emit(itemKey)
|
||||
lx.next()
|
||||
return lexSkip(lx, lexMapKeyEnd)
|
||||
}
|
||||
lx.next()
|
||||
return lexMapQuotedKey
|
||||
}
|
||||
|
||||
// lexMapQuotedKey consumes the text of a key between quotes.
|
||||
func lexMapDubQuotedKey(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if r == dqStringEnd {
|
||||
lx.emit(itemKey)
|
||||
lx.next()
|
||||
return lexSkip(lx, lexMapKeyEnd)
|
||||
}
|
||||
lx.next()
|
||||
return lexMapDubQuotedKey
|
||||
}
|
||||
|
||||
// lexMapKey consumes the text of a key. Assumes that the first character (which
|
||||
// is not whitespace) has already been consumed.
|
||||
func lexMapKey(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isWhitespace(r) || isNL(r) || isKeySeparator(r) {
|
||||
lx.emit(itemKey)
|
||||
return lexMapKeyEnd
|
||||
}
|
||||
lx.next()
|
||||
return lexMapKey
|
||||
}
|
||||
|
||||
// lexMapKeyEnd consumes the end of a key (up to the key separator).
|
||||
// Assumes that the first whitespace character after a key (or the '='
|
||||
// separator) has NOT been consumed.
|
||||
func lexMapKeyEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexMapKeyEnd)
|
||||
case isKeySeparator(r):
|
||||
return lexSkip(lx, lexMapValue)
|
||||
}
|
||||
// We start the value here
|
||||
lx.backup()
|
||||
return lexMapValue
|
||||
}
|
||||
|
||||
// lexMapValue consumes one value in a map. It assumes that '{' or ','
|
||||
// have already been consumed. All whitespace and new lines are ignored.
|
||||
// Map values can be separated by ',' or simple NLs.
|
||||
func lexMapValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexMapValue)
|
||||
case r == mapValTerm:
|
||||
return lx.errorf("Unexpected map value terminator %q.", mapValTerm)
|
||||
case r == mapEnd:
|
||||
return lexSkip(lx, lexMapEnd)
|
||||
}
|
||||
lx.backup()
|
||||
lx.push(lexMapValueEnd)
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexMapValueEnd consumes the cruft between values of a map. Namely,
|
||||
// it ignores whitespace and expects either a ',' or a '}'.
|
||||
func lexMapValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexMapValueEnd)
|
||||
case r == commentHashStart:
|
||||
lx.push(lexMapValueEnd)
|
||||
return lexCommentStart
|
||||
case r == commentSlashStart:
|
||||
rn := lx.next()
|
||||
if rn == commentSlashStart {
|
||||
lx.push(lexMapValueEnd)
|
||||
return lexCommentStart
|
||||
}
|
||||
lx.backup()
|
||||
fallthrough
|
||||
case r == optValTerm || r == mapValTerm || isNL(r):
|
||||
return lexSkip(lx, lexMapKeyStart) // Move onto next
|
||||
case r == mapEnd:
|
||||
return lexSkip(lx, lexMapEnd)
|
||||
}
|
||||
return lx.errorf("Expected a map value terminator %q or a map "+
|
||||
"terminator %q, but got '%v' instead.", mapValTerm, mapEnd, r)
|
||||
}
|
||||
|
||||
// lexMapEnd finishes the lexing of a map. It assumes that a '}' has
|
||||
// just been consumed.
|
||||
func lexMapEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemMapEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// Checks if the unquoted string was actually a boolean
|
||||
func (lx *lexer) isBool() bool {
|
||||
str := lx.input[lx.start:lx.pos]
|
||||
return str == "true" || str == "false" || str == "TRUE" || str == "FALSE"
|
||||
}
|
||||
|
||||
// Check if the unquoted string is a variable reference, starting with $.
|
||||
func (lx *lexer) isVariable() bool {
|
||||
if lx.input[lx.start] == '$' {
|
||||
lx.start += 1
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lexQuotedString consumes the inner contents of a string. It assumes that the
|
||||
// beginning '"' has already been consumed and ignored. It will not interpret any
|
||||
// internal contents.
|
||||
func lexQuotedString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == sqStringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexQuotedString
|
||||
}
|
||||
|
||||
// lexDubQuotedString consumes the inner contents of a string. It assumes that the
|
||||
// beginning '"' has already been consumed and ignored. It will not interpret any
|
||||
// internal contents.
|
||||
func lexDubQuotedString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == dqStringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexDubQuotedString
|
||||
}
|
||||
|
||||
// lexString consumes the inner contents of a raw string.
|
||||
func lexString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == '\\':
|
||||
return lexStringEscape
|
||||
// Termination of non-quoted strings
|
||||
case isNL(r) || r == eof || r == optValTerm ||
|
||||
r == arrayValTerm || r == arrayEnd || r == mapEnd ||
|
||||
isWhitespace(r):
|
||||
|
||||
lx.backup()
|
||||
if lx.isBool() {
|
||||
lx.emit(itemBool)
|
||||
} else if lx.isVariable() {
|
||||
lx.emit(itemVariable)
|
||||
} else {
|
||||
lx.emit(itemString)
|
||||
}
|
||||
return lx.pop()
|
||||
case r == sqStringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexBlock consumes the inner contents as a string. It assumes that the
|
||||
// beginning '(' has already been consumed and ignored. It will continue
|
||||
// processing until it finds a ')' on a new line by itself.
|
||||
func lexBlock(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == blockEnd:
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
|
||||
// Looking for a ')' character on a line by itself, if the previous
|
||||
// character isn't a new line, then break so we keep processing the block.
|
||||
if lx.next() != '\n' {
|
||||
lx.next()
|
||||
break
|
||||
}
|
||||
lx.next()
|
||||
|
||||
// Make sure the next character is a new line or an eof. We want a ')' on a
|
||||
// bare line by itself.
|
||||
switch lx.next() {
|
||||
case '\n', eof:
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
return lexBlock
|
||||
}
|
||||
|
||||
// lexStringEscape consumes an escaped character. It assumes that the preceding
|
||||
// '\\' has already been consumed.
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'x':
|
||||
return lexStringBinary
|
||||
case 't':
|
||||
fallthrough
|
||||
case 'n':
|
||||
fallthrough
|
||||
case 'r':
|
||||
fallthrough
|
||||
case '"':
|
||||
fallthrough
|
||||
case '\\':
|
||||
return lexString
|
||||
}
|
||||
return lx.errorf("Invalid escape character '%v'. Only the following "+
|
||||
"escape characters are allowed: \\xXX, \\t, \\n, \\r, \\\", \\\\.", r)
|
||||
}
|
||||
|
||||
// lexStringBinary consumes two hexadecimal digits following '\x'. It assumes
|
||||
// that the '\x' has already been consumed.
|
||||
func lexStringBinary(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf("Expected two hexadecimal digits after '\\x', but "+
|
||||
"got '%v' instead.", r)
|
||||
}
|
||||
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf("Expected two hexadecimal digits after '\\x', but "+
|
||||
"got '%v' instead.", r)
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexNumberOrDateStart consumes either a (positive) integer, a float, a datetime, or IP.
|
||||
// It assumes that NO negative sign has been consumed, that is triggered above.
|
||||
func lexNumberOrDateOrIPStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
}
|
||||
return lx.errorf("Expected a digit but got '%v'.", r)
|
||||
}
|
||||
return lexNumberOrDateOrIP
|
||||
}
|
||||
|
||||
// lexNumberOrDateOrIP consumes either a (positive) integer, float, datetime or IP.
|
||||
func lexNumberOrDateOrIP(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == '-':
|
||||
if lx.pos-lx.start != 5 {
|
||||
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
|
||||
}
|
||||
return lexDateAfterYear
|
||||
case isDigit(r):
|
||||
return lexNumberOrDateOrIP
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
|
||||
// It assumes that "YYYY-" has already been consumed.
|
||||
func lexDateAfterYear(lx *lexer) stateFn {
|
||||
formats := []rune{
|
||||
// digits are '0'.
|
||||
// everything else is direct equality.
|
||||
'0', '0', '-', '0', '0',
|
||||
'T',
|
||||
'0', '0', ':', '0', '0', ':', '0', '0',
|
||||
'Z',
|
||||
}
|
||||
for _, f := range formats {
|
||||
r := lx.next()
|
||||
if f == '0' {
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Expected digit in ISO8601 datetime, "+
|
||||
"but found '%v' instead.", r)
|
||||
}
|
||||
} else if f != r {
|
||||
return lx.errorf("Expected '%v' in ISO8601 datetime, "+
|
||||
"but found '%v' instead.", f, r)
|
||||
}
|
||||
}
|
||||
lx.emit(itemDatetime)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that a
|
||||
// negative sign has already been read, but that *no* digits have been consumed.
|
||||
// lexNumberStart will move to the appropriate integer or float states.
|
||||
func lexNumberStart(lx *lexer) stateFn {
|
||||
// we MUST see a digit. Even floats have to start with a digit.
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
}
|
||||
return lx.errorf("Expected a digit but got '%v'.", r)
|
||||
}
|
||||
return lexNumber
|
||||
}
|
||||
|
||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
||||
func lexNumber(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isDigit(r):
|
||||
return lexNumber
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFloatStart starts the consumption of digits of a float after a '.'.
|
||||
// Namely, at least one digit is required.
|
||||
func lexFloatStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Floats must have a digit after the '.', but got "+
|
||||
"'%v' instead.", r)
|
||||
}
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
// lexFloat consumes the digits of a float after a '.'.
|
||||
// Assumes that one digit has been consumed after a '.' already.
|
||||
func lexFloat(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
// Not a digit, if its another '.', need to see if we falsely assumed a float.
|
||||
if r == '.' {
|
||||
return lexIPAddr
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemFloat)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexIPAddr consumes IP addrs, like 127.0.0.1:4222
|
||||
func lexIPAddr(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) || r == '.' || r == ':' {
|
||||
return lexIPAddr
|
||||
}
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
|
||||
// itemCommentStart and consume no characters, passing control to lexComment.
|
||||
func lexCommentStart(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemCommentStart)
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
||||
// It will consume *up to* the first new line character, and pass control
|
||||
// back to the last state on the stack.
|
||||
func lexComment(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isNL(r) || r == eof {
|
||||
lx.emit(itemText)
|
||||
return lx.pop()
|
||||
}
|
||||
lx.next()
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexSkip ignores all slurped input and moves on to the next state.
|
||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
||||
return func(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
return nextState
|
||||
}
|
||||
}
|
||||
|
||||
// Tests for both key separators
|
||||
func isKeySeparator(r rune) bool {
|
||||
return r == keySepEqual || r == keySepColon
|
||||
}
|
||||
|
||||
// isWhitespace returns true if `r` is a whitespace character according
|
||||
// to the spec.
|
||||
func isWhitespace(r rune) bool {
|
||||
return r == '\t' || r == ' '
|
||||
}
|
||||
|
||||
func isNL(r rune) bool {
|
||||
return r == '\n' || r == '\r'
|
||||
}
|
||||
|
||||
func isDigit(r rune) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') ||
|
||||
(r >= 'a' && r <= 'f') ||
|
||||
(r >= 'A' && r <= 'F')
|
||||
}
|
||||
|
||||
func (itype itemType) String() string {
|
||||
switch itype {
|
||||
case itemError:
|
||||
return "Error"
|
||||
case itemNIL:
|
||||
return "NIL"
|
||||
case itemEOF:
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
case itemInteger:
|
||||
return "Integer"
|
||||
case itemFloat:
|
||||
return "Float"
|
||||
case itemDatetime:
|
||||
return "DateTime"
|
||||
case itemKey:
|
||||
return "Key"
|
||||
case itemArrayStart:
|
||||
return "ArrayStart"
|
||||
case itemArrayEnd:
|
||||
return "ArrayEnd"
|
||||
case itemMapStart:
|
||||
return "MapStart"
|
||||
case itemMapEnd:
|
||||
return "MapEnd"
|
||||
case itemCommentStart:
|
||||
return "CommentStart"
|
||||
case itemVariable:
|
||||
return "Variable"
|
||||
}
|
||||
panic(fmt.Sprintf("BUG: Unknown type '%s'.", itype.String()))
|
||||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, '%s', %d)", item.typ.String(), item.val, item.line)
|
||||
}
|
||||
|
||||
func escapeSpecial(c rune) string {
|
||||
switch c {
|
||||
case '\n':
|
||||
return "\\n"
|
||||
}
|
||||
return string(c)
|
||||
}
|
||||
229
vendor/github.com/nats-io/gnatsd/conf/parse.go
generated
vendored
Normal file
229
vendor/github.com/nats-io/gnatsd/conf/parse.go
generated
vendored
Normal file
@@ -0,0 +1,229 @@
|
||||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
// Package conf supports a configuration file format used by gnatsd. It is
|
||||
// a flexible format that combines the best of traditional
|
||||
// configuration formats and newer styles such as JSON and YAML.
|
||||
package conf
|
||||
|
||||
// The format supported is less restrictive than today's formats.
|
||||
// Supports mixed Arrays [], nested Maps {}, multiple comment types (# and //)
|
||||
// Also supports key value assigments using '=' or ':' or whiteSpace()
|
||||
// e.g. foo = 2, foo : 2, foo 2
|
||||
// maps can be assigned with no key separator as well
|
||||
// semicolons as value terminators in key/value assignments are optional
|
||||
//
|
||||
// see parse_test.go for more examples.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
mapping map[string]interface{}
|
||||
lx *lexer
|
||||
|
||||
// The current scoped context, can be array or map
|
||||
ctx interface{}
|
||||
|
||||
// stack of contexts, either map or array/slice stack
|
||||
ctxs []interface{}
|
||||
|
||||
// Keys stack
|
||||
keys []string
|
||||
}
|
||||
|
||||
// Parse will return a map of keys to interface{}, although concrete types
|
||||
// underly them. The values supported are string, bool, int64, float64, DateTime.
|
||||
// Arrays and nested Maps are also supported.
|
||||
func Parse(data string) (map[string]interface{}, error) {
|
||||
p, err := parse(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.mapping, nil
|
||||
}
|
||||
|
||||
func parse(data string) (p *parser, err error) {
|
||||
p = &parser{
|
||||
mapping: make(map[string]interface{}),
|
||||
lx: lex(data),
|
||||
ctxs: make([]interface{}, 0, 4),
|
||||
keys: make([]string, 0, 4),
|
||||
}
|
||||
p.pushContext(p.mapping)
|
||||
|
||||
for {
|
||||
it := p.next()
|
||||
if it.typ == itemEOF {
|
||||
break
|
||||
}
|
||||
if err := p.processItem(it); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *parser) next() item {
|
||||
return p.lx.nextItem()
|
||||
}
|
||||
|
||||
func (p *parser) pushContext(ctx interface{}) {
|
||||
p.ctxs = append(p.ctxs, ctx)
|
||||
p.ctx = ctx
|
||||
}
|
||||
|
||||
func (p *parser) popContext() interface{} {
|
||||
if len(p.ctxs) == 0 {
|
||||
panic("BUG in parser, context stack empty")
|
||||
}
|
||||
li := len(p.ctxs) - 1
|
||||
last := p.ctxs[li]
|
||||
p.ctxs = p.ctxs[0:li]
|
||||
p.ctx = p.ctxs[len(p.ctxs)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (p *parser) pushKey(key string) {
|
||||
p.keys = append(p.keys, key)
|
||||
}
|
||||
|
||||
func (p *parser) popKey() string {
|
||||
if len(p.keys) == 0 {
|
||||
panic("BUG in parser, keys stack empty")
|
||||
}
|
||||
li := len(p.keys) - 1
|
||||
last := p.keys[li]
|
||||
p.keys = p.keys[0:li]
|
||||
return last
|
||||
}
|
||||
|
||||
func (p *parser) processItem(it item) error {
|
||||
switch it.typ {
|
||||
case itemError:
|
||||
return fmt.Errorf("Parse error on line %d: '%s'", it.line, it.val)
|
||||
case itemKey:
|
||||
p.pushKey(it.val)
|
||||
case itemMapStart:
|
||||
newCtx := make(map[string]interface{})
|
||||
p.pushContext(newCtx)
|
||||
case itemMapEnd:
|
||||
p.setValue(p.popContext())
|
||||
case itemString:
|
||||
p.setValue(it.val) // FIXME(dlc) sanitize string?
|
||||
case itemInteger:
|
||||
num, err := strconv.ParseInt(it.val, 10, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
return fmt.Errorf("Integer '%s' is out of the range.", it.val)
|
||||
}
|
||||
return fmt.Errorf("Expected integer, but got '%s'.", it.val)
|
||||
}
|
||||
p.setValue(num)
|
||||
case itemFloat:
|
||||
num, err := strconv.ParseFloat(it.val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
return fmt.Errorf("Float '%s' is out of the range.", it.val)
|
||||
}
|
||||
return fmt.Errorf("Expected float, but got '%s'.", it.val)
|
||||
}
|
||||
p.setValue(num)
|
||||
case itemBool:
|
||||
switch it.val {
|
||||
case "true":
|
||||
p.setValue(true)
|
||||
case "false":
|
||||
p.setValue(false)
|
||||
default:
|
||||
return fmt.Errorf("Expected boolean value, but got '%s'.", it.val)
|
||||
}
|
||||
case itemDatetime:
|
||||
dt, err := time.Parse("2006-01-02T15:04:05Z", it.val)
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"Expected Zulu formatted DateTime, but got '%s'.", it.val)
|
||||
}
|
||||
p.setValue(dt)
|
||||
case itemArrayStart:
|
||||
var array = make([]interface{}, 0)
|
||||
p.pushContext(array)
|
||||
case itemArrayEnd:
|
||||
array := p.ctx
|
||||
p.popContext()
|
||||
p.setValue(array)
|
||||
case itemVariable:
|
||||
if value, ok := p.lookupVariable(it.val); ok {
|
||||
p.setValue(value)
|
||||
} else {
|
||||
return fmt.Errorf("Variable reference for '%s' on line %d can not be found.",
|
||||
it.val, it.line)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Used to map an environment value into a temporary map to pass to secondary Parse call.
|
||||
const pkey = "pk"
|
||||
|
||||
// We special case raw strings here that are bcrypt'd. This allows us not to force quoting the strings
|
||||
const bcryptPrefix = "2a$"
|
||||
|
||||
// lookupVariable will lookup a variable reference. It will use block scoping on keys
|
||||
// it has seen before, with the top level scoping being the environment variables. We
|
||||
// ignore array contexts and only process the map contexts..
|
||||
//
|
||||
// Returns true for ok if it finds something, similar to map.
|
||||
func (p *parser) lookupVariable(varReference string) (interface{}, bool) {
|
||||
// Do special check to see if it is a raw bcrypt string.
|
||||
if strings.HasPrefix(varReference, bcryptPrefix) {
|
||||
return "$" + varReference, true
|
||||
}
|
||||
|
||||
// Loop through contexts currently on the stack.
|
||||
for i := len(p.ctxs) - 1; i >= 0; i -= 1 {
|
||||
ctx := p.ctxs[i]
|
||||
// Process if it is a map context
|
||||
if m, ok := ctx.(map[string]interface{}); ok {
|
||||
if v, ok := m[varReference]; ok {
|
||||
return v, ok
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we are here, we have exhausted our context maps and still not found anything.
|
||||
// Parse from the environment.
|
||||
if vStr, ok := os.LookupEnv(varReference); ok {
|
||||
// Everything we get here will be a string value, so we need to process as a parser would.
|
||||
if vmap, err := Parse(fmt.Sprintf("%s=%s", pkey, vStr)); err == nil {
|
||||
v, ok := vmap[pkey]
|
||||
return v, ok
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (p *parser) setValue(val interface{}) {
|
||||
// Test to see if we are on an array or a map
|
||||
|
||||
// Array processing
|
||||
if ctx, ok := p.ctx.([]interface{}); ok {
|
||||
p.ctx = append(ctx, val)
|
||||
p.ctxs[len(p.ctxs)-1] = p.ctx
|
||||
}
|
||||
|
||||
// Map processing
|
||||
if ctx, ok := p.ctx.(map[string]interface{}); ok {
|
||||
key := p.popKey()
|
||||
// FIXME(dlc), make sure to error if redefining same key?
|
||||
ctx[key] = val
|
||||
}
|
||||
}
|
||||
20
vendor/github.com/nats-io/gnatsd/server/LICENSE
generated
vendored
Normal file
20
vendor/github.com/nats-io/gnatsd/server/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
15
vendor/github.com/nats-io/gnatsd/server/auth.go
generated
vendored
Normal file
15
vendor/github.com/nats-io/gnatsd/server/auth.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
// Auth is an interface for implementing authentication
|
||||
type Auth interface {
|
||||
// Check if a client is authorized to connect
|
||||
Check(c ClientAuth) bool
|
||||
}
|
||||
|
||||
// ClientAuth is an interface for client authentication
|
||||
type ClientAuth interface {
|
||||
// Get options associated with a client
|
||||
GetOpts() *clientOpts
|
||||
}
|
||||
33
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.4.go
generated
vendored
Normal file
33
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.4.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
// +build go1.4,!go1.5
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// Where we maintain all of the available 1.4 ciphers
|
||||
var cipherMap = map[string]uint16{
|
||||
"TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
|
||||
func defaultCipherSuites() []uint16 {
|
||||
return []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
}
|
||||
38
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.5.go
generated
vendored
Normal file
38
vendor/github.com/nats-io/gnatsd/server/ciphersuites_1.5.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
// +build go1.5
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// Where we maintain all of the available 1.5 ciphers
|
||||
var cipherMap = map[string]uint16{
|
||||
"TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
}
|
||||
|
||||
func defaultCipherSuites() []uint16 {
|
||||
return []uint16{
|
||||
// The SHA384 versions are only in Go1.5
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
}
|
||||
1123
vendor/github.com/nats-io/gnatsd/server/client.go
generated
vendored
Normal file
1123
vendor/github.com/nats-io/gnatsd/server/client.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
85
vendor/github.com/nats-io/gnatsd/server/const.go
generated
vendored
Normal file
85
vendor/github.com/nats-io/gnatsd/server/const.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// VERSION is the current version for the server.
|
||||
VERSION = "0.8.2"
|
||||
|
||||
// DEFAULT_PORT is the deault port for client connections.
|
||||
DEFAULT_PORT = 4222
|
||||
|
||||
// RANDOM_PORT is the value for port that, when supplied, will cause the
|
||||
// server to listen on a randomly-chosen available port. The resolved port
|
||||
// is available via the Addr() method.
|
||||
RANDOM_PORT = -1
|
||||
|
||||
// DEFAULT_HOST defaults to all interfaces.
|
||||
DEFAULT_HOST = "0.0.0.0"
|
||||
|
||||
// MAX_CONTROL_LINE_SIZE is the maximum allowed protocol control line size.
|
||||
// 1k should be plenty since payloads sans connect string are separate
|
||||
MAX_CONTROL_LINE_SIZE = 1024
|
||||
|
||||
// MAX_PAYLOAD_SIZE is the maximum allowed payload size. Should be using
|
||||
// something different if > 1MB payloads are needed.
|
||||
MAX_PAYLOAD_SIZE = (1024 * 1024)
|
||||
|
||||
// MAX_PENDING_SIZE is the maximum outbound size (in bytes) per client.
|
||||
MAX_PENDING_SIZE = (10 * 1024 * 1024)
|
||||
|
||||
// DEFAULT_MAX_CONNECTIONS is the default maximum connections allowed.
|
||||
DEFAULT_MAX_CONNECTIONS = (64 * 1024)
|
||||
|
||||
// TLS_TIMEOUT is the TLS wait time.
|
||||
TLS_TIMEOUT = 500 * time.Millisecond
|
||||
|
||||
// AUTH_TIMEOUT is the authorization wait time.
|
||||
AUTH_TIMEOUT = 2 * TLS_TIMEOUT
|
||||
|
||||
// DEFAULT_PING_INTERVAL is how often pings are sent to clients and routes.
|
||||
DEFAULT_PING_INTERVAL = 2 * time.Minute
|
||||
|
||||
// DEFAULT_PING_MAX_OUT is maximum allowed pings outstanding before disconnect.
|
||||
DEFAULT_PING_MAX_OUT = 2
|
||||
|
||||
// CR_LF string
|
||||
CR_LF = "\r\n"
|
||||
|
||||
// LEN_CR_LF hold onto the computed size.
|
||||
LEN_CR_LF = len(CR_LF)
|
||||
|
||||
// DEFAULT_FLUSH_DEADLINE is the write/flush deadlines.
|
||||
DEFAULT_FLUSH_DEADLINE = 2 * time.Second
|
||||
|
||||
// DEFAULT_HTTP_PORT is the default monitoring port.
|
||||
DEFAULT_HTTP_PORT = 8222
|
||||
|
||||
// ACCEPT_MIN_SLEEP is the minimum acceptable sleep times on temporary errors.
|
||||
ACCEPT_MIN_SLEEP = 10 * time.Millisecond
|
||||
|
||||
// ACCEPT_MAX_SLEEP is the maximum acceptable sleep times on temporary errors
|
||||
ACCEPT_MAX_SLEEP = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_CONNECT Route solicitation intervals.
|
||||
DEFAULT_ROUTE_CONNECT = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_RECONNECT Route reconnect intervals.
|
||||
DEFAULT_ROUTE_RECONNECT = 1 * time.Second
|
||||
|
||||
// DEFAULT_ROUTE_DIAL Route dial timeout.
|
||||
DEFAULT_ROUTE_DIAL = 1 * time.Second
|
||||
|
||||
// PROTO_SNIPPET_SIZE is the default size of proto to print on parse errors.
|
||||
PROTO_SNIPPET_SIZE = 32
|
||||
|
||||
// MAX_MSG_ARGS Maximum possible number of arguments from MSG proto.
|
||||
MAX_MSG_ARGS = 4
|
||||
|
||||
// MAX_PUB_ARGS Maximum possible number of arguments from PUB proto.
|
||||
MAX_PUB_ARGS = 3
|
||||
)
|
||||
19
vendor/github.com/nats-io/gnatsd/server/errors.go
generated
vendored
Normal file
19
vendor/github.com/nats-io/gnatsd/server/errors.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// Copyright 2012 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import "errors"
|
||||
|
||||
// Sentinel errors returned by the server's connection/authorization paths.
// NOTE(review): the messages are capitalized, contrary to Go convention
// (lowercase, no punctuation); kept verbatim since clients may match on text.
var (
	// ErrConnectionClosed represents error condition on a closed connection.
	ErrConnectionClosed = errors.New("Connection Closed")

	// ErrAuthorization represents error condition on failed authorization.
	ErrAuthorization = errors.New("Authorization Error")

	// ErrAuthTimeout represents error condition on failed authorization due to timeout.
	ErrAuthTimeout = errors.New("Authorization Timeout")

	// ErrMaxPayload represents error condition when the payload is too big.
	ErrMaxPayload = errors.New("Maximum Payload Exceeded")
)
|
||||
104
vendor/github.com/nats-io/gnatsd/server/log.go
generated
vendored
Normal file
104
vendor/github.com/nats-io/gnatsd/server/log.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
// Copyright 2012-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Package globals for performance checks.
// trace and debug are read with atomics on every Tracef/Debugf call so the
// common "logging disabled" case costs one atomic load, not a mutex.
var trace int32
var debug int32

// log holds the process-wide logger; the embedded mutex guards logger
// replacement (SetLogger) against concurrent log calls.
var log = struct {
	sync.Mutex
	logger Logger
}{}
|
||||
|
||||
// Logger is the interface of the NATS Server logger. Implementations must be
// safe for concurrent use, since calls are fanned out from many goroutines
// (serialized here only while holding the package log mutex).
type Logger interface {

	// Noticef logs a notice statement.
	Noticef(format string, v ...interface{})

	// Fatalf logs a fatal error. Implementations typically exit the process.
	Fatalf(format string, v ...interface{})

	// Errorf logs an error.
	Errorf(format string, v ...interface{})

	// Debugf logs a debug statement (only called when debug is enabled).
	Debugf(format string, v ...interface{})

	// Tracef logs a trace statement (only called when trace is enabled).
	Tracef(format string, v ...interface{})
}
|
||||
|
||||
// SetLogger sets the logger of the server
|
||||
func (s *Server) SetLogger(logger Logger, debugFlag, traceFlag bool) {
|
||||
if debugFlag {
|
||||
atomic.StoreInt32(&debug, 1)
|
||||
}
|
||||
|
||||
if traceFlag {
|
||||
atomic.StoreInt32(&trace, 1)
|
||||
}
|
||||
|
||||
log.Lock()
|
||||
log.logger = logger
|
||||
log.Unlock()
|
||||
}
|
||||
|
||||
// Noticef logs a notice statement
|
||||
func Noticef(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Noticef(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Errorf logs an error
|
||||
func Errorf(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Errorf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Fatalf logs a fatal error
|
||||
func Fatalf(format string, v ...interface{}) {
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Fatalf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Debugf logs a debug statement
|
||||
func Debugf(format string, v ...interface{}) {
|
||||
if atomic.LoadInt32(&debug) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Debugf(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
// Tracef logs a trace statement
|
||||
func Tracef(format string, v ...interface{}) {
|
||||
if atomic.LoadInt32(&trace) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
executeLogCall(func(logger Logger, format string, v ...interface{}) {
|
||||
logger.Tracef(format, v...)
|
||||
}, format, v...)
|
||||
}
|
||||
|
||||
func executeLogCall(f func(logger Logger, format string, v ...interface{}), format string, args ...interface{}) {
|
||||
log.Lock()
|
||||
defer log.Unlock()
|
||||
if log.logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
f(log.logger, format, args...)
|
||||
}
|
||||
521
vendor/github.com/nats-io/gnatsd/server/monitor.go
generated
vendored
Normal file
521
vendor/github.com/nats-io/gnatsd/server/monitor.go
generated
vendored
Normal file
@@ -0,0 +1,521 @@
|
||||
// Copyright 2013-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/server/pse"
|
||||
)
|
||||
|
||||
// Snapshot this once at startup; reported in /varz via updateUsage.
var numCores int

func init() {
	numCores = runtime.NumCPU()
}
|
||||
|
||||
// Connz represents detailed information on current client connections,
// as served by the /connz monitoring endpoint.
type Connz struct {
	Now      time.Time  `json:"now"`             // time the snapshot was taken
	NumConns int        `json:"num_connections"` // number of entries in Conns (after pagination)
	Total    int        `json:"total"`           // total clients on the server
	Offset   int        `json:"offset"`          // pagination offset requested
	Limit    int        `json:"limit"`           // pagination limit requested
	Conns    []ConnInfo `json:"connections"`
}
|
||||
|
||||
// ConnInfo has detailed information on a per connection basis.
// TLS fields are only populated when the server requires TLS, and Subs /
// AuthorizedUser only when the corresponding query options are requested.
type ConnInfo struct {
	Cid            uint64    `json:"cid"`
	IP             string    `json:"ip"`
	Port           int       `json:"port"`
	Start          time.Time `json:"start"`
	LastActivity   time.Time `json:"last_activity"`
	Uptime         string    `json:"uptime"` // human-readable, via myUptime
	Idle           string    `json:"idle"`   // human-readable, via myUptime
	Pending        int       `json:"pending_bytes"`
	InMsgs         int64     `json:"in_msgs"`
	OutMsgs        int64     `json:"out_msgs"`
	InBytes        int64     `json:"in_bytes"`
	OutBytes       int64     `json:"out_bytes"`
	NumSubs        uint32    `json:"subscriptions"`
	Name           string    `json:"name,omitempty"`
	Lang           string    `json:"lang,omitempty"`
	Version        string    `json:"version,omitempty"`
	TLSVersion     string    `json:"tls_version,omitempty"`
	TLSCipher      string    `json:"tls_cipher_suite,omitempty"`
	AuthorizedUser string    `json:"authorized_user,omitempty"`
	Subs           []string  `json:"subscriptions_list,omitempty"`
}
|
||||
|
||||
// DefaultConnListSize is the default size of the connection list
// returned by /connz when no explicit limit is given.
const DefaultConnListSize = 1024

// defaultStackBufSize is the initial buffer used by HandleStacksz;
// it is doubled until the full stack dump fits.
const defaultStackBufSize = 10000
|
||||
|
||||
// HandleConnz processes HTTP requests for connection information (/connz).
//
// Query parameters: sort (a SortOpt value), auth=1 (include username),
// subs=1 (include subscription list), offset and limit (pagination).
//
// Locking: the server lock is held only while collecting sort keys; each
// client's lock is taken twice (key collection, then detail fill). The
// second pass re-reads live values and then overwrites the sort field with
// the snapshotted key so output still appears sorted.
func (s *Server) HandleConnz(w http.ResponseWriter, r *http.Request) {
	sortOpt := SortOpt(r.URL.Query().Get("sort"))

	// If no sort option given or sort is by uptime, then sort by cid
	if sortOpt == "" || sortOpt == byUptime {
		sortOpt = byCid
	} else if !sortOpt.IsValid() {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(fmt.Sprintf("Invalid sorting option: %s", sortOpt)))
		return
	}

	c := &Connz{}
	c.Now = time.Now()

	// Parse errors deliberately ignored: absent/bad values behave as 0.
	auth, _ := strconv.Atoi(r.URL.Query().Get("auth"))
	subs, _ := strconv.Atoi(r.URL.Query().Get("subs"))
	c.Offset, _ = strconv.Atoi(r.URL.Query().Get("offset"))
	c.Limit, _ = strconv.Atoi(r.URL.Query().Get("limit"))

	if c.Limit == 0 {
		c.Limit = DefaultConnListSize
	}

	// Walk the list under the server lock, capturing one sort key per client.
	s.mu.Lock()
	s.httpReqStats[ConnzPath]++
	tlsRequired := s.info.TLSRequired

	// number total of clients. The resulting ConnInfo array
	// may be smaller if pagination is used.
	totalClients := len(s.clients)
	c.Total = totalClients

	i := 0
	pairs := make(Pairs, totalClients)
	for _, client := range s.clients {
		client.mu.Lock()
		switch sortOpt {
		case byCid:
			pairs[i] = Pair{Key: client, Val: int64(client.cid)}
		case bySubs:
			pairs[i] = Pair{Key: client, Val: int64(len(client.subs))}
		case byPending:
			pairs[i] = Pair{Key: client, Val: int64(client.bw.Buffered())}
		case byOutMsgs:
			pairs[i] = Pair{Key: client, Val: client.outMsgs}
		case byInMsgs:
			pairs[i] = Pair{Key: client, Val: atomic.LoadInt64(&client.inMsgs)}
		case byOutBytes:
			pairs[i] = Pair{Key: client, Val: client.outBytes}
		case byInBytes:
			pairs[i] = Pair{Key: client, Val: atomic.LoadInt64(&client.inBytes)}
		case byLast:
			pairs[i] = Pair{Key: client, Val: client.last.UnixNano()}
		case byIdle:
			pairs[i] = Pair{Key: client, Val: c.Now.Sub(client.last).Nanoseconds()}
		}
		client.mu.Unlock()
		i++
	}
	s.mu.Unlock()

	if totalClients > 0 {
		if sortOpt == byCid {
			// Return in ascending order
			sort.Sort(pairs)
		} else {
			// Return in descending order
			sort.Sort(sort.Reverse(pairs))
		}
	}

	minoff := c.Offset
	maxoff := c.Offset + c.Limit

	// Make sure these are sane.
	if minoff > totalClients {
		minoff = totalClients
	}
	if maxoff > totalClients {
		maxoff = totalClients
	}
	pairs = pairs[minoff:maxoff]

	// Now we have the real number of ConnInfo objects, we can set c.NumConns
	// and allocate the array
	c.NumConns = len(pairs)
	c.Conns = make([]ConnInfo, c.NumConns)

	i = 0
	for _, pair := range pairs {

		client := pair.Key

		client.mu.Lock()

		// First, fill ConnInfo with current client's values. We will
		// then overwrite the field used for the sort with what was stored
		// in 'pair'.
		ci := &c.Conns[i]

		ci.Cid = client.cid
		ci.Start = client.start
		ci.LastActivity = client.last
		ci.Uptime = myUptime(c.Now.Sub(client.start))
		ci.Idle = myUptime(c.Now.Sub(client.last))
		ci.OutMsgs = client.outMsgs
		ci.OutBytes = client.outBytes
		ci.NumSubs = uint32(len(client.subs))
		ci.Pending = client.bw.Buffered()
		ci.Name = client.opts.Name
		ci.Lang = client.opts.Lang
		ci.Version = client.opts.Version
		// inMsgs and inBytes are updated outside of the client's lock, so
		// we need to use atomic here.
		ci.InMsgs = atomic.LoadInt64(&client.inMsgs)
		ci.InBytes = atomic.LoadInt64(&client.inBytes)

		// Now overwrite the field that was used as the sort key, so results
		// still look sorted even if the value has changed since sort occurred.
		sortValue := pair.Val
		switch sortOpt {
		case bySubs:
			ci.NumSubs = uint32(sortValue)
		case byPending:
			ci.Pending = int(sortValue)
		case byOutMsgs:
			ci.OutMsgs = sortValue
		case byInMsgs:
			ci.InMsgs = sortValue
		case byOutBytes:
			ci.OutBytes = sortValue
		case byInBytes:
			ci.InBytes = sortValue
		case byLast:
			ci.LastActivity = time.Unix(0, sortValue)
		case byIdle:
			ci.Idle = myUptime(time.Duration(sortValue))
		}

		// If the connection is gone, too bad, we won't set TLSVersion and TLSCipher.
		// NOTE(review): the unchecked assertion to *tls.Conn assumes every
		// connection is TLS when tlsRequired is set — verify against accept path.
		if tlsRequired && client.nc != nil {
			conn := client.nc.(*tls.Conn)
			cs := conn.ConnectionState()
			ci.TLSVersion = tlsVersion(cs.Version)
			ci.TLSCipher = tlsCipher(cs.CipherSuite)
		}

		switch conn := client.nc.(type) {
		case *net.TCPConn, *tls.Conn:
			addr := conn.RemoteAddr().(*net.TCPAddr)
			ci.Port = addr.Port
			ci.IP = addr.IP.String()
		}

		// Fill in subscription data if requested.
		if subs == 1 {
			sublist := make([]*subscription, 0, len(client.subs))
			for _, sub := range client.subs {
				sublist = append(sublist, sub)
			}
			ci.Subs = castToSliceString(sublist)
		}

		// Fill in user if auth requested.
		if auth == 1 {
			ci.AuthorizedUser = client.opts.Username
		}

		client.mu.Unlock()
		i++
	}

	b, err := json.MarshalIndent(c, "", " ")
	if err != nil {
		// NOTE(review): on marshal failure b is nil and we still respond
		// with an empty body; the error is only logged.
		Errorf("Error marshalling response to /connz request: %v", err)
	}

	// Handle response
	ResponseHandler(w, r, b)
}
|
||||
|
||||
func castToSliceString(input []*subscription) []string {
|
||||
output := make([]string, 0, len(input))
|
||||
for _, line := range input {
|
||||
output = append(output, string(line.subject))
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// Subsz represents detailed information on the server's sublist,
// as served by the /subsz monitoring endpoint. It simply embeds the
// sublist statistics snapshot.
type Subsz struct {
	*SublistStats
}
|
||||
|
||||
// Routez represents detailed information on current route (cluster peer)
// connections, as served by the /routez monitoring endpoint.
type Routez struct {
	Now       time.Time    `json:"now"`        // time the snapshot was taken
	NumRoutes int          `json:"num_routes"` // number of active routes
	Routes    []*RouteInfo `json:"routes"`
}
|
||||
|
||||
// RouteInfo has detailed information on a per (route) connection basis.
// Subs is only populated when the subs=1 query option is given.
type RouteInfo struct {
	Rid          uint64   `json:"rid"`
	RemoteID     string   `json:"remote_id"`
	DidSolicit   bool     `json:"did_solicit"`   // true if this server initiated the route
	IsConfigured bool     `json:"is_configured"` // true for explicitly configured routes
	IP           string   `json:"ip"`
	Port         int      `json:"port"`
	Pending      int      `json:"pending_size"`
	InMsgs       int64    `json:"in_msgs"`
	OutMsgs      int64    `json:"out_msgs"`
	InBytes      int64    `json:"in_bytes"`
	OutBytes     int64    `json:"out_bytes"`
	NumSubs      uint32   `json:"subscriptions"`
	Subs         []string `json:"subscriptions_list,omitempty"`
}
|
||||
|
||||
// HandleRoutez processes HTTP requests for route information (/routez).
// Pass subs=1 to include each route's subscription list.
//
// Locking: holds the server lock for the whole walk, taking each route's
// lock briefly while copying its stats.
func (s *Server) HandleRoutez(w http.ResponseWriter, r *http.Request) {
	rs := &Routez{Routes: []*RouteInfo{}}
	rs.Now = time.Now()

	// Parse error ignored: a missing/bad value behaves as 0 (no sub list).
	subs, _ := strconv.Atoi(r.URL.Query().Get("subs"))

	// Walk the list
	s.mu.Lock()

	s.httpReqStats[RoutezPath]++
	rs.NumRoutes = len(s.routes)

	// NOTE(review): the loop variable r shadows the *http.Request parameter;
	// inside this loop r is the route client.
	for _, r := range s.routes {
		r.mu.Lock()
		ri := &RouteInfo{
			Rid:          r.cid,
			RemoteID:     r.route.remoteID,
			DidSolicit:   r.route.didSolicit,
			IsConfigured: r.route.routeType == Explicit,
			InMsgs:       atomic.LoadInt64(&r.inMsgs),
			OutMsgs:      r.outMsgs,
			InBytes:      atomic.LoadInt64(&r.inBytes),
			OutBytes:     r.outBytes,
			NumSubs:      uint32(len(r.subs)),
		}

		if subs == 1 {
			sublist := make([]*subscription, 0, len(r.subs))
			for _, sub := range r.subs {
				sublist = append(sublist, sub)
			}
			ri.Subs = castToSliceString(sublist)
		}
		r.mu.Unlock()

		if ip, ok := r.nc.(*net.TCPConn); ok {
			addr := ip.RemoteAddr().(*net.TCPAddr)
			ri.Port = addr.Port
			ri.IP = addr.IP.String()
		}
		rs.Routes = append(rs.Routes, ri)
	}
	s.mu.Unlock()

	b, err := json.MarshalIndent(rs, "", " ")
	if err != nil {
		// NOTE(review): on marshal failure b is nil and we still respond
		// with an empty body; the error is only logged.
		Errorf("Error marshalling response to /routez request: %v", err)
	}

	// Handle response
	ResponseHandler(w, r, b)
}
|
||||
|
||||
// HandleSubsz processes HTTP requests for subject (sublist) stats (/subsz).
func (s *Server) HandleSubsz(w http.ResponseWriter, r *http.Request) {
	// Only the request counter needs the server lock; sublist stats are
	// fetched from the sublist itself.
	s.mu.Lock()
	s.httpReqStats[SubszPath]++
	s.mu.Unlock()

	st := &Subsz{s.sl.Stats()}
	b, err := json.MarshalIndent(st, "", " ")
	if err != nil {
		// NOTE(review): on marshal failure b is nil and we still respond
		// with an empty body; the error is only logged.
		Errorf("Error marshalling response to /subscriptionsz request: %v", err)
	}

	// Handle response
	ResponseHandler(w, r, b)
}
|
||||
|
||||
// HandleStacksz processes HTTP requests for getting stacks
|
||||
func (s *Server) HandleStacksz(w http.ResponseWriter, r *http.Request) {
|
||||
// Do not get any lock here that would prevent getting the stacks
|
||||
// if we were to have a deadlock somewhere.
|
||||
var defaultBuf [defaultStackBufSize]byte
|
||||
size := defaultStackBufSize
|
||||
buf := defaultBuf[:size]
|
||||
n := 0
|
||||
for {
|
||||
n = runtime.Stack(buf, true)
|
||||
if n < size {
|
||||
break
|
||||
}
|
||||
size *= 2
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
// Handle response
|
||||
ResponseHandler(w, r, buf[:n])
|
||||
}
|
||||
|
||||
// Varz will output server information on the monitoring port at /varz.
// It embeds the server's Info and Options (their fields are flattened
// into the JSON output) plus live runtime counters.
// NOTE(review): embedded Info/Options both carry fields tagged "addr" and
// "auth_timeout"; duplicate tags make encoding/json drop the conflicting
// keys from output — confirm against the served payload.
type Varz struct {
	*Info
	*Options
	Port             int               `json:"port"`
	MaxPayload       int               `json:"max_payload"`
	Start            time.Time         `json:"start"`
	Now              time.Time         `json:"now"`
	Uptime           string            `json:"uptime"` // human-readable, via myUptime
	Mem              int64             `json:"mem"`    // resident set size, from updateUsage
	Cores            int               `json:"cores"`
	CPU              float64           `json:"cpu"`
	Connections      int               `json:"connections"`
	TotalConnections uint64            `json:"total_connections"`
	Routes           int               `json:"routes"`
	Remotes          int               `json:"remotes"`
	InMsgs           int64             `json:"in_msgs"`
	OutMsgs          int64             `json:"out_msgs"`
	InBytes          int64             `json:"in_bytes"`
	OutBytes         int64             `json:"out_bytes"`
	SlowConsumers    int64             `json:"slow_consumers"`
	Subscriptions    uint32            `json:"subscriptions"`
	HTTPReqStats     map[string]uint64 `json:"http_req_stats"`
}
|
||||
|
||||
// usage groups process resource figures (CPU percentage, core count,
// resident memory). NOTE(review): appears unused within this file —
// updateUsage writes straight into Varz; verify before relying on it.
type usage struct {
	CPU   float32
	Cores int
	Mem   int64
}
|
||||
|
||||
func myUptime(d time.Duration) string {
|
||||
// Just use total seconds for uptime, and display days / years
|
||||
tsecs := d / time.Second
|
||||
tmins := tsecs / 60
|
||||
thrs := tmins / 60
|
||||
tdays := thrs / 24
|
||||
tyrs := tdays / 365
|
||||
|
||||
if tyrs > 0 {
|
||||
return fmt.Sprintf("%dy%dd%dh%dm%ds", tyrs, tdays%365, thrs%24, tmins%60, tsecs%60)
|
||||
}
|
||||
if tdays > 0 {
|
||||
return fmt.Sprintf("%dd%dh%dm%ds", tdays, thrs%24, tmins%60, tsecs%60)
|
||||
}
|
||||
if thrs > 0 {
|
||||
return fmt.Sprintf("%dh%dm%ds", thrs, tmins%60, tsecs%60)
|
||||
}
|
||||
if tmins > 0 {
|
||||
return fmt.Sprintf("%dm%ds", tmins, tsecs%60)
|
||||
}
|
||||
return fmt.Sprintf("%ds", tsecs)
|
||||
}
|
||||
|
||||
// HandleRoot will show basic info and links to others handlers.
// Serves a small static HTML index at "/" only; any other path is a 404.
func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) {
	// This feels dumb to me, but is required: https://code.google.com/p/go/issues/detail?id=4799
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	s.mu.Lock()
	s.httpReqStats[RootPath]++
	s.mu.Unlock()
	fmt.Fprintf(w, `<html lang="en">
   <head>
    <link rel="shortcut icon" href="http://nats.io/img/favicon.ico">
    <style type="text/css">
      body { font-family: “Century Gothic”, CenturyGothic, AppleGothic, sans-serif; font-size: 22; }
      a { margin-left: 32px; }
    </style>
  </head>
  <body>
    <img src="http://nats.io/img/logo.png" alt="NATS">
    <br/>
	<a href=/varz>varz</a><br/>
	<a href=/connz>connz</a><br/>
	<a href=/routez>routez</a><br/>
	<a href=/subsz>subsz</a><br/>
    <br/>
    <a href=http://nats.io/documentation/server/gnatsd-monitoring/>help</a>
  </body>
</html>`)
}
|
||||
|
||||
// HandleVarz will process HTTP requests for general server information (/varz).
func (s *Server) HandleVarz(w http.ResponseWriter, r *http.Request) {
	v := &Varz{Info: &s.info, Options: s.opts, MaxPayload: s.opts.MaxPayload, Start: s.start}
	v.Now = time.Now()
	v.Uptime = myUptime(time.Since(s.start))
	v.Port = v.Info.Port

	// Fill in process CPU/memory/core figures (no server lock needed).
	updateUsage(v)

	// Copy live counters under the server lock.
	s.mu.Lock()
	v.Connections = len(s.clients)
	v.TotalConnections = s.totalClients
	v.Routes = len(s.routes)
	v.Remotes = len(s.remotes)
	v.InMsgs = s.inMsgs
	v.InBytes = s.inBytes
	v.OutMsgs = s.outMsgs
	v.OutBytes = s.outBytes
	v.SlowConsumers = s.slowConsumers
	v.Subscriptions = s.sl.Count()
	s.httpReqStats[VarzPath]++
	// NOTE(review): this aliases the live map rather than copying it, so the
	// JSON marshal below reads it outside the lock — confirm intended.
	v.HTTPReqStats = s.httpReqStats
	s.mu.Unlock()

	b, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		// NOTE(review): on marshal failure b is nil and we still respond
		// with an empty body; the error is only logged.
		Errorf("Error marshalling response to /varz request: %v", err)
	}

	// Handle response
	ResponseHandler(w, r, b)
}
|
||||
|
||||
// Grab RSS and PCPU
|
||||
func updateUsage(v *Varz) {
|
||||
var rss, vss int64
|
||||
var pcpu float64
|
||||
|
||||
pse.ProcUsage(&pcpu, &rss, &vss)
|
||||
|
||||
v.Mem = rss
|
||||
v.CPU = pcpu
|
||||
v.Cores = numCores
|
||||
}
|
||||
|
||||
// ResponseHandler handles responses for monitoring routes
|
||||
func ResponseHandler(w http.ResponseWriter, r *http.Request, data []byte) {
|
||||
// Get callback from request
|
||||
callback := r.URL.Query().Get("callback")
|
||||
// If callback is not empty then
|
||||
if callback != "" {
|
||||
// Response for JSONP
|
||||
w.Header().Set("Content-Type", "application/javascript")
|
||||
fmt.Fprintf(w, "%s(%s)", callback, data)
|
||||
} else {
|
||||
// Otherwise JSON
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(data)
|
||||
}
|
||||
}
|
||||
50
vendor/github.com/nats-io/gnatsd/server/monitor_sort_opts.go
generated
vendored
Normal file
50
vendor/github.com/nats-io/gnatsd/server/monitor_sort_opts.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
// SortOpt is a helper type to sort by ConnInfo values; it names the accepted
// values of the /connz "sort" query parameter.
type SortOpt string

// Accepted sort keys. Only byCid is explicitly typed; the rest are untyped
// string constants, matching the original declaration.
const (
	byCid      SortOpt = "cid"
	bySubs             = "subs"
	byPending          = "pending"
	byOutMsgs          = "msgs_to"
	byInMsgs           = "msgs_from"
	byOutBytes         = "bytes_to"
	byInBytes          = "bytes_from"
	byLast             = "last"
	byIdle             = "idle"
	byUptime           = "uptime"
)

// IsValid determines if a sort option is valid. The empty string is valid
// (it selects the default sort).
func (s SortOpt) IsValid() bool {
	switch s {
	case "", byCid, bySubs, byPending, byOutMsgs, byInMsgs,
		byOutBytes, byInBytes, byLast, byIdle, byUptime:
		return true
	}
	return false
}
|
||||
|
||||
// Pair type is internally used.
// It binds a client to the int64 sort key snapshotted for it in HandleConnz.
type Pair struct {
	Key *client
	Val int64
}
|
||||
|
||||
// Pairs type is internally used.
// It implements sort.Interface ordering ascending by Val; HandleConnz wraps
// it in sort.Reverse for descending sorts.
type Pairs []Pair

func (d Pairs) Len() int {
	return len(d)
}

func (d Pairs) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}

func (d Pairs) Less(i, j int) bool {
	return d[i].Val < d[j].Val
}
|
||||
665
vendor/github.com/nats-io/gnatsd/server/opts.go
generated
vendored
Normal file
665
vendor/github.com/nats-io/gnatsd/server/opts.go
generated
vendored
Normal file
@@ -0,0 +1,665 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/conf"
|
||||
)
|
||||
|
||||
// User holds a single username/password credential pair; used when the
// configuration defines multiple accounts/users.
type User struct {
	Username string `json:"user"`
	Password string `json:"password"`
}
|
||||
|
||||
// Options block for gnatsd server. Populated from flags and/or a
// configuration file via ProcessConfigFile; embedded into Varz for /varz.
// NOTE(review): Host/ClusterHost both carry json tag "addr" and
// AuthTimeout/ClusterAuthTimeout both carry "auth_timeout"; encoding/json
// drops conflicting duplicate keys when marshaling — confirm against the
// /varz output before relying on those fields there.
type Options struct {
	Host               string        `json:"addr"`
	Port               int           `json:"port"`
	Trace              bool          `json:"-"`
	Debug              bool          `json:"-"`
	NoLog              bool          `json:"-"`
	NoSigs             bool          `json:"-"`
	Logtime            bool          `json:"-"`
	MaxConn            int           `json:"max_connections"`
	Users              []User        `json:"-"` // multi-user credentials (mutually exclusive with Username/Password)
	Username           string        `json:"-"`
	Password           string        `json:"-"`
	Authorization      string        `json:"-"`
	PingInterval       time.Duration `json:"ping_interval"`
	MaxPingsOut        int           `json:"ping_max"`
	HTTPHost           string        `json:"http_host"`
	HTTPPort           int           `json:"http_port"`
	HTTPSPort          int           `json:"https_port"`
	AuthTimeout        float64       `json:"auth_timeout"` // seconds
	MaxControlLine     int           `json:"max_control_line"`
	MaxPayload         int           `json:"max_payload"`
	MaxPending         int           `json:"max_pending_size"`
	ClusterHost        string        `json:"addr"`
	ClusterPort        int           `json:"cluster_port"`
	ClusterUsername    string        `json:"-"`
	ClusterPassword    string        `json:"-"`
	ClusterAuthTimeout float64       `json:"auth_timeout"`
	ClusterTLSTimeout  float64       `json:"-"`
	ClusterTLSConfig   *tls.Config   `json:"-"`
	ClusterListenStr   string        `json:"-"`
	ProfPort           int           `json:"-"`
	PidFile            string        `json:"-"`
	LogFile            string        `json:"-"`
	Syslog             bool          `json:"-"`
	RemoteSyslog       string        `json:"-"`
	Routes             []*url.URL    `json:"-"`
	RoutesStr          string        `json:"-"`
	TLSTimeout         float64       `json:"tls_timeout"` // seconds
	TLS                bool          `json:"-"`
	TLSVerify          bool          `json:"-"`
	TLSCert            string        `json:"-"`
	TLSKey             string        `json:"-"`
	TLSCaCert          string        `json:"-"`
	TLSConfig          *tls.Config   `json:"-"`
}
|
||||
|
||||
// authorization is the parsed form of an "authorization" config section.
// Either the single user/pass pair or the users list is set, never both
// (enforced by ProcessConfigFile/parseCluster).
type authorization struct {
	// Singles
	user string
	pass string
	// Multiple Users
	users []User
	// timeout is the auth timeout in seconds.
	timeout float64
}
|
||||
|
||||
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing. It is later turned into a *tls.Config by
// GenTLSConfig.
type TLSConfigOpts struct {
	CertFile string
	KeyFile  string
	CaFile   string
	Verify   bool
	// Timeout is the TLS handshake timeout in seconds.
	Timeout float64
	// Ciphers lists the permitted cipher suite IDs (see cipherMap).
	Ciphers []uint16
}
|
||||
|
||||
// tlsUsage is the help text printed by PrintTLSHelpAndDie; the list of
// available cipher suites is appended at print time from cipherMap.
var tlsUsage = `
TLS configuration is specified in the tls section of a configuration file:

e.g.

    tls {
        cert_file: "./certs/server-cert.pem"
        key_file:  "./certs/server-key.pem"
        ca_file:   "./certs/ca.pem"
        verify:    true

        cipher_suites: [
            "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
            "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
        ]
    }

Available cipher suites include:
`
|
||||
|
||||
// ProcessConfigFile processes a configuration file.
// An empty path yields zero-value Options with no error. Unknown keys are
// silently ignored.
// NOTE(review): values are converted with unchecked type assertions
// (v.(int64), v.(string), ...), so a config with a wrong value type panics
// rather than returning an error — confirm conf.Parse's value types.
// FIXME(dlc): Hacky
func ProcessConfigFile(configFile string) (*Options, error) {
	opts := &Options{}

	if configFile == "" {
		return opts, nil
	}

	data, err := ioutil.ReadFile(configFile)
	if err != nil {
		return nil, fmt.Errorf("error opening config file: %v", err)
	}

	m, err := conf.Parse(string(data))
	if err != nil {
		return nil, err
	}

	for k, v := range m {
		switch strings.ToLower(k) {
		case "listen":
			hp, err := parseListen(v)
			if err != nil {
				return nil, err
			}
			opts.Host = hp.host
			opts.Port = hp.port
		case "port":
			opts.Port = int(v.(int64))
		case "host", "net":
			opts.Host = v.(string)
		case "debug":
			opts.Debug = v.(bool)
		case "trace":
			opts.Trace = v.(bool)
		case "logtime":
			opts.Logtime = v.(bool)
		case "authorization":
			am := v.(map[string]interface{})
			auth, err := parseAuthorization(am)
			if err != nil {
				return nil, err
			}
			opts.Username = auth.user
			opts.Password = auth.pass
			opts.AuthTimeout = auth.timeout
			// Check for multiple users defined
			if auth.users != nil {
				if auth.user != "" {
					return nil, fmt.Errorf("Can not have a single user/pass and a users array")
				}
				opts.Users = auth.users
			}
		case "http":
			hp, err := parseListen(v)
			if err != nil {
				return nil, err
			}
			opts.HTTPHost = hp.host
			opts.HTTPPort = hp.port
		case "https":
			hp, err := parseListen(v)
			if err != nil {
				return nil, err
			}
			opts.HTTPHost = hp.host
			opts.HTTPSPort = hp.port
		case "http_port", "monitor_port":
			opts.HTTPPort = int(v.(int64))
		case "https_port":
			opts.HTTPSPort = int(v.(int64))
		case "cluster":
			cm := v.(map[string]interface{})
			if err := parseCluster(cm, opts); err != nil {
				return nil, err
			}
		case "logfile", "log_file":
			opts.LogFile = v.(string)
		case "syslog":
			opts.Syslog = v.(bool)
		case "remote_syslog":
			opts.RemoteSyslog = v.(string)
		case "pidfile", "pid_file":
			opts.PidFile = v.(string)
		case "prof_port":
			opts.ProfPort = int(v.(int64))
		case "max_control_line":
			opts.MaxControlLine = int(v.(int64))
		case "max_payload":
			opts.MaxPayload = int(v.(int64))
		case "max_pending_size", "max_pending":
			opts.MaxPending = int(v.(int64))
		case "max_connections", "max_conn":
			opts.MaxConn = int(v.(int64))
		case "tls":
			tlsm := v.(map[string]interface{})
			tc, err := parseTLS(tlsm)
			if err != nil {
				return nil, err
			}
			if opts.TLSConfig, err = GenTLSConfig(tc); err != nil {
				return nil, err
			}
			opts.TLSTimeout = tc.Timeout
		}
	}
	return opts, nil
}
|
||||
|
||||
// hostPort is simple struct to hold parsed listen/addr strings.
type hostPort struct {
	host string
	port int
}

// parseListen will parse listen option which is replacing host/net and port.
// Accepted forms: a bare int64 (port only, host left empty) or a
// "host:port" string. Any other dynamic type yields a zero hostPort and no
// error, matching the original behavior.
func parseListen(v interface{}) (*hostPort, error) {
	hp := &hostPort{}
	// Idiomatic binding type switch: avoids re-asserting v in each case.
	switch lv := v.(type) {
	case int64:
		// Only a port
		hp.port = int(lv)
	case string:
		host, port, err := net.SplitHostPort(lv)
		if err != nil {
			return nil, fmt.Errorf("Could not parse address string %q", lv)
		}
		hp.port, err = strconv.Atoi(port)
		if err != nil {
			return nil, fmt.Errorf("Could not parse port %q", port)
		}
		hp.host = host
	}
	return hp, nil
}
|
||||
|
||||
// parseCluster will parse the cluster config section into opts.
// Unknown keys are silently ignored; value types are converted with
// unchecked type assertions (panic on mismatch — see ProcessConfigFile note).
func parseCluster(cm map[string]interface{}, opts *Options) error {
	for mk, mv := range cm {
		switch strings.ToLower(mk) {
		case "listen":
			hp, err := parseListen(mv)
			if err != nil {
				return err
			}
			opts.ClusterHost = hp.host
			opts.ClusterPort = hp.port
		case "port":
			opts.ClusterPort = int(mv.(int64))
		case "host", "net":
			opts.ClusterHost = mv.(string)
		case "authorization":
			am := mv.(map[string]interface{})
			auth, err := parseAuthorization(am)
			if err != nil {
				return err
			}
			// Cluster auth supports a single credential pair only.
			if auth.users != nil {
				return fmt.Errorf("Cluster authorization does not allow multiple users")
			}
			opts.ClusterUsername = auth.user
			opts.ClusterPassword = auth.pass
			opts.ClusterAuthTimeout = auth.timeout
		case "routes":
			ra := mv.([]interface{})
			opts.Routes = make([]*url.URL, 0, len(ra))
			for _, r := range ra {
				routeURL := r.(string)
				// NOTE(review): local 'url' shadows the net/url package
				// for the rest of this iteration.
				url, err := url.Parse(routeURL)
				if err != nil {
					return fmt.Errorf("error parsing route url [%q]", routeURL)
				}
				opts.Routes = append(opts.Routes, url)
			}
		case "tls":
			tlsm := mv.(map[string]interface{})
			tc, err := parseTLS(tlsm)
			if err != nil {
				return err
			}
			if opts.ClusterTLSConfig, err = GenTLSConfig(tc); err != nil {
				return err
			}
			// For clusters, we will force strict verification. We also act
			// as both client and server, so will mirror the rootCA to the
			// clientCA pool.
			opts.ClusterTLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
			opts.ClusterTLSConfig.ClientCAs = opts.ClusterTLSConfig.RootCAs
			opts.ClusterTLSTimeout = tc.Timeout
		}
	}
	return nil
}
|
||||
|
||||
// Helper function to parse Authorization configs.
|
||||
func parseAuthorization(am map[string]interface{}) (*authorization, error) {
|
||||
auth := &authorization{}
|
||||
for mk, mv := range am {
|
||||
switch strings.ToLower(mk) {
|
||||
case "user", "username":
|
||||
auth.user = mv.(string)
|
||||
case "pass", "password":
|
||||
auth.pass = mv.(string)
|
||||
case "timeout":
|
||||
at := float64(1)
|
||||
switch mv.(type) {
|
||||
case int64:
|
||||
at = float64(mv.(int64))
|
||||
case float64:
|
||||
at = mv.(float64)
|
||||
}
|
||||
auth.timeout = at
|
||||
case "users":
|
||||
b, _ := json.Marshal(mv)
|
||||
users := []User{}
|
||||
if err := json.Unmarshal(b, &users); err != nil {
|
||||
return nil, fmt.Errorf("Could not parse user array properly, %v", err)
|
||||
}
|
||||
auth.users = users
|
||||
}
|
||||
}
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
// PrintTLSHelpAndDie prints TLS usage and exits.
|
||||
func PrintTLSHelpAndDie() {
|
||||
fmt.Printf("%s\n", tlsUsage)
|
||||
for k := range cipherMap {
|
||||
fmt.Printf(" %s\n", k)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func parseCipher(cipherName string) (uint16, error) {
|
||||
|
||||
cipher, exists := cipherMap[cipherName]
|
||||
if !exists {
|
||||
return 0, fmt.Errorf("Unrecognized cipher %s", cipherName)
|
||||
}
|
||||
|
||||
return cipher, nil
|
||||
}
|
||||
|
||||
// Helper function to parse TLS configs.
//
// parseTLS converts the raw "tls { ... }" config map into TLSConfigOpts,
// validating each field's type and resolving cipher names as it goes.
// Unknown fields are an error; unspecified cipher suites fall back to
// the package defaults.
func parseTLS(tlsm map[string]interface{}) (*TLSConfigOpts, error) {
	tc := TLSConfigOpts{}
	for mk, mv := range tlsm {
		switch strings.ToLower(mk) {
		case "cert_file":
			certFile, ok := mv.(string)
			if !ok {
				return nil, fmt.Errorf("error parsing tls config, expected 'cert_file' to be filename")
			}
			tc.CertFile = certFile
		case "key_file":
			keyFile, ok := mv.(string)
			if !ok {
				return nil, fmt.Errorf("error parsing tls config, expected 'key_file' to be filename")
			}
			tc.KeyFile = keyFile
		case "ca_file":
			caFile, ok := mv.(string)
			if !ok {
				return nil, fmt.Errorf("error parsing tls config, expected 'ca_file' to be filename")
			}
			tc.CaFile = caFile
		case "verify":
			verify, ok := mv.(bool)
			if !ok {
				return nil, fmt.Errorf("error parsing tls config, expected 'verify' to be a boolean")
			}
			tc.Verify = verify
		case "cipher_suites":
			// Note: unlike the fields above, a wrong type here panics
			// (unchecked assertions) rather than returning an error.
			ra := mv.([]interface{})
			if len(ra) == 0 {
				return nil, fmt.Errorf("error parsing tls config, 'cipher_suites' cannot be empty")
			}
			tc.Ciphers = make([]uint16, 0, len(ra))
			for _, r := range ra {
				cipher, err := parseCipher(r.(string))
				if err != nil {
					return nil, err
				}
				tc.Ciphers = append(tc.Ciphers, cipher)
			}
		case "timeout":
			// Timeout may be decoded as int64 or float64 (seconds);
			// other types silently keep the zero default.
			at := float64(0)
			switch mv.(type) {
			case int64:
				at = float64(mv.(int64))
			case float64:
				at = mv.(float64)
			}
			tc.Timeout = at
		default:
			return nil, fmt.Errorf("error parsing tls config, unknown field [%q]", mk)
		}
	}

	// If cipher suites were not specified then use the defaults
	if tc.Ciphers == nil {
		tc.Ciphers = defaultCipherSuites()
	}

	return &tc, nil
}
|
||||
|
||||
// GenTLSConfig loads TLS related configuration parameters.
|
||||
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
|
||||
|
||||
// Now load in cert and private key
|
||||
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
|
||||
}
|
||||
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing certificate: %v", err)
|
||||
}
|
||||
|
||||
// Create TLSConfig
|
||||
// We will determine the cipher suites that we prefer.
|
||||
config := tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
PreferServerCipherSuites: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: tc.Ciphers,
|
||||
}
|
||||
|
||||
// Require client certificates as needed
|
||||
if tc.Verify {
|
||||
config.ClientAuth = tls.RequireAnyClientCert
|
||||
}
|
||||
// Add in CAs if applicable.
|
||||
if tc.CaFile != "" {
|
||||
rootPEM, err := ioutil.ReadFile(tc.CaFile)
|
||||
if err != nil || rootPEM == nil {
|
||||
return nil, err
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
ok := pool.AppendCertsFromPEM([]byte(rootPEM))
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to parse root ca certificate")
|
||||
}
|
||||
config.RootCAs = pool
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// MergeOptions will merge two options giving preference to the flagOpts
// if the item is present.
//
// The merge works on a copy of fileOpts, so neither input is mutated.
// A flag value only overrides when it is non-zero/non-empty, and the
// boolean flags (Debug, Trace, Logtime) can only be switched on here,
// never off. Routes are overlaid via mergeRoutes on the copy.
func MergeOptions(fileOpts, flagOpts *Options) *Options {
	if fileOpts == nil {
		return flagOpts
	}
	if flagOpts == nil {
		return fileOpts
	}
	// Merge the two, flagOpts override
	opts := *fileOpts

	if flagOpts.Port != 0 {
		opts.Port = flagOpts.Port
	}
	if flagOpts.Host != "" {
		opts.Host = flagOpts.Host
	}
	if flagOpts.Username != "" {
		opts.Username = flagOpts.Username
	}
	if flagOpts.Password != "" {
		opts.Password = flagOpts.Password
	}
	if flagOpts.Authorization != "" {
		opts.Authorization = flagOpts.Authorization
	}
	if flagOpts.HTTPPort != 0 {
		opts.HTTPPort = flagOpts.HTTPPort
	}
	if flagOpts.Debug {
		opts.Debug = true
	}
	if flagOpts.Trace {
		opts.Trace = true
	}
	if flagOpts.Logtime {
		opts.Logtime = true
	}
	if flagOpts.LogFile != "" {
		opts.LogFile = flagOpts.LogFile
	}
	if flagOpts.PidFile != "" {
		opts.PidFile = flagOpts.PidFile
	}
	if flagOpts.ProfPort != 0 {
		opts.ProfPort = flagOpts.ProfPort
	}
	if flagOpts.RoutesStr != "" {
		mergeRoutes(&opts, flagOpts)
	}
	return &opts
}
|
||||
|
||||
// RoutesFromStr parses route URLs from a comma-separated string.
// Entries are trimmed of surrounding whitespace; entries that fail to
// parse are skipped.
func RoutesFromStr(routesStr string) []*url.URL {
	routes := strings.Split(routesStr, ",")
	if len(routes) == 0 {
		return nil
	}
	routeUrls := []*url.URL{}
	for _, r := range routes {
		r = strings.TrimSpace(r)
		u, err := url.Parse(r)
		if err != nil {
			// BUG FIX: the original ignored the error and appended the
			// resulting nil *url.URL, which panics downstream (e.g. in
			// RemoveSelfReference's r.Host access). Skip bad entries.
			continue
		}
		routeUrls = append(routeUrls, u)
	}
	return routeUrls
}
|
||||
|
||||
// This will merge the flag routes and override anything that was present.
|
||||
func mergeRoutes(opts, flagOpts *Options) {
|
||||
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
|
||||
if routeUrls == nil {
|
||||
return
|
||||
}
|
||||
opts.Routes = routeUrls
|
||||
opts.RoutesStr = flagOpts.RoutesStr
|
||||
}
|
||||
|
||||
// RemoveSelfReference removes this server from an array of routes
|
||||
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
|
||||
var cleanRoutes []*url.URL
|
||||
cport := strconv.Itoa(clusterPort)
|
||||
|
||||
selfIPs := getInterfaceIPs()
|
||||
for _, r := range routes {
|
||||
host, port, err := net.SplitHostPort(r.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cport == port && isIPInList(selfIPs, getURLIP(host)) {
|
||||
Noticef("Self referencing IP found: ", r)
|
||||
continue
|
||||
}
|
||||
cleanRoutes = append(cleanRoutes, r)
|
||||
}
|
||||
|
||||
return cleanRoutes, nil
|
||||
}
|
||||
|
||||
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
|
||||
for _, ip1 := range list1 {
|
||||
for _, ip2 := range list2 {
|
||||
if ip1.Equal(ip2) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getURLIP(ipStr string) []net.IP {
|
||||
ipList := []net.IP{}
|
||||
|
||||
ip := net.ParseIP(ipStr)
|
||||
if ip != nil {
|
||||
ipList = append(ipList, ip)
|
||||
return ipList
|
||||
}
|
||||
|
||||
hostAddr, err := net.LookupHost(ipStr)
|
||||
if err != nil {
|
||||
Errorf("Error looking up host with route hostname: %v", err)
|
||||
return ipList
|
||||
}
|
||||
for _, addr := range hostAddr {
|
||||
ip = net.ParseIP(addr)
|
||||
if ip != nil {
|
||||
ipList = append(ipList, ip)
|
||||
}
|
||||
}
|
||||
return ipList
|
||||
}
|
||||
|
||||
func getInterfaceIPs() []net.IP {
|
||||
var localIPs []net.IP
|
||||
|
||||
interfaceAddr, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
Errorf("Error getting self referencing address: %v", err)
|
||||
return localIPs
|
||||
}
|
||||
|
||||
for i := 0; i < len(interfaceAddr); i++ {
|
||||
interfaceIP, _, _ := net.ParseCIDR(interfaceAddr[i].String())
|
||||
if net.ParseIP(interfaceIP.String()) != nil {
|
||||
localIPs = append(localIPs, interfaceIP)
|
||||
} else {
|
||||
Errorf("Error parsing self referencing address: %v", err)
|
||||
}
|
||||
}
|
||||
return localIPs
|
||||
}
|
||||
|
||||
// processOptions fills in any unset (zero-valued) fields of opts with
// the server's defaults, normalizing the options before use. Timeouts
// are stored as float64 seconds.
func processOptions(opts *Options) {
	// Setup non-standard Go defaults
	if opts.Host == "" {
		opts.Host = DEFAULT_HOST
	}
	if opts.Port == 0 {
		opts.Port = DEFAULT_PORT
	} else if opts.Port == RANDOM_PORT {
		// Choose randomly inside of net.Listen
		opts.Port = 0
	}
	if opts.MaxConn == 0 {
		opts.MaxConn = DEFAULT_MAX_CONNECTIONS
	}
	if opts.PingInterval == 0 {
		opts.PingInterval = DEFAULT_PING_INTERVAL
	}
	if opts.MaxPingsOut == 0 {
		opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
	}
	// Convert the duration constants to seconds for the float fields.
	if opts.TLSTimeout == 0 {
		opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
	}
	if opts.AuthTimeout == 0 {
		opts.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
	}
	if opts.ClusterHost == "" {
		opts.ClusterHost = DEFAULT_HOST
	}
	if opts.ClusterTLSTimeout == 0 {
		opts.ClusterTLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
	}
	if opts.ClusterAuthTimeout == 0 {
		opts.ClusterAuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
	}
	if opts.MaxControlLine == 0 {
		opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
	}
	if opts.MaxPayload == 0 {
		opts.MaxPayload = MAX_PAYLOAD_SIZE
	}
	if opts.MaxPending == 0 {
		opts.MaxPending = MAX_PENDING_SIZE
	}
}
|
||||
715
vendor/github.com/nats-io/gnatsd/server/parser.go
generated
vendored
Normal file
715
vendor/github.com/nats-io/gnatsd/server/parser.go
generated
vendored
Normal file
@@ -0,0 +1,715 @@
|
||||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// pubArg holds the parsed arguments of a PUB (or routed MSG) protocol
// line while the payload is being read.
type pubArg struct {
	subject []byte // destination subject
	reply   []byte // optional reply subject
	sid     []byte // subscription id (parsed for routed MSG lines)
	szb     []byte // raw ASCII size bytes as received on the wire
	size    int    // parsed payload size in bytes
}
|
||||
|
||||
// parseState carries the protocol parser's state across reads so that
// a protocol line or message payload split over multiple read buffers
// can be resumed.
type parseState struct {
	state   int    // current state-machine state (OP_* constants below)
	as      int    // argument start index into the current read buffer
	drop    int    // trailing bytes (the '\r') to drop from an argument
	pa      pubArg // parsed PUB/MSG arguments for the pending payload
	argBuf  []byte // saved argument bytes when an arg spans two reads
	msgBuf  []byte // saved payload bytes when a payload spans two reads
	scratch [MAX_CONTROL_LINE_SIZE]byte // backing storage for argBuf/msgBuf
}
|
||||
|
||||
// Parser constants
//
// States of the byte-at-a-time protocol state machine in parse().
// OP_* states spell out a verb one byte at a time; *_ARG states
// accumulate the verb's argument line; MSG_PAYLOAD/MSG_END consume a
// message body. Order matters: values are assigned by iota.
const (
	// Start state, +OK and -ERR replies.
	OP_START = iota
	OP_PLUS
	OP_PLUS_O
	OP_PLUS_OK
	OP_MINUS
	OP_MINUS_E
	OP_MINUS_ER
	OP_MINUS_ERR
	OP_MINUS_ERR_SPC
	MINUS_ERR_ARG
	// CONNECT.
	OP_C
	OP_CO
	OP_CON
	OP_CONN
	OP_CONNE
	OP_CONNEC
	OP_CONNECT
	CONNECT_ARG
	// PUB.
	OP_P
	OP_PU
	OP_PUB
	OP_PUB_SPC
	PUB_ARG
	// PING / PONG.
	OP_PI
	OP_PIN
	OP_PING
	OP_PO
	OP_PON
	OP_PONG
	// Message payload consumption (shared by PUB and routed MSG).
	MSG_PAYLOAD
	MSG_END
	// SUB.
	OP_S
	OP_SU
	OP_SUB
	OP_SUB_SPC
	SUB_ARG
	// UNSUB.
	OP_U
	OP_UN
	OP_UNS
	OP_UNSU
	OP_UNSUB
	OP_UNSUB_SPC
	UNSUB_ARG
	// Routed MSG (rejected for CLIENT connections).
	OP_M
	OP_MS
	OP_MSG
	OP_MSG_SPC
	MSG_ARG
	// INFO.
	OP_I
	OP_IN
	OP_INF
	OP_INFO
	INFO_ARG
)
|
||||
|
||||
func (c *client) parse(buf []byte) error {
|
||||
var i int
|
||||
var b byte
|
||||
|
||||
// snapshot this, and reset when we receive a
|
||||
// proper CONNECT if needed.
|
||||
authSet := c.isAuthTimerSet()
|
||||
|
||||
// Move to loop instead of range syntax to allow jumping of i
|
||||
for i = 0; i < len(buf); i++ {
|
||||
b = buf[i]
|
||||
|
||||
switch c.state {
|
||||
case OP_START:
|
||||
if b != 'C' && b != 'c' && authSet {
|
||||
goto authErr
|
||||
}
|
||||
switch b {
|
||||
case 'P', 'p':
|
||||
c.state = OP_P
|
||||
case 'S', 's':
|
||||
c.state = OP_S
|
||||
case 'U', 'u':
|
||||
c.state = OP_U
|
||||
case 'M', 'm':
|
||||
if c.typ == CLIENT {
|
||||
goto parseErr
|
||||
} else {
|
||||
c.state = OP_M
|
||||
}
|
||||
case 'C', 'c':
|
||||
c.state = OP_C
|
||||
case 'I', 'i':
|
||||
c.state = OP_I
|
||||
case '+':
|
||||
c.state = OP_PLUS
|
||||
case '-':
|
||||
c.state = OP_MINUS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_P:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_PU
|
||||
case 'I', 'i':
|
||||
c.state = OP_PI
|
||||
case 'O', 'o':
|
||||
c.state = OP_PO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_PUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_PUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = PUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case PUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processPub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, MSG_PAYLOAD
|
||||
// If we don't have a saved buffer then jump ahead with
|
||||
// the index. If this overruns what is left we fall out
|
||||
// and process split buffer.
|
||||
if c.msgBuf == nil {
|
||||
i = c.as + c.pa.size - LEN_CR_LF
|
||||
}
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case MSG_PAYLOAD:
|
||||
if c.msgBuf != nil {
|
||||
// copy as much as we can to the buffer and skip ahead.
|
||||
toCopy := c.pa.size - len(c.msgBuf)
|
||||
avail := len(buf) - i
|
||||
if avail < toCopy {
|
||||
toCopy = avail
|
||||
}
|
||||
if toCopy > 0 {
|
||||
start := len(c.msgBuf)
|
||||
// This is needed for copy to work.
|
||||
c.msgBuf = c.msgBuf[:start+toCopy]
|
||||
copy(c.msgBuf[start:], buf[i:i+toCopy])
|
||||
// Update our index
|
||||
i = (i + toCopy) - 1
|
||||
} else {
|
||||
// Fall back to append if needed.
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
}
|
||||
if len(c.msgBuf) >= c.pa.size {
|
||||
c.state = MSG_END
|
||||
}
|
||||
} else if i-c.as >= c.pa.size {
|
||||
c.state = MSG_END
|
||||
}
|
||||
case MSG_END:
|
||||
switch b {
|
||||
case '\n':
|
||||
if c.msgBuf != nil {
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
} else {
|
||||
c.msgBuf = buf[c.as : i+1]
|
||||
}
|
||||
// strict check for proto
|
||||
if len(c.msgBuf) != c.pa.size+LEN_CR_LF {
|
||||
goto parseErr
|
||||
}
|
||||
c.processMsg(c.msgBuf)
|
||||
c.argBuf, c.msgBuf = nil, nil
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.msgBuf != nil {
|
||||
c.msgBuf = append(c.msgBuf, b)
|
||||
}
|
||||
continue
|
||||
}
|
||||
case OP_S:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_SU
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_SUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_SUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_SUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = SUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case SUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processSub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_U:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_UN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UN:
|
||||
switch b {
|
||||
case 'S', 's':
|
||||
c.state = OP_UNS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNS:
|
||||
switch b {
|
||||
case 'U', 'u':
|
||||
c.state = OP_UNSU
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSU:
|
||||
switch b {
|
||||
case 'B', 'b':
|
||||
c.state = OP_UNSUB
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSUB:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_UNSUB_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_UNSUB_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = UNSUB_ARG
|
||||
c.as = i
|
||||
}
|
||||
case UNSUB_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processUnsub(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_PI:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_PIN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PIN:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_PING
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PING:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.processPing()
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_PO:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_PON
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PON:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_PONG
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PONG:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.processPong()
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_C:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_CO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CO:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_CON
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CON:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_CONN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONN:
|
||||
switch b {
|
||||
case 'E', 'e':
|
||||
c.state = OP_CONNE
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNE:
|
||||
switch b {
|
||||
case 'C', 'c':
|
||||
c.state = OP_CONNEC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNEC:
|
||||
switch b {
|
||||
case 'T', 't':
|
||||
c.state = OP_CONNECT
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_CONNECT:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = CONNECT_ARG
|
||||
c.as = i
|
||||
}
|
||||
case CONNECT_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processConnect(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.state = 0, OP_START
|
||||
// Reset notion on authSet
|
||||
authSet = c.isAuthTimerSet()
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_M:
|
||||
switch b {
|
||||
case 'S', 's':
|
||||
c.state = OP_MS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MS:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
c.state = OP_MSG
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MSG:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_MSG_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MSG_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = MSG_ARG
|
||||
c.as = i
|
||||
}
|
||||
case MSG_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processMsgArgs(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, MSG_PAYLOAD
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_I:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
c.state = OP_IN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_IN:
|
||||
switch b {
|
||||
case 'F', 'f':
|
||||
c.state = OP_INF
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_INF:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_INFO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_INFO:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = INFO_ARG
|
||||
c.as = i
|
||||
}
|
||||
case INFO_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
if err := c.processInfo(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_PLUS:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
c.state = OP_PLUS_O
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PLUS_O:
|
||||
switch b {
|
||||
case 'K', 'k':
|
||||
c.state = OP_PLUS_OK
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PLUS_OK:
|
||||
switch b {
|
||||
case '\n':
|
||||
c.drop, c.state = 0, OP_START
|
||||
}
|
||||
case OP_MINUS:
|
||||
switch b {
|
||||
case 'E', 'e':
|
||||
c.state = OP_MINUS_E
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_E:
|
||||
switch b {
|
||||
case 'R', 'r':
|
||||
c.state = OP_MINUS_ER
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ER:
|
||||
switch b {
|
||||
case 'R', 'r':
|
||||
c.state = OP_MINUS_ERR
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ERR:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
c.state = OP_MINUS_ERR_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ERR_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
c.state = MINUS_ERR_ARG
|
||||
c.as = i
|
||||
}
|
||||
case MINUS_ERR_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
c.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if c.argBuf != nil {
|
||||
arg = c.argBuf
|
||||
c.argBuf = nil
|
||||
} else {
|
||||
arg = buf[c.as : i-c.drop]
|
||||
}
|
||||
c.processErr(string(arg))
|
||||
c.drop, c.as, c.state = 0, i+1, OP_START
|
||||
default:
|
||||
if c.argBuf != nil {
|
||||
c.argBuf = append(c.argBuf, b)
|
||||
}
|
||||
}
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
}
|
||||
// Check for split buffer scenarios for any ARG state.
|
||||
if (c.state == SUB_ARG || c.state == UNSUB_ARG || c.state == PUB_ARG ||
|
||||
c.state == MSG_ARG || c.state == MINUS_ERR_ARG ||
|
||||
c.state == CONNECT_ARG || c.state == INFO_ARG) && c.argBuf == nil {
|
||||
c.argBuf = c.scratch[:0]
|
||||
c.argBuf = append(c.argBuf, buf[c.as:i-c.drop]...)
|
||||
// FIXME(dlc), check max control line len
|
||||
}
|
||||
// Check for split msg
|
||||
if (c.state == MSG_PAYLOAD || c.state == MSG_END) && c.msgBuf == nil {
|
||||
// We need to clone the pubArg if it is still referencing the
|
||||
// read buffer and we are not able to process the msg.
|
||||
if c.argBuf == nil {
|
||||
c.clonePubArg()
|
||||
}
|
||||
|
||||
// If we will overflow the scratch buffer, just create a
|
||||
// new buffer to hold the split message.
|
||||
if c.pa.size > cap(c.scratch)-len(c.argBuf) {
|
||||
lrem := len(buf[c.as:])
|
||||
|
||||
// Consider it a protocol error when the remaining payload
|
||||
// is larger than the reported size for PUB. It can happen
|
||||
// when processing incomplete messages from rogue clients.
|
||||
if lrem > c.pa.size+LEN_CR_LF {
|
||||
goto parseErr
|
||||
}
|
||||
c.msgBuf = make([]byte, lrem, c.pa.size+LEN_CR_LF)
|
||||
copy(c.msgBuf, buf[c.as:])
|
||||
} else {
|
||||
c.msgBuf = c.scratch[len(c.argBuf):len(c.argBuf)]
|
||||
c.msgBuf = append(c.msgBuf, (buf[c.as:])...)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
authErr:
|
||||
c.authViolation()
|
||||
return ErrAuthorization
|
||||
|
||||
parseErr:
|
||||
c.sendErr("Unknown Protocol Operation")
|
||||
snip := protoSnippet(i, buf)
|
||||
err := fmt.Errorf("%s Parser ERROR, state=%d, i=%d: proto='%s...'",
|
||||
c.typeString(), c.state, i, snip)
|
||||
return err
|
||||
}
|
||||
|
||||
func protoSnippet(start int, buf []byte) string {
|
||||
stop := start + PROTO_SNIPPET_SIZE
|
||||
bufSize := len(buf)
|
||||
if start >= bufSize {
|
||||
return `""`
|
||||
}
|
||||
if stop > bufSize {
|
||||
stop = bufSize - 1
|
||||
}
|
||||
return fmt.Sprintf("%q", buf[start:stop])
|
||||
}
|
||||
|
||||
// clonePubArg is used when the split buffer scenario has the pubArg in the existing read buffer, but
// we need to hold onto it into the next read.
func (c *client) clonePubArg() {
	// Copy all four pa slices back-to-back into scratch (via argBuf)...
	c.argBuf = c.scratch[:0]
	c.argBuf = append(c.argBuf, c.pa.subject...)
	c.argBuf = append(c.argBuf, c.pa.reply...)
	c.argBuf = append(c.argBuf, c.pa.sid...)
	c.argBuf = append(c.argBuf, c.pa.szb...)

	// ...then repoint each pa field at its copied region, so nothing
	// keeps referencing the read buffer that is about to be reused.
	// Offsets below rely on the append order above.
	c.pa.subject = c.argBuf[:len(c.pa.subject)]

	if c.pa.reply != nil {
		c.pa.reply = c.argBuf[len(c.pa.subject) : len(c.pa.subject)+len(c.pa.reply)]
	}

	if c.pa.sid != nil {
		c.pa.sid = c.argBuf[len(c.pa.subject)+len(c.pa.reply) : len(c.pa.subject)+len(c.pa.reply)+len(c.pa.sid)]
	}

	c.pa.szb = c.argBuf[len(c.pa.subject)+len(c.pa.reply)+len(c.pa.sid):]
}
|
||||
23
vendor/github.com/nats-io/gnatsd/server/pse/pse_darwin.go
generated
vendored
Normal file
23
vendor/github.com/nats-io/gnatsd/server/pse/pse_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// ProcUsage samples this process's CPU and memory usage by shelling
// out to ps(1) (darwin exposes no /proc). pcpu is percent CPU; rss and
// vss are converted from ps's 1k blocks to bytes. On failure rss and
// vss are set to -1 and an error is returned.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	pid := fmt.Sprintf("%d", os.Getpid())
	out, err := exec.Command("ps", "o", "pcpu=,rss=,vsz=", "-p", pid).Output()
	if err != nil {
		*rss, *vss = -1, -1
		return errors.New(fmt.Sprintf("ps call failed:%v", err))
	}
	fmt.Sscanf(string(out), "%f %d %d", pcpu, rss, vss)
	*rss *= 1024 // ps reports 1k blocks; convert to bytes.
	*vss *= 1024 // ps reports 1k blocks; convert to bytes.
	return nil
}
|
||||
72
vendor/github.com/nats-io/gnatsd/server/pse/pse_freebsd.go
generated
vendored
Normal file
72
vendor/github.com/nats-io/gnatsd/server/pse/pse_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
/*
|
||||
#include <sys/types.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/user.h>
|
||||
#include <stddef.h>
|
||||
#include <unistd.h>
|
||||
|
||||
long pagetok(long size)
|
||||
{
|
||||
int pageshift, pagesize;
|
||||
|
||||
pagesize = getpagesize();
|
||||
pageshift = 0;
|
||||
|
||||
while (pagesize > 1) {
|
||||
pageshift++;
|
||||
pagesize >>= 1;
|
||||
}
|
||||
|
||||
return (size << pageshift);
|
||||
}
|
||||
|
||||
int getusage(double *pcpu, unsigned int *rss, unsigned int *vss)
|
||||
{
|
||||
int mib[4], ret;
|
||||
size_t len;
|
||||
struct kinfo_proc kp;
|
||||
|
||||
len = 4;
|
||||
sysctlnametomib("kern.proc.pid", mib, &len);
|
||||
|
||||
mib[3] = getpid();
|
||||
len = sizeof(kp);
|
||||
|
||||
ret = sysctl(mib, 4, &kp, &len, NULL, 0);
|
||||
if (ret != 0) {
|
||||
return (errno);
|
||||
}
|
||||
|
||||
*rss = pagetok(kp.ki_rssize);
|
||||
*vss = kp.ki_size;
|
||||
*pcpu = kp.ki_pctcpu;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// This is a placeholder for now.
//
// ProcUsage fills pcpu, rss and vss via the cgo getusage() helper
// defined in the C preamble above; a non-zero return is surfaced as a
// syscall.Errno.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	var r, v C.uint
	var c C.double

	if ret := C.getusage(&c, &r, &v); ret != 0 {
		return syscall.Errno(ret)
	}

	*pcpu = float64(c)
	*rss = int64(r)
	*vss = int64(v)

	return nil
}
|
||||
115
vendor/github.com/nats-io/gnatsd/server/pse/pse_linux.go
generated
vendored
Normal file
115
vendor/github.com/nats-io/gnatsd/server/pse/pse_linux.go
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Package state for CPU sampling. Written by init()/periodic(),
// ipcpu is read atomically by ProcUsage.
var (
	procStatFile string // path to this process's /proc/<pid>/stat
	ticks        int64  // clock ticks per second (hard-coded 100, see init)
	lastTotal    int64  // previous utime+stime sample, in ticks
	lastSeconds  int64  // previous process-age sample, in seconds
	ipcpu        int64  // last computed pcpu * 10, stored via sync/atomic
)
|
||||
|
||||
// Zero-based field indexes into the space-separated /proc/<pid>/stat line.
const (
	utimePos = 13 // user-mode CPU time, in ticks
	stimePos = 14 // kernel-mode CPU time, in ticks
	startPos = 21 // process start time, in ticks since boot
	vssPos   = 22 // virtual memory size, in bytes
	rssPos   = 23 // resident set size, in pages (shifted by 12 in ProcUsage)
)
|
||||
|
||||
// init primes the sampling state: the tick rate, the /proc stat path
// for this pid, and the first periodic() sample (which also schedules
// the next one).
func init() {
	// Avoiding to generate docker image without CGO
	ticks = 100 // int64(C.sysconf(C._SC_CLK_TCK))
	procStatFile = fmt.Sprintf("/proc/%d/stat", os.Getpid())
	periodic()
}
|
||||
|
||||
// Sampling function to keep pcpu relevant.
//
// periodic reads /proc/<pid>/stat, computes CPU usage over the window
// since the previous sample, stores it atomically in ipcpu (scaled as
// percent*10), and reschedules itself one second later. Read errors
// silently end the sampling chain.
func periodic() {
	contents, err := ioutil.ReadFile(procStatFile)
	if err != nil {
		return
	}
	fields := bytes.Fields(contents)

	// PCPU
	pstart := parseInt64(fields[startPos])
	utime := parseInt64(fields[utimePos])
	stime := parseInt64(fields[stimePos])
	total := utime + stime

	var sysinfo syscall.Sysinfo_t
	if err := syscall.Sysinfo(&sysinfo); err != nil {
		return
	}

	// Process age in seconds: system uptime minus start time (ticks -> s).
	seconds := int64(sysinfo.Uptime) - (pstart / ticks)

	// Save off temps
	lt := lastTotal
	ls := lastSeconds

	// Update last sample
	lastTotal = total
	lastSeconds = seconds

	// Adjust to current time window
	total -= lt
	seconds -= ls

	if seconds > 0 {
		// (ticks used / ticks-per-second) per second, scaled by 1000/100
		// so the stored value is percent*10.
		atomic.StoreInt64(&ipcpu, (total*1000/ticks)/seconds)
	}

	time.AfterFunc(1*time.Second, periodic)
}
|
||||
|
||||
// ProcUsage reports this process's usage from /proc/<pid>/stat:
// pcpu in percent (from the periodic sampler), rss and vss in bytes.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	contents, err := ioutil.ReadFile(procStatFile)
	if err != nil {
		return err
	}
	fields := bytes.Fields(contents)

	// Memory
	*rss = (parseInt64(fields[rssPos])) << 12 // pages (4 KiB assumed) -> bytes
	*vss = parseInt64(fields[vssPos])

	// PCPU
	// We track this with periodic sampling, so just load and go.
	*pcpu = float64(atomic.LoadInt64(&ipcpu)) / 10.0

	return nil
}
|
||||
|
||||
// Ascii numbers 0-9
// ASCII codes bounding the decimal digits, used by parseInt64.
const (
	asciiZero = 48 // '0'
	asciiNine = 57 // '9'
)
|
||||
|
||||
// parseInt64 parses a non-negative decimal integer from d.
// Empty input or any non-digit byte yields -1 to signal an error.
func parseInt64(d []byte) (n int64) {
	if len(d) == 0 {
		return -1
	}
	for _, ch := range d {
		if ch < '0' || ch > '9' {
			return -1
		}
		n = n*10 + int64(ch-'0')
	}
	return n
}
|
||||
12
vendor/github.com/nats-io/gnatsd/server/pse/pse_solaris.go
generated
vendored
Normal file
12
vendor/github.com/nats-io/gnatsd/server/pse/pse_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package pse
|
||||
|
||||
// ProcUsage is a stub on solaris: it reports zero CPU and memory usage
// until a real implementation exists.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	*pcpu, *rss, *vss = 0.0, 0, 0
	return nil
}
|
||||
268
vendor/github.com/nats-io/gnatsd/server/pse/pse_windows.go
generated
vendored
Normal file
268
vendor/github.com/nats-io/gnatsd/server/pse/pse_windows.go
generated
vendored
Normal file
@@ -0,0 +1,268 @@
|
||||
// Copyright 2015-2016 Apcera Inc. All rights reserved.
|
||||
// +build windows
|
||||
|
||||
package pse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
pdh = syscall.NewLazyDLL("pdh.dll")
|
||||
winPdhOpenQuery = pdh.NewProc("PdhOpenQuery")
|
||||
winPdhAddCounter = pdh.NewProc("PdhAddCounterW")
|
||||
winPdhCollectQueryData = pdh.NewProc("PdhCollectQueryData")
|
||||
winPdhGetFormattedCounterValue = pdh.NewProc("PdhGetFormattedCounterValue")
|
||||
winPdhGetFormattedCounterArray = pdh.NewProc("PdhGetFormattedCounterArrayW")
|
||||
)
|
||||
|
||||
// global performance counter query handle and counters
|
||||
var (
|
||||
pcHandle PDH_HQUERY
|
||||
pidCounter, cpuCounter, rssCounter, vssCounter PDH_HCOUNTER
|
||||
prevCPU float64
|
||||
prevRss int64
|
||||
prevVss int64
|
||||
lastSampleTime time.Time
|
||||
processPid int
|
||||
pcQueryLock sync.Mutex
|
||||
initialSample = true
|
||||
)
|
||||
|
||||
// maxQuerySize is the number of values to return from a query.
|
||||
// It represents the maximum # of servers that can be queried
|
||||
// simultaneously running on a machine.
|
||||
const maxQuerySize = 512
|
||||
|
||||
// Keep static memory around to reuse; this works best for passing
|
||||
// into the pdh API.
|
||||
var counterResults [maxQuerySize]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE
|
||||
|
||||
// PDH Types
|
||||
type (
|
||||
PDH_HQUERY syscall.Handle
|
||||
PDH_HCOUNTER syscall.Handle
|
||||
)
|
||||
|
||||
// PDH constants used here
|
||||
const (
|
||||
PDH_FMT_DOUBLE = 0x00000200
|
||||
PDH_INVALID_DATA = 0xC0000BC6
|
||||
PDH_MORE_DATA = 0x800007D2
|
||||
)
|
||||
|
||||
// PDH_FMT_COUNTERVALUE_DOUBLE - double value
|
||||
type PDH_FMT_COUNTERVALUE_DOUBLE struct {
|
||||
CStatus uint32
|
||||
DoubleValue float64
|
||||
}
|
||||
|
||||
// PDH_FMT_COUNTERVALUE_ITEM_DOUBLE is an array
|
||||
// element of a double value
|
||||
type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE struct {
|
||||
SzName *uint16 // pointer to a string
|
||||
FmtValue PDH_FMT_COUNTERVALUE_DOUBLE
|
||||
}
|
||||
|
||||
func pdhAddCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) error {
|
||||
ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
|
||||
r0, _, _ := winPdhAddCounter.Call(
|
||||
uintptr(hQuery),
|
||||
uintptr(unsafe.Pointer(ptxt)),
|
||||
dwUserData,
|
||||
uintptr(unsafe.Pointer(phCounter)))
|
||||
|
||||
if r0 != 0 {
|
||||
return fmt.Errorf("pdhAddCounter failed. %d", r0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func pdhOpenQuery(datasrc *uint16, userdata uint32, query *PDH_HQUERY) error {
|
||||
r0, _, _ := syscall.Syscall(winPdhOpenQuery.Addr(), 3, 0, uintptr(userdata), uintptr(unsafe.Pointer(query)))
|
||||
if r0 != 0 {
|
||||
return fmt.Errorf("pdhOpenQuery failed - %d", r0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func pdhCollectQueryData(hQuery PDH_HQUERY) error {
|
||||
r0, _, _ := winPdhCollectQueryData.Call(uintptr(hQuery))
|
||||
if r0 != 0 {
|
||||
return fmt.Errorf("pdhCollectQueryData failed - %d", r0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// pdhGetFormattedCounterArrayDouble returns the value of return code
|
||||
// rather than error, to easily check return codes
|
||||
func pdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 {
|
||||
ret, _, _ := winPdhGetFormattedCounterArray.Call(
|
||||
uintptr(hCounter),
|
||||
uintptr(PDH_FMT_DOUBLE),
|
||||
uintptr(unsafe.Pointer(lpdwBufferSize)),
|
||||
uintptr(unsafe.Pointer(lpdwBufferCount)),
|
||||
uintptr(unsafe.Pointer(itemBuffer)))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
// getCounterArrayData returns the formatted double values for every
// instance of counter. It returns (nil, nil) when the first probe call
// succeeds outright (status 0 with no PDH_MORE_DATA), i.e. no array data
// was produced.
//
// NOTE(review): results are staged in the package-level counterResults
// buffer, which has no lock of its own — this assumes the caller holds
// pcQueryLock (ProcUsage does); confirm before adding new callers.
func getCounterArrayData(counter PDH_HCOUNTER) ([]float64, error) {
	var bufSize uint32
	var bufCount uint32

	// Retrieving array data requires two calls, the first which
	// requires an adressable empty buffer, and sets size fields.
	// The second call returns the data.
	initialBuf := make([]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, 1)
	ret := pdhGetFormattedCounterArrayDouble(counter, &bufSize, &bufCount, &initialBuf[0])
	if ret == PDH_MORE_DATA {
		// we'll likely never get here, but be safe.
		// Clamp to the static buffer's capacity (maxQuerySize entries).
		if bufCount > maxQuerySize {
			bufCount = maxQuerySize
		}
		ret = pdhGetFormattedCounterArrayDouble(counter, &bufSize, &bufCount, &counterResults[0])
		if ret == 0 {
			// Copy out of the shared static buffer before returning.
			rv := make([]float64, bufCount)
			for i := 0; i < int(bufCount); i++ {
				rv[i] = counterResults[i].FmtValue.DoubleValue
			}
			return rv, nil
		}
	}
	if ret != 0 {
		return nil, fmt.Errorf("getCounterArrayData failed - %d", ret)
	}

	return nil, nil
}
|
||||
|
||||
// getProcessImageName returns the name of the process image, as expected by
// the performance counter API: the executable's base name with a trailing
// ".exe" extension removed.
func getProcessImageName() (name string) {
	name = filepath.Base(os.Args[0])
	// Bug fix: strings.TrimRight trims a *set* of characters, not a
	// suffix — it would mangle names ending in 'e', 'x', or '.'
	// (e.g. "exe.exe" -> "", "service-x.exe" -> "service-").
	// TrimSuffix removes exactly one trailing ".exe".
	name = strings.TrimSuffix(name, ".exe")
	return
}
|
||||
|
||||
// initialize our counters
|
||||
func initCounters() (err error) {
|
||||
|
||||
processPid = os.Getpid()
|
||||
// require an addressible nil pointer
|
||||
var source uint16
|
||||
if err := pdhOpenQuery(&source, 0, &pcHandle); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// setup the performance counters, search for all server instances
|
||||
name := fmt.Sprintf("%s*", getProcessImageName())
|
||||
pidQuery := fmt.Sprintf("\\Process(%s)\\ID Process", name)
|
||||
cpuQuery := fmt.Sprintf("\\Process(%s)\\%% Processor Time", name)
|
||||
rssQuery := fmt.Sprintf("\\Process(%s)\\Working Set - Private", name)
|
||||
vssQuery := fmt.Sprintf("\\Process(%s)\\Virtual Bytes", name)
|
||||
|
||||
if err = pdhAddCounter(pcHandle, pidQuery, 0, &pidCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, cpuQuery, 0, &cpuCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, rssQuery, 0, &rssCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pdhAddCounter(pcHandle, vssQuery, 0, &vssCounter); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// prime the counters by collecting once, and sleep to get somewhat
|
||||
// useful information the first request. Counters for the CPU require
|
||||
// at least two collect calls.
|
||||
if err = pdhCollectQueryData(pcHandle); err != nil {
|
||||
return err
|
||||
}
|
||||
time.Sleep(50)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcUsage returns process CPU and memory statistics.
// Results are cached: within two seconds of the last sample the previous
// values are returned without touching the performance counter API.
// The first call performs one-time counter initialization.
func ProcUsage(pcpu *float64, rss, vss *int64) error {
	var err error

	// For simplicity, protect the entire call.
	// Most simultaneous requests will immediately return
	// with cached values.
	pcQueryLock.Lock()
	defer pcQueryLock.Unlock()

	// First time through, initialize counters.
	if initialSample {
		if err = initCounters(); err != nil {
			return err
		}
		initialSample = false
	} else if time.Since(lastSampleTime) < (2 * time.Second) {
		// only refresh every two seconds as to minimize impact
		// on the server.
		*pcpu = prevCPU
		*rss = prevRss
		*vss = prevVss
		return nil
	}

	// always save the sample time, even on errors.
	defer func() {
		lastSampleTime = time.Now()
	}()

	// refresh the performance counter data
	if err = pdhCollectQueryData(pcHandle); err != nil {
		return err
	}

	// retrieve the data
	var pidAry, cpuAry, rssAry, vssAry []float64
	if pidAry, err = getCounterArrayData(pidCounter); err != nil {
		return err
	}
	if cpuAry, err = getCounterArrayData(cpuCounter); err != nil {
		return err
	}
	if rssAry, err = getCounterArrayData(rssCounter); err != nil {
		return err
	}
	if vssAry, err = getCounterArrayData(vssCounter); err != nil {
		return err
	}
	// find the index of the entry for this process
	// (the counters match every instance of the image name, so the pid
	// array selects our own slot).
	idx := int(-1)
	for i := range pidAry {
		if int(pidAry[i]) == processPid {
			idx = i
			break
		}
	}
	// no pid found...
	if idx < 0 {
		return fmt.Errorf("could not find pid in performance counter results")
	}
	// assign values from the performance counters
	*pcpu = cpuAry[idx]
	*rss = int64(rssAry[idx])
	*vss = int64(vssAry[idx])

	// save off cache values
	prevCPU = *pcpu
	prevRss = *rss
	prevVss = *vss

	return nil
}
|
||||
662
vendor/github.com/nats-io/gnatsd/server/route.go
generated
vendored
Normal file
662
vendor/github.com/nats-io/gnatsd/server/route.go
generated
vendored
Normal file
@@ -0,0 +1,662 @@
|
||||
// Copyright 2013-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RouteType designates the router type
|
||||
type RouteType int
|
||||
|
||||
// Type of Route
|
||||
const (
|
||||
// This route we learned from speaking to other routes.
|
||||
Implicit RouteType = iota
|
||||
// This route was explicitly configured.
|
||||
Explicit
|
||||
)
|
||||
|
||||
type route struct {
|
||||
remoteID string
|
||||
didSolicit bool
|
||||
retry bool
|
||||
routeType RouteType
|
||||
url *url.URL
|
||||
authRequired bool
|
||||
tlsRequired bool
|
||||
}
|
||||
|
||||
type connectInfo struct {
|
||||
Verbose bool `json:"verbose"`
|
||||
Pedantic bool `json:"pedantic"`
|
||||
User string `json:"user,omitempty"`
|
||||
Pass string `json:"pass,omitempty"`
|
||||
TLS bool `json:"tls_required"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// Route protocol constants
|
||||
const (
|
||||
ConProto = "CONNECT %s" + _CRLF_
|
||||
InfoProto = "INFO %s" + _CRLF_
|
||||
)
|
||||
|
||||
// sendConnect queues the route CONNECT protocol line to the peer,
// including credentials taken from the route URL (if any) and this
// server's ID as the connection name. On a marshalling failure the
// connection is closed.
// Lock should be held entering here.
func (c *client) sendConnect(tlsRequired bool) {
	var user, pass string
	if userInfo := c.route.url.User; userInfo != nil {
		user = userInfo.Username()
		pass, _ = userInfo.Password()
	}
	cinfo := connectInfo{
		Verbose:  false,
		Pedantic: false,
		User:     user,
		Pass:     pass,
		TLS:      tlsRequired,
		Name:     c.srv.info.ID,
	}
	b, err := json.Marshal(cinfo)
	if err != nil {
		c.Errorf("Error marshalling CONNECT to route: %v\n", err)
		c.closeConnection()
		return
	}
	c.sendProto([]byte(fmt.Sprintf(ConProto, b)), true)
}
|
||||
|
||||
// processRouteInfo handles an INFO protocol message received on a route
// connection: it learns implicit routes, detects routes to self and
// duplicates, fills in missing route metadata/URL, registers the route,
// and propagates the new route's INFO to the other known servers.
// Note the careful lock choreography: c.mu is released before calling
// into Server-level methods (addRoute, processImplicitRoute, ...).
// Process the info message if we are a route.
func (c *client) processRouteInfo(info *Info) {
	c.mu.Lock()
	// Connection can be closed at any time (by auth timeout, etc).
	// Does not make sense to continue here if connection is gone.
	if c.route == nil || c.nc == nil {
		c.mu.Unlock()
		return
	}

	s := c.srv
	remoteID := c.route.remoteID

	// We receive an INFO from a server that informs us about another server,
	// so the info.ID in the INFO protocol does not match the ID of this route.
	if remoteID != "" && remoteID != info.ID {
		c.mu.Unlock()

		// Process this implicit route. We will check that it is not an explicit
		// route and/or that it has not been connected already.
		s.processImplicitRoute(info)
		return
	}

	// Need to set this for the detection of the route to self to work
	// in closeConnection().
	c.route.remoteID = info.ID

	// Detect route to self.
	if c.route.remoteID == s.info.ID {
		c.mu.Unlock()
		c.closeConnection()
		return
	}

	// Copy over important information.
	c.route.authRequired = info.AuthRequired
	c.route.tlsRequired = info.TLSRequired

	// If we do not know this route's URL, construct one on the fly
	// from the information provided.
	if c.route.url == nil {
		// Add in the URL from host and port
		hp := net.JoinHostPort(info.Host, strconv.Itoa(info.Port))
		url, err := url.Parse(fmt.Sprintf("nats-route://%s/", hp))
		if err != nil {
			c.Errorf("Error parsing URL from INFO: %v\n", err)
			c.mu.Unlock()
			c.closeConnection()
			return
		}
		c.route.url = url
	}

	// Check to see if we have this remote already registered.
	// This can happen when both servers have routes to each other.
	c.mu.Unlock()

	if added, sendInfo := s.addRoute(c, info); added {
		c.Debugf("Registering remote route %q", info.ID)
		// Send our local subscriptions to this route.
		s.sendLocalSubsToRoute(c)
		if sendInfo {
			// Need to get the remote IP address.
			c.mu.Lock()
			switch conn := c.nc.(type) {
			case *net.TCPConn, *tls.Conn:
				addr := conn.RemoteAddr().(*net.TCPAddr)
				info.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(addr.IP.String(), strconv.Itoa(info.Port)))
			default:
				info.IP = fmt.Sprintf("%s", c.route.url)
			}
			c.mu.Unlock()
			// Now let the known servers know about this new route
			s.forwardNewRouteInfoToKnownServers(info)
		}
	} else {
		c.Debugf("Detected duplicate remote route %q", info.ID)
		c.closeConnection()
	}
}
|
||||
|
||||
// This will process implicit route information received from another server.
// We will check to see if we have configured or are already connected,
// and if so we will ignore. Otherwise we will attempt to connect.
// The dial happens on a tracked goroutine so it does not block the caller
// and is accounted for at shutdown.
func (s *Server) processImplicitRoute(info *Info) {
	remoteID := info.ID

	s.mu.Lock()
	defer s.mu.Unlock()

	// Don't connect to ourself
	if remoteID == s.info.ID {
		return
	}
	// Check if this route already exists
	if _, exists := s.remotes[remoteID]; exists {
		return
	}
	// Check if we have this route as a configured route
	if s.hasThisRouteConfigured(info) {
		return
	}

	// Initiate the connection, using info.IP instead of info.URL here...
	r, err := url.Parse(info.IP)
	if err != nil {
		Debugf("Error parsing URL from INFO: %v\n", err)
		return
	}
	// Carry our cluster credentials on the dial URL when the peer
	// requires authentication.
	if info.AuthRequired {
		r.User = url.UserPassword(s.opts.ClusterUsername, s.opts.ClusterPassword)
	}
	s.startGoRoutine(func() { s.connectToRoute(r, false) })
}
|
||||
|
||||
// hasThisRouteConfigured returns true if info.Host:info.Port is present
|
||||
// in the server's opts.Routes, false otherwise.
|
||||
// Server lock is assumed to be held by caller.
|
||||
func (s *Server) hasThisRouteConfigured(info *Info) bool {
|
||||
urlToCheckExplicit := strings.ToLower(net.JoinHostPort(info.Host, strconv.Itoa(info.Port)))
|
||||
for _, ri := range s.opts.Routes {
|
||||
if strings.ToLower(ri.Host) == urlToCheckExplicit {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// forwardNewRouteInfoToKnownServers sends the INFO protocol of the new route
// to all routes known by this server. In turn, each server will contact this
// new route. The route whose ID matches info.ID is skipped so the new
// route is not told about itself.
func (s *Server) forwardNewRouteInfoToKnownServers(info *Info) {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Marshal once, send the same framed INFO line to every route.
	b, _ := json.Marshal(info)
	infoJSON := []byte(fmt.Sprintf(InfoProto, b))

	for _, r := range s.routes {
		r.mu.Lock()
		if r.route.remoteID != info.ID {
			r.sendInfo(infoJSON)
		}
		r.mu.Unlock()
	}
}
|
||||
|
||||
// This will send local subscription state to a new route connection.
// All clients' subscriptions are serialized into one buffer of SUB
// protocol lines (each sub is snapshotted under its client's lock first)
// and flushed to the route in a single send.
// FIXME(dlc) - This could be a DOS or perf issue with many clients
// and large subscription space. Plus buffering in place not a good idea.
func (s *Server) sendLocalSubsToRoute(route *client) {
	b := bytes.Buffer{}
	s.mu.Lock()
	for _, client := range s.clients {
		client.mu.Lock()
		// Copy the subs out so the protocol lines can be built without
		// holding the client's lock.
		subs := make([]*subscription, 0, len(client.subs))
		for _, sub := range client.subs {
			subs = append(subs, sub)
		}
		client.mu.Unlock()
		for _, sub := range subs {
			rsid := routeSid(sub)
			proto := fmt.Sprintf(subProto, sub.subject, sub.queue, rsid)
			b.WriteString(proto)
		}
	}
	s.mu.Unlock()

	route.mu.Lock()
	defer route.mu.Unlock()
	route.sendProto(b.Bytes(), true)

	route.Debugf("Route sent local subscriptions")
}
|
||||
|
||||
// createRoute builds a route client for conn. rURL is non-nil when we
// solicited (dialed) the connection; nil for accepted connections. It
// performs the optional TLS handshake (client or server role depending on
// who solicited), registers the connection in the temporary-client map so
// Shutdown can find it before the INFO exchange completes, starts the
// read loop, queues CONNECT (solicited only) and our INFO, and arms the
// auth timer for unauthenticated inbound routes. Returns nil if the
// connection failed or went away during the handshake.
func (s *Server) createRoute(conn net.Conn, rURL *url.URL) *client {
	didSolicit := rURL != nil
	r := &route{didSolicit: didSolicit}
	// Mark the route Explicit when the dialed host matches a configured one.
	for _, route := range s.opts.Routes {
		if rURL != nil && (strings.ToLower(rURL.Host) == strings.ToLower(route.Host)) {
			r.routeType = Explicit
		}
	}

	c := &client{srv: s, nc: conn, opts: clientOpts{}, typ: ROUTER, route: r}

	// Grab server variables
	s.mu.Lock()
	infoJSON := s.routeInfoJSON
	authRequired := s.routeInfo.AuthRequired
	tlsRequired := s.routeInfo.TLSRequired
	s.mu.Unlock()

	// Grab lock
	c.mu.Lock()

	// Initialize
	c.initClient()

	c.Debugf("Route connection created")

	if didSolicit {
		// Do this before the TLS code, otherwise, in case of failure
		// and if route is explicit, it would try to reconnect to 'nil'...
		r.url = rURL
	}

	// Check for TLS
	if tlsRequired {
		// Copy off the config to add in ServerName if we
		tlsConfig := *s.opts.ClusterTLSConfig

		// If we solicited, we will act like the client, otherwise the server.
		if didSolicit {
			c.Debugf("Starting TLS route client handshake")
			// Specify the ServerName we are expecting.
			host, _, _ := net.SplitHostPort(rURL.Host)
			tlsConfig.ServerName = host
			c.nc = tls.Client(c.nc, &tlsConfig)
		} else {
			c.Debugf("Starting TLS route server handshake")
			c.nc = tls.Server(c.nc, &tlsConfig)
		}

		conn := c.nc.(*tls.Conn)

		// Setup the timeout
		ttl := secondsToDuration(s.opts.ClusterTLSTimeout)
		time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
		conn.SetReadDeadline(time.Now().Add(ttl))

		// Handshake blocks, so release the client lock around it.
		c.mu.Unlock()
		if err := conn.Handshake(); err != nil {
			c.Debugf("TLS route handshake error: %v", err)
			c.sendErr("Secure Connection - TLS Required")
			c.closeConnection()
			return nil
		}
		// Reset the read deadline
		conn.SetReadDeadline(time.Time{})

		// Re-Grab lock
		c.mu.Lock()

		// Verify that the connection did not go away while we released the lock.
		if c.nc == nil {
			c.mu.Unlock()
			return nil
		}

		// Rewrap bw
		c.bw = bufio.NewWriterSize(c.nc, startBufSize)
	}

	// Do final client initialization

	// Set the Ping timer
	c.setPingTimer()

	// For routes, the "client" is added to s.routes only when processing
	// the INFO protocol, that is much later.
	// In the meantime, if the server shutsdown, there would be no reference
	// to the client (connection) to be closed, leaving this readLoop
	// uinterrupted, causing the Shutdown() to wait indefinitively.
	// We need to store the client in a special map, under a special lock.
	s.grMu.Lock()
	s.grTmpClients[c.cid] = c
	s.grMu.Unlock()

	// Spin up the read loop.
	s.startGoRoutine(func() { c.readLoop() })

	if tlsRequired {
		c.Debugf("TLS handshake complete")
		cs := c.nc.(*tls.Conn).ConnectionState()
		c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
	}

	// Queue Connect proto if we solicited the connection.
	if didSolicit {
		c.Debugf("Route connect msg sent")
		c.sendConnect(tlsRequired)
	}

	// Send our info to the other side.
	c.sendInfo(infoJSON)

	// Check for Auth required state for incoming connections.
	if authRequired && !didSolicit {
		ttl := secondsToDuration(s.opts.ClusterAuthTimeout)
		c.setAuthTimer(ttl)
	}

	c.mu.Unlock()

	return c
}
|
||||
|
||||
// Protocol framing constants.
const (
	_CRLF_  = "\r\n"
	_EMPTY_ = ""
	_SPC_   = " "
)

// Protocol templates for forwarding subscription interest over routes.
const (
	subProto   = "SUB %s %s %s" + _CRLF_
	unsubProto = "UNSUB %s%s" + _CRLF_
)

// FIXME(dlc) - Make these reserved and reject if they come in as a sid
// from a client connection.
// Route constants
const (
	RSID  = "RSID"  // prefix for plain route subscription ids
	QRSID = "QRSID" // "Q" + RSID: prefix used for queue subscriptions (see routeSid)

	// Submatch indices for qrsidRe below.
	RSID_CID_INDEX   = 1
	RSID_SID_INDEX   = 2
	EXPECTED_MATCHES = 3
)

// qrsidRe extracts the client id and the original sid from a
// QRSID-formatted route subscription id.
// FIXME(dlc) - This may be too slow, check at later date.
var qrsidRe = regexp.MustCompile(`QRSID:(\d+):([^\s]+)`)
|
||||
|
||||
// routeSidQueueSubscriber resolves a QRSID-formatted route sid back to the
// local queue subscription it refers to. The second return value reports
// whether rsid was a queue-route sid at all: (nil, false) means "not a
// QRSID, treat normally"; (nil, true) means it was a QRSID but the client
// or subscription is gone.
func (s *Server) routeSidQueueSubscriber(rsid []byte) (*subscription, bool) {
	if !bytes.HasPrefix(rsid, []byte(QRSID)) {
		return nil, false
	}
	matches := qrsidRe.FindSubmatch(rsid)
	if matches == nil || len(matches) != EXPECTED_MATCHES {
		return nil, false
	}
	cid := uint64(parseInt64(matches[RSID_CID_INDEX]))

	s.mu.Lock()
	client := s.clients[cid]
	s.mu.Unlock()

	if client == nil {
		return nil, true
	}
	sid := matches[RSID_SID_INDEX]

	client.mu.Lock()
	sub, ok := client.subs[string(sid)]
	client.mu.Unlock()
	if ok {
		return sub, true
	}
	return nil, true
}
|
||||
|
||||
func routeSid(sub *subscription) string {
|
||||
var qi string
|
||||
if len(sub.queue) > 0 {
|
||||
qi = "Q"
|
||||
}
|
||||
return fmt.Sprintf("%s%s:%d:%s", qi, RSID, sub.client.cid, sub.sid)
|
||||
}
|
||||
|
||||
// addRoute registers c as a route if its remote ID is not already known.
// It returns (added, sendInfo): added is false for duplicates, and
// sendInfo tells the caller whether the new route's INFO should be
// forwarded to the other known servers.
func (s *Server) addRoute(c *client, info *Info) (bool, bool) {
	id := c.route.remoteID
	sendInfo := false

	s.mu.Lock()
	if !s.running {
		s.mu.Unlock()
		return false, false
	}
	remote, exists := s.remotes[id]
	if !exists {
		// Remove from the temporary map
		// (createRoute parked it there for shutdown tracking).
		s.grMu.Lock()
		delete(s.grTmpClients, c.cid)
		s.grMu.Unlock()

		s.routes[c.cid] = c
		s.remotes[id] = c

		// If this server's ID is (alpha) less than the peer, then we will
		// make sure that if we are disconnected, we will try to connect once
		// more. This is to mitigate the issue where both sides add the route
		// on the opposite connection, and therefore we end-up with both
		// being dropped.
		if s.info.ID < id {
			c.mu.Lock()
			// Make this as a retry (otherwise, only explicit are retried).
			c.route.retry = true
			c.mu.Unlock()
		}

		// we don't need to send if the only route is the one we just accepted.
		sendInfo = len(s.routes) > 1
	}
	s.mu.Unlock()

	if exists && c.route.didSolicit {
		// upgrade to solicited?
		remote.mu.Lock()
		// the existing route (remote) should keep its 'retry' value, and
		// not be replaced with c.route.retry.
		retry := remote.route.retry
		remote.route = c.route
		remote.route.retry = retry
		remote.mu.Unlock()
	}

	return !exists, sendInfo
}
|
||||
|
||||
// broadcastInterestToRoutes sends the given protocol line to every active
// route. The trace argument is only materialized when tracing is enabled
// (atomic trace flag), avoiding the slice copy otherwise.
func (s *Server) broadcastInterestToRoutes(proto string) {
	var arg []byte
	if atomic.LoadInt32(&trace) == 1 {
		arg = []byte(proto[:len(proto)-LEN_CR_LF])
	}
	protoAsBytes := []byte(proto)
	s.mu.Lock()
	for _, route := range s.routes {
		// FIXME(dlc) - Make same logic as deliverMsg
		route.mu.Lock()
		route.sendProto(protoAsBytes, true)
		route.mu.Unlock()
		route.traceOutOp("", arg)
	}
	s.mu.Unlock()
}
|
||||
|
||||
// broadcastSubscribe will forward a client subscription
|
||||
// to all active routes.
|
||||
func (s *Server) broadcastSubscribe(sub *subscription) {
|
||||
if s.numRoutes() == 0 {
|
||||
return
|
||||
}
|
||||
rsid := routeSid(sub)
|
||||
proto := fmt.Sprintf(subProto, sub.subject, sub.queue, rsid)
|
||||
s.broadcastInterestToRoutes(proto)
|
||||
}
|
||||
|
||||
// broadcastUnSubscribe will forward a client unsubscribe
|
||||
// action to all active routes.
|
||||
func (s *Server) broadcastUnSubscribe(sub *subscription) {
|
||||
if s.numRoutes() == 0 {
|
||||
return
|
||||
}
|
||||
rsid := routeSid(sub)
|
||||
maxStr := _EMPTY_
|
||||
// Set max if we have it set and have not tripped auto-unsubscribe
|
||||
if sub.max > 0 && sub.nm < sub.max {
|
||||
maxStr = fmt.Sprintf(" %d", sub.max)
|
||||
}
|
||||
proto := fmt.Sprintf(unsubProto, rsid, maxStr)
|
||||
s.broadcastInterestToRoutes(proto)
|
||||
}
|
||||
|
||||
func (s *Server) routeAcceptLoop(ch chan struct{}) {
|
||||
hp := fmt.Sprintf("%s:%d", s.opts.ClusterHost, s.opts.ClusterPort)
|
||||
Noticef("Listening for route connections on %s", hp)
|
||||
l, e := net.Listen("tcp", hp)
|
||||
if e != nil {
|
||||
Fatalf("Error listening on router port: %d - %v", s.opts.Port, e)
|
||||
return
|
||||
}
|
||||
|
||||
// Let them know we are up
|
||||
close(ch)
|
||||
|
||||
// Setup state that can enable shutdown
|
||||
s.mu.Lock()
|
||||
s.routeListener = l
|
||||
s.mu.Unlock()
|
||||
|
||||
tmpDelay := ACCEPT_MIN_SLEEP
|
||||
|
||||
for s.isRunning() {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||
Debugf("Temporary Route Accept Errorf(%v), sleeping %dms",
|
||||
ne, tmpDelay/time.Millisecond)
|
||||
time.Sleep(tmpDelay)
|
||||
tmpDelay *= 2
|
||||
if tmpDelay > ACCEPT_MAX_SLEEP {
|
||||
tmpDelay = ACCEPT_MAX_SLEEP
|
||||
}
|
||||
} else if s.isRunning() {
|
||||
Noticef("Accept error: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
tmpDelay = ACCEPT_MIN_SLEEP
|
||||
s.startGoRoutine(func() {
|
||||
s.createRoute(conn, nil)
|
||||
s.grWG.Done()
|
||||
})
|
||||
}
|
||||
Debugf("Router accept loop exiting..")
|
||||
s.done <- true
|
||||
}
|
||||
|
||||
// StartRouting will start the accept loop on the cluster host:port
// and will actively try to connect to listed routes.
// It builds and caches the route INFO JSON first, waits for the accept
// loop to signal readiness, then solicits the configured routes.
func (s *Server) StartRouting() {
	// Check for TLSConfig
	tlsReq := s.opts.ClusterTLSConfig != nil
	info := Info{
		ID:           s.info.ID,
		Version:      s.info.Version,
		Host:         s.opts.ClusterHost,
		Port:         s.opts.ClusterPort,
		AuthRequired: false,
		TLSRequired:  tlsReq,
		SSLRequired:  tlsReq,
		TLSVerify:    tlsReq,
		MaxPayload:   s.info.MaxPayload,
	}
	// Check for Auth items
	if s.opts.ClusterUsername != "" {
		info.AuthRequired = true
	}
	s.routeInfo = info
	// Pre-render the framed INFO line once; createRoute reuses it.
	b, _ := json.Marshal(info)
	s.routeInfoJSON = []byte(fmt.Sprintf(InfoProto, b))

	// Spin up the accept loop
	ch := make(chan struct{})
	go s.routeAcceptLoop(ch)
	<-ch

	// Solicit Routes if needed.
	s.solicitRoutes()
}
|
||||
|
||||
func (s *Server) reConnectToRoute(rURL *url.URL, rtype RouteType) {
|
||||
tryForEver := rtype == Explicit
|
||||
if tryForEver {
|
||||
time.Sleep(DEFAULT_ROUTE_RECONNECT)
|
||||
}
|
||||
s.connectToRoute(rURL, tryForEver)
|
||||
}
|
||||
|
||||
// connectToRoute dials rURL and hands the connection to createRoute.
// On dial failure it waits DEFAULT_ROUTE_CONNECT and retries while
// tryForEver is set and the server is running; s.rcQuit aborts the wait.
// NOTE(review): the deferred s.grWG.Done() pairs with the WaitGroup
// increment presumably performed by startGoRoutine — callers must launch
// this via startGoRoutine (solicitRoutes and processImplicitRoute do).
func (s *Server) connectToRoute(rURL *url.URL, tryForEver bool) {
	defer s.grWG.Done()
	for s.isRunning() && rURL != nil {
		Debugf("Trying to connect to route on %s", rURL.Host)
		conn, err := net.DialTimeout("tcp", rURL.Host, DEFAULT_ROUTE_DIAL)
		if err != nil {
			Debugf("Error trying to connect to route: %v", err)
			select {
			case <-s.rcQuit:
				return
			case <-time.After(DEFAULT_ROUTE_CONNECT):
				if !tryForEver {
					return
				}
				continue
			}
		}
		// We have a route connection here.
		// Go ahead and create it and exit this func.
		s.createRoute(conn, rURL)
		return
	}
}
|
||||
|
||||
func (c *client) isSolicitedRoute() bool {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.typ == ROUTER && c.route != nil && c.route.didSolicit
|
||||
}
|
||||
|
||||
// solicitRoutes dials every configured route on its own tracked goroutine.
func (s *Server) solicitRoutes() {
	for _, r := range s.opts.Routes {
		// Shadow the loop variable so each closure captures its own route.
		route := r
		s.startGoRoutine(func() { s.connectToRoute(route, true) })
	}
}
|
||||
|
||||
func (s *Server) numRoutes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.routes)
|
||||
}
|
||||
827
vendor/github.com/nats-io/gnatsd/server/server.go
generated
vendored
Normal file
827
vendor/github.com/nats-io/gnatsd/server/server.go
generated
vendored
Normal file
@@ -0,0 +1,827 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// Allow dynamic profiling.
|
||||
_ "net/http/pprof"
|
||||
)
|
||||
|
||||
// Info is the information sent to clients to help them understand information
// about this server. It is also exchanged between servers on route
// connections (see StartRouting / processRouteInfo).
type Info struct {
	ID           string `json:"server_id"`
	Version      string `json:"version"`
	GoVersion    string `json:"go"`
	Host         string `json:"host"`
	Port         int    `json:"port"`
	AuthRequired bool   `json:"auth_required"`
	SSLRequired  bool   `json:"ssl_required"` // DEPRECATED: ssl json used for older clients
	TLSRequired  bool   `json:"tls_required"`
	TLSVerify    bool   `json:"tls_verify"`
	MaxPayload   int    `json:"max_payload"`
	IP           string `json:"ip,omitempty"` // dial address forwarded for implicit routes
}
|
||||
|
||||
// Server is our main struct. Fields are protected by mu unless noted;
// the gr* fields have their own lock (grMu) so shutdown can track
// connections that are not yet registered in routes.
type Server struct {
	gcid uint64
	grid uint64
	stats
	mu            sync.Mutex
	info          Info
	infoJSON      []byte
	sl            *Sublist
	opts          *Options
	cAuth         Auth
	rAuth         Auth
	trace         bool
	debug         bool
	running       bool
	listener      net.Listener
	clients       map[uint64]*client
	routes        map[uint64]*client
	remotes       map[string]*client // routes keyed by remote server ID
	totalClients  uint64
	done          chan bool
	start         time.Time
	http          net.Listener
	httpReqStats  map[string]uint64
	routeListener net.Listener
	routeInfo     Info
	routeInfoJSON []byte // pre-rendered INFO protocol line for new routes
	rcQuit        chan bool
	grMu          sync.Mutex
	grTmpClients  map[uint64]*client // connections with a readLoop but no route entry yet
	grRunning     bool
	grWG          sync.WaitGroup // to wait on various go routines
}
|
||||
|
||||
// stats holds server message/byte counters.
// Make sure all are 64bits for atomic use.
type stats struct {
	inMsgs        int64
	outMsgs       int64
	inBytes       int64
	outBytes      int64
	slowConsumers int64
}
|
||||
|
||||
// New will setup a new server struct after parsing the options.
// It derives TLS requirements from opts, builds the server Info block,
// allocates the client/route maps, pre-renders the server INFO JSON and
// installs signal handling. The returned server is not yet listening.
func New(opts *Options) *Server {
	processOptions(opts)

	// Process TLS options, including whether we require client certificates.
	tlsReq := opts.TLSConfig != nil
	verify := (tlsReq && opts.TLSConfig.ClientAuth == tls.RequireAnyClientCert)

	info := Info{
		ID:           genID(),
		Version:      VERSION,
		GoVersion:    runtime.Version(),
		Host:         opts.Host,
		Port:         opts.Port,
		AuthRequired: false,
		TLSRequired:  tlsReq,
		SSLRequired:  tlsReq,
		TLSVerify:    verify,
		MaxPayload:   opts.MaxPayload,
	}

	s := &Server{
		info:  info,
		sl:    NewSublist(),
		opts:  opts,
		debug: opts.Debug,
		trace: opts.Trace,
		done:  make(chan bool, 1),
		start: time.Now(),
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	// For tracking clients
	s.clients = make(map[uint64]*client)

	// For tracking connections that are not yet registered
	// in s.routes, but for which readLoop has started.
	s.grTmpClients = make(map[uint64]*client)

	// For tracking routes and their remote ids
	s.routes = make(map[uint64]*client)
	s.remotes = make(map[string]*client)

	// Used to kick out all of the route
	// connect Go routines.
	s.rcQuit = make(chan bool)
	s.generateServerInfoJSON()
	s.handleSignals()

	return s
}
|
||||
|
||||
// SetClientAuthMethod sets the authentication method for clients.
|
||||
func (s *Server) SetClientAuthMethod(authMethod Auth) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.info.AuthRequired = true
|
||||
s.cAuth = authMethod
|
||||
|
||||
s.generateServerInfoJSON()
|
||||
}
|
||||
|
||||
// SetRouteAuthMethod sets the authentication method for routes.
|
||||
func (s *Server) SetRouteAuthMethod(authMethod Auth) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.rAuth = authMethod
|
||||
}
|
||||
|
||||
func (s *Server) generateServerInfoJSON() {
|
||||
// Generate the info json
|
||||
b, err := json.Marshal(s.info)
|
||||
if err != nil {
|
||||
Fatalf("Error marshalling INFO JSON: %+v\n", err)
|
||||
return
|
||||
}
|
||||
s.infoJSON = []byte(fmt.Sprintf("INFO %s %s", b, CR_LF))
|
||||
}
|
||||
|
||||
// PrintAndDie is exported for access in other packages.
// It writes msg to stderr and terminates the process with exit code 1.
func PrintAndDie(msg string) {
	fmt.Fprintln(os.Stderr, msg)
	os.Exit(1)
}
|
||||
|
||||
// PrintServerAndExit will print our version and exit.
|
||||
func PrintServerAndExit() {
|
||||
fmt.Printf("nats-server version %s\n", VERSION)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// Signal Handling
// handleSignals installs a handler for SIGINT that logs and exits the
// process. It is a no-op when opts.NoSigs is set (e.g. embedded/test use).
// The spawned goroutine lives for the life of the process.
func (s *Server) handleSignals() {
	if s.opts.NoSigs {
		return
	}
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		for sig := range c {
			Debugf("Trapped Signal; %v", sig)
			// FIXME, trip running?
			Noticef("Server Exiting..")
			// Hard exit: no graceful Shutdown() is performed here.
			os.Exit(0)
		}
	}()
}
|
||||
|
||||
// Protected check on running state
|
||||
func (s *Server) isRunning() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.running
|
||||
}
|
||||
|
||||
func (s *Server) logPid() {
|
||||
pidStr := strconv.Itoa(os.Getpid())
|
||||
err := ioutil.WriteFile(s.opts.PidFile, []byte(pidStr), 0660)
|
||||
if err != nil {
|
||||
PrintAndDie(fmt.Sprintf("Could not write pidfile: %v\n", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Start up the server, this will block.
// Start via a Go routine if needed.
// Order matters: auxiliary listeners (monitoring, routing, profiling) are
// started first, then AcceptLoop blocks serving client connections.
func (s *Server) Start() {
	Noticef("Starting nats-server version %s", VERSION)
	Debugf("Go build version %s", s.info.GoVersion)

	// NOTE(review): s.running is set without holding s.mu here, unlike
	// Shutdown/isRunning — confirm Start is only called once before any
	// concurrent access.
	s.running = true
	s.grMu.Lock()
	s.grRunning = true
	s.grMu.Unlock()

	// Log the pid to a file
	if s.opts.PidFile != _EMPTY_ {
		s.logPid()
	}

	// Start up the http server if needed.
	if s.opts.HTTPPort != 0 {
		s.StartHTTPMonitoring()
	}

	// Start up the https server if needed.
	if s.opts.HTTPSPort != 0 {
		if s.opts.TLSConfig == nil {
			Fatalf("TLS cert and key required for HTTPS")
			return
		}
		s.StartHTTPSMonitoring()
	}

	// Start up routing as well if needed.
	if s.opts.ClusterPort != 0 {
		s.StartRouting()
	}

	// Pprof http endpoint for the profiler.
	if s.opts.ProfPort != 0 {
		s.StartProfiler()
	}

	// Wait for clients.
	s.AcceptLoop()
}
|
||||
|
||||
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
// and closing all associated clients.
// It is safe to call multiple times; only the first call does the work.
// Sequence: flip running flags, snapshot all connections, close listeners,
// release route-connect goroutines, then close connections outside the lock
// and wait for accept loops and tracked goroutines to finish.
func (s *Server) Shutdown() {
	s.mu.Lock()

	// Prevent issues with multiple calls.
	if !s.running {
		s.mu.Unlock()
		return
	}

	s.running = false
	s.grMu.Lock()
	s.grRunning = false
	s.grMu.Unlock()

	conns := make(map[uint64]*client)

	// Copy off the clients
	for i, c := range s.clients {
		conns[i] = c
	}
	// Copy off the connections that are not yet registered
	// in s.routes, but for which the readLoop has started
	s.grMu.Lock()
	for i, c := range s.grTmpClients {
		conns[i] = c
	}
	s.grMu.Unlock()
	// Copy off the routes
	for i, r := range s.routes {
		conns[i] = r
	}

	// Number of done channel responses we expect.
	doneExpected := 0

	// Kick client AcceptLoop()
	if s.listener != nil {
		doneExpected++
		s.listener.Close()
		s.listener = nil
	}

	// Kick route AcceptLoop()
	if s.routeListener != nil {
		doneExpected++
		s.routeListener.Close()
		s.routeListener = nil
	}

	// Kick HTTP monitoring if its running
	if s.http != nil {
		doneExpected++
		s.http.Close()
		s.http = nil
	}

	// Release the solicited routes connect go routines.
	close(s.rcQuit)

	// Unlock before closing connections so their cleanup paths can take s.mu.
	s.mu.Unlock()

	// Close client and route connections
	for _, c := range conns {
		c.closeConnection()
	}

	// Block until the accept loops exit
	for doneExpected > 0 {
		<-s.done
		doneExpected--
	}

	// Wait for go routines to be done.
	s.grWG.Wait()
}
|
||||
|
||||
// AcceptLoop is exported for easier testing.
|
||||
func (s *Server) AcceptLoop() {
|
||||
hp := net.JoinHostPort(s.opts.Host, strconv.Itoa(s.opts.Port))
|
||||
Noticef("Listening for client connections on %s", hp)
|
||||
l, e := net.Listen("tcp", hp)
|
||||
if e != nil {
|
||||
Fatalf("Error listening on port: %s, %q", hp, e)
|
||||
return
|
||||
}
|
||||
|
||||
// Alert of TLS enabled.
|
||||
if s.opts.TLSConfig != nil {
|
||||
Noticef("TLS required for client connections")
|
||||
}
|
||||
|
||||
Debugf("Server id is %s", s.info.ID)
|
||||
Noticef("Server is ready")
|
||||
|
||||
// Setup state that can enable shutdown
|
||||
s.mu.Lock()
|
||||
s.listener = l
|
||||
|
||||
// If server was started with RANDOM_PORT (-1), opts.Port would be equal
|
||||
// to 0 at the beginning this function. So we need to get the actual port
|
||||
if s.opts.Port == 0 {
|
||||
// Write resolved port back to options.
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
if err != nil {
|
||||
Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
portNum, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
s.opts.Port = portNum
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
tmpDelay := ACCEPT_MIN_SLEEP
|
||||
|
||||
for s.isRunning() {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||
Debugf("Temporary Client Accept Error(%v), sleeping %dms",
|
||||
ne, tmpDelay/time.Millisecond)
|
||||
time.Sleep(tmpDelay)
|
||||
tmpDelay *= 2
|
||||
if tmpDelay > ACCEPT_MAX_SLEEP {
|
||||
tmpDelay = ACCEPT_MAX_SLEEP
|
||||
}
|
||||
} else if s.isRunning() {
|
||||
Noticef("Accept error: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
tmpDelay = ACCEPT_MIN_SLEEP
|
||||
s.startGoRoutine(func() {
|
||||
s.createClient(conn)
|
||||
s.grWG.Done()
|
||||
})
|
||||
}
|
||||
Noticef("Server Exiting..")
|
||||
s.done <- true
|
||||
}
|
||||
|
||||
// StartProfiler is called to enable dynamic profiling.
|
||||
func (s *Server) StartProfiler() {
|
||||
Noticef("Starting profiling on http port %d", s.opts.ProfPort)
|
||||
hp := net.JoinHostPort(s.opts.Host, strconv.Itoa(s.opts.ProfPort))
|
||||
go func() {
|
||||
err := http.ListenAndServe(hp, nil)
|
||||
if err != nil {
|
||||
Fatalf("error starting monitor server: %s", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// StartHTTPMonitoring will enable the HTTP monitoring port.
// Thin wrapper over startMonitoring with TLS disabled.
func (s *Server) StartHTTPMonitoring() {
	s.startMonitoring(false)
}
|
||||
|
||||
// StartHTTPSMonitoring will enable the HTTPS monitoring port.
// Thin wrapper over startMonitoring with TLS enabled; requires
// opts.TLSConfig to be set (checked by the caller, see Start).
func (s *Server) StartHTTPSMonitoring() {
	s.startMonitoring(true)
}
|
||||
|
||||
// HTTP endpoints
// URL paths served by the monitoring server (see startMonitoring).
const (
	RootPath    = "/"         // landing page
	VarzPath    = "/varz"     // general server stats
	ConnzPath   = "/connz"    // connection details
	RoutezPath  = "/routez"   // route details
	SubszPath   = "/subsz"    // subscription details
	StackszPath = "/stacksz"  // goroutine stack dumps
)
|
||||
|
||||
// Start the monitoring server
// Binds a (possibly TLS) listener on the monitoring port, registers the
// monitoring endpoints on a fresh mux, and serves in a goroutine that
// signals s.done when the listener is closed by Shutdown().
func (s *Server) startMonitoring(secure bool) {

	// Used to track HTTP requests
	// NOTE(review): StackszPath is served below but has no counter entry
	// here — confirm whether that omission is intentional.
	s.httpReqStats = map[string]uint64{
		RootPath:   0,
		VarzPath:   0,
		ConnzPath:  0,
		RoutezPath: 0,
		SubszPath:  0,
	}

	var hp string
	var err error

	if secure {
		hp = net.JoinHostPort(s.opts.HTTPHost, strconv.Itoa(s.opts.HTTPSPort))
		Noticef("Starting https monitor on %s", hp)
		// Copy the TLS config so we can drop the client-cert requirement
		// for monitoring without affecting client connections.
		config := *s.opts.TLSConfig
		config.ClientAuth = tls.NoClientCert
		s.http, err = tls.Listen("tcp", hp, &config)

	} else {
		hp = net.JoinHostPort(s.opts.HTTPHost, strconv.Itoa(s.opts.HTTPPort))
		Noticef("Starting http monitor on %s", hp)
		s.http, err = net.Listen("tcp", hp)
	}

	if err != nil {
		Fatalf("Can't listen to the monitor port: %v", err)
		return
	}

	mux := http.NewServeMux()

	// Root
	mux.HandleFunc(RootPath, s.HandleRoot)
	// Varz
	mux.HandleFunc(VarzPath, s.HandleVarz)
	// Connz
	mux.HandleFunc(ConnzPath, s.HandleConnz)
	// Routez
	mux.HandleFunc(RoutezPath, s.HandleRoutez)
	// Subz
	mux.HandleFunc(SubszPath, s.HandleSubsz)
	// Subz alias for backwards compatibility
	mux.HandleFunc("/subscriptionsz", s.HandleSubsz)
	// Stacksz
	mux.HandleFunc(StackszPath, s.HandleStacksz)

	srv := &http.Server{
		Addr:           hp,
		Handler:        mux,
		ReadTimeout:    2 * time.Second,
		WriteTimeout:   2 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}

	go func() {
		// Serve returns when Shutdown() closes s.http.
		srv.Serve(s.http)
		srv.Handler = nil
		// Signal Shutdown() that this listener has exited.
		s.done <- true
	}()
}
|
||||
|
||||
// createClient wires up a freshly accepted connection as a client:
// sends the INFO line, optionally arms the auth timer, registers the
// client, performs the TLS handshake if required, and finally starts the
// read loop. The lock/unlock interleaving between c.mu and s.mu is
// deliberate and order-sensitive — do not reorder.
func (s *Server) createClient(conn net.Conn) *client {
	c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: s.info.MaxPayload, start: time.Now()}

	// Grab JSON info string
	s.mu.Lock()
	info := s.infoJSON
	authRequired := s.info.AuthRequired
	tlsRequired := s.info.TLSRequired
	s.totalClients++
	s.mu.Unlock()

	// Grab lock
	c.mu.Lock()

	// Initialize
	c.initClient()

	c.Debugf("Client connection created")

	// Check for Auth
	if authRequired {
		// Client must authenticate within this window or be disconnected.
		ttl := secondsToDuration(s.opts.AuthTimeout)
		c.setAuthTimer(ttl)
	}

	// Send our information.
	// Note: INFO is sent before the TLS handshake below.
	c.sendInfo(info)

	// Unlock to register
	c.mu.Unlock()

	// Register with the server.
	s.mu.Lock()
	// If server is not running, Shutdown() may have already gathered the
	// list of connections to close. It won't contain this one, so we need
	// to bail out now otherwise the readLoop started down there would not
	// be interrupted.
	if !s.running {
		s.mu.Unlock()
		return c
	}
	s.clients[c.cid] = c
	s.mu.Unlock()

	// Re-Grab lock
	c.mu.Lock()

	// Check for TLS
	if tlsRequired {
		c.Debugf("Starting TLS client connection handshake")
		c.nc = tls.Server(c.nc, s.opts.TLSConfig)
		conn := c.nc.(*tls.Conn)

		// Setup the timeout
		ttl := secondsToDuration(s.opts.TLSTimeout)
		time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
		conn.SetReadDeadline(time.Now().Add(ttl))

		// Force handshake
		// Unlock so tlsTimeout (and the connection close path) can take c.mu.
		c.mu.Unlock()
		if err := conn.Handshake(); err != nil {
			c.Debugf("TLS handshake error: %v", err)
			c.sendErr("Secure Connection - TLS Required")
			c.closeConnection()
			return nil
		}
		// Reset the read deadline
		conn.SetReadDeadline(time.Time{})

		// Re-Grab lock
		c.mu.Lock()
	}

	// The connection may have been closed
	// (e.g. by tlsTimeout or Shutdown while we were unlocked).
	if c.nc == nil {
		c.mu.Unlock()
		return c
	}

	if tlsRequired {
		// Rewrap bw
		// The buffered writer must target the TLS-wrapped conn, not the raw one.
		c.bw = bufio.NewWriterSize(c.nc, startBufSize)
	}

	// Do final client initialization

	// Set the Ping timer
	c.setPingTimer()

	// Spin up the read loop.
	s.startGoRoutine(func() { c.readLoop() })

	if tlsRequired {
		c.Debugf("TLS handshake complete")
		cs := c.nc.(*tls.Conn).ConnectionState()
		c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
	}

	c.mu.Unlock()

	return c
}
|
||||
|
||||
// Handle closing down a connection when the handshake has timedout.
|
||||
func tlsTimeout(c *client, conn *tls.Conn) {
|
||||
c.mu.Lock()
|
||||
nc := c.nc
|
||||
c.mu.Unlock()
|
||||
// Check if already closed
|
||||
if nc == nil {
|
||||
return
|
||||
}
|
||||
cs := conn.ConnectionState()
|
||||
if !cs.HandshakeComplete {
|
||||
c.Debugf("TLS handshake timeout")
|
||||
c.sendErr("Secure Connection - TLS Required")
|
||||
c.closeConnection()
|
||||
}
|
||||
}
|
||||
|
||||
// Seems silly we have to write these
|
||||
func tlsVersion(ver uint16) string {
|
||||
switch ver {
|
||||
case tls.VersionTLS10:
|
||||
return "1.0"
|
||||
case tls.VersionTLS11:
|
||||
return "1.1"
|
||||
case tls.VersionTLS12:
|
||||
return "1.2"
|
||||
}
|
||||
return fmt.Sprintf("Unknown [%x]", ver)
|
||||
}
|
||||
|
||||
// tlsCipher maps a TLS cipher-suite wire value to its standard name.
// Unrecognized values yield "Unknown [<hex>]". Raw hex values are used so
// the mapping does not depend on which constants a Go version exports.
func tlsCipher(cs uint16) string {
	names := map[uint16]string{
		0x0005: "TLS_RSA_WITH_RC4_128_SHA",
		0x000a: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
		0x002f: "TLS_RSA_WITH_AES_128_CBC_SHA",
		0x0035: "TLS_RSA_WITH_AES_256_CBC_SHA",
		0xc007: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
		0xc009: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
		0xc00a: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
		0xc011: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
		0xc012: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
		0xc013: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
		0xc014: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
		0xc02f: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
		0xc02b: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
		0xc030: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
		0xc02c: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
	}
	if name, ok := names[cs]; ok {
		return name
	}
	return fmt.Sprintf("Unknown [%x]", cs)
}
|
||||
|
||||
func (s *Server) checkClientAuth(c *client) bool {
|
||||
if s.cAuth == nil {
|
||||
return true
|
||||
}
|
||||
return s.cAuth.Check(c)
|
||||
}
|
||||
|
||||
func (s *Server) checkRouterAuth(c *client) bool {
|
||||
if s.rAuth == nil {
|
||||
return true
|
||||
}
|
||||
return s.rAuth.Check(c)
|
||||
}
|
||||
|
||||
// Check auth and return boolean indicating if client is ok
|
||||
func (s *Server) checkAuth(c *client) bool {
|
||||
switch c.typ {
|
||||
case CLIENT:
|
||||
return s.checkClientAuth(c)
|
||||
case ROUTER:
|
||||
return s.checkRouterAuth(c)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Remove a client or route from our internal accounting.
// Snapshots the fields needed under c.mu first, then updates the server
// maps under s.mu — the two locks are never held together.
func (s *Server) removeClient(c *client) {
	var rID string
	c.mu.Lock()
	cid := c.cid
	typ := c.typ
	r := c.route
	if r != nil {
		rID = r.remoteID
	}
	c.mu.Unlock()

	s.mu.Lock()
	switch typ {
	case CLIENT:
		delete(s.clients, cid)
	case ROUTER:
		delete(s.routes, cid)
		if r != nil {
			rc, ok := s.remotes[rID]
			// Only delete it if it is us..
			// (a replacement route may already own this remote id)
			if ok && c == rc {
				delete(s.remotes, rID)
			}
		}
	}
	s.mu.Unlock()
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
// These are some helpers for accounting in functional tests.
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
// NumRoutes will report the number of registered routes.
|
||||
func (s *Server) NumRoutes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.routes)
|
||||
}
|
||||
|
||||
// NumRemotes will report number of registered remotes.
|
||||
func (s *Server) NumRemotes() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.remotes)
|
||||
}
|
||||
|
||||
// NumClients will report the number of registered clients.
|
||||
func (s *Server) NumClients() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.clients)
|
||||
}
|
||||
|
||||
// NumSubscriptions will report how many subscriptions are active.
|
||||
func (s *Server) NumSubscriptions() uint32 {
|
||||
s.mu.Lock()
|
||||
subs := s.sl.Count()
|
||||
s.mu.Unlock()
|
||||
return subs
|
||||
}
|
||||
|
||||
// Addr will return the net.Addr object for the current listener.
|
||||
func (s *Server) Addr() net.Addr {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.listener == nil {
|
||||
return nil
|
||||
}
|
||||
return s.listener.Addr()
|
||||
}
|
||||
|
||||
// GetListenEndpoint will return a string of the form host:port suitable for
|
||||
// a connect. Will return empty string if the server is not ready to accept
|
||||
// client connections.
|
||||
func (s *Server) GetListenEndpoint() string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
// Wait for the listener to be set, see note about RANDOM_PORT below
|
||||
if s.listener == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
host := s.opts.Host
|
||||
|
||||
// On windows, a connect with host "0.0.0.0" (or "::") will fail.
|
||||
// We replace it with "localhost" when that's the case.
|
||||
if host == "0.0.0.0" || host == "::" || host == "[::]" {
|
||||
host = "localhost"
|
||||
}
|
||||
|
||||
// Return the opts's Host and Port. Note that the Port may be set
|
||||
// when the listener is started, due to the use of RANDOM_PORT
|
||||
return net.JoinHostPort(host, strconv.Itoa(s.opts.Port))
|
||||
}
|
||||
|
||||
// GetRouteListenEndpoint will return a string of the form host:port suitable
|
||||
// for a connect. Will return empty string if the server is not configured for
|
||||
// routing or not ready to accept route connections.
|
||||
func (s *Server) GetRouteListenEndpoint() string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if s.routeListener == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
host := s.opts.ClusterHost
|
||||
|
||||
// On windows, a connect with host "0.0.0.0" (or "::") will fail.
|
||||
// We replace it with "localhost" when that's the case.
|
||||
if host == "0.0.0.0" || host == "::" || host == "[::]" {
|
||||
host = "localhost"
|
||||
}
|
||||
|
||||
// Return the cluster's Host and Port.
|
||||
return net.JoinHostPort(host, strconv.Itoa(s.opts.ClusterPort))
|
||||
}
|
||||
|
||||
// ID returns the server's ID
|
||||
func (s *Server) ID() string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.info.ID
|
||||
}
|
||||
|
||||
func (s *Server) startGoRoutine(f func()) {
|
||||
s.grMu.Lock()
|
||||
if s.grRunning {
|
||||
s.grWG.Add(1)
|
||||
go f()
|
||||
}
|
||||
s.grMu.Unlock()
|
||||
}
|
||||
621
vendor/github.com/nats-io/gnatsd/server/sublist.go
generated
vendored
Normal file
621
vendor/github.com/nats-io/gnatsd/server/sublist.go
generated
vendored
Normal file
@@ -0,0 +1,621 @@
|
||||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
// Package sublist is a routing mechanism to handle subject distribution
|
||||
// and provides a facility to match subjects from published messages to
|
||||
// interested subscribers. Subscribers can have wildcard subjects to match
|
||||
// multiple published subjects.
|
||||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Common byte variables for wildcards and token separator.
const (
	pwc   = '*' // partial wildcard: matches exactly one token
	fwc   = '>' // full wildcard: matches the remainder of the subject
	tsep  = "." // token separator (string form)
	btsep = '.' // token separator (byte form, for per-byte scans)
)

// Sublist related errors
var (
	ErrInvalidSubject = errors.New("sublist: Invalid Subject")
	ErrNotFound       = errors.New("sublist: No Matches Found")
)

// cacheMax is used to bound limit the frontend cache
const slCacheMax = 1024
|
||||
|
||||
// A result structure better optimized for queue subs.
type SublistResult struct {
	psubs []*subscription
	qsubs [][]*subscription // don't make this a map, too expensive to iterate
}

// A Sublist stores and efficiently retrieves subscriptions.
// Structure: a token trie rooted at root, with a bounded front cache of
// subject -> result. genid/matches/cacheHits are updated atomically;
// everything else is guarded by the embedded RWMutex.
type Sublist struct {
	sync.RWMutex
	genid     uint64 // bumped on every insert/remove so readers can detect change
	matches   uint64
	cacheHits uint64
	inserts   uint64
	removes   uint64
	cache     map[string]*SublistResult // bounded by slCacheMax
	root      *level
	count     uint32 // number of subscriptions currently stored
}
|
||||
|
||||
// A node contains subscriptions and a pointer to the next level.
type node struct {
	next  *level
	psubs []*subscription   // plain subscriptions terminating at this node
	qsubs [][]*subscription // queue subscriptions, grouped by queue name
}

// A level represents a group of nodes and special pointers to
// wildcard nodes.
type level struct {
	nodes    map[string]*node // literal token -> node
	pwc, fwc *node            // '*' and '>' wildcard branches
}
|
||||
|
||||
// Create a new default node.
|
||||
func newNode() *node {
|
||||
return &node{psubs: make([]*subscription, 0, 4)}
|
||||
}
|
||||
|
||||
// Create a new default level. We use FNV1A as the hash
|
||||
// algortihm for the tokens, which should be short.
|
||||
func newLevel() *level {
|
||||
return &level{nodes: make(map[string]*node)}
|
||||
}
|
||||
|
||||
// New will create a default sublist
|
||||
func NewSublist() *Sublist {
|
||||
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
|
||||
}
|
||||
|
||||
// Insert adds a subscription into the sublist
// Returns ErrInvalidSubject for empty tokens or tokens after a '>'.
func (s *Sublist) Insert(sub *subscription) error {
	// copy the subject since we hold this and this might be part of a large byte slice.
	subject := string(sub.subject)
	// Tokenize on '.' into a stack-backed slice (no heap allocation for
	// subjects with up to 32 tokens).
	tsa := [32]string{}
	tokens := tsa[:0]
	start := 0
	for i := 0; i < len(subject); i++ {
		if subject[i] == btsep {
			tokens = append(tokens, subject[start:i])
			start = i + 1
		}
	}
	tokens = append(tokens, subject[start:])

	s.Lock()

	// sfwc records that a full wildcard ('>') has been seen; any
	// subsequent token makes the subject invalid.
	sfwc := false
	l := s.root
	var n *node

	for _, t := range tokens {
		if len(t) == 0 || sfwc {
			s.Unlock()
			return ErrInvalidSubject
		}

		// NOTE(review): only the first byte of the token is inspected, so
		// a token like "*x" takes the wildcard branch — confirm upstream
		// subject validation prevents this.
		switch t[0] {
		case pwc:
			n = l.pwc
		case fwc:
			n = l.fwc
			sfwc = true
		default:
			n = l.nodes[t]
		}
		// Create the branch on first use.
		if n == nil {
			n = newNode()
			switch t[0] {
			case pwc:
				l.pwc = n
			case fwc:
				l.fwc = n
			default:
				l.nodes[t] = n
			}
		}
		if n.next == nil {
			n.next = newLevel()
		}
		l = n.next
	}
	// n is the terminal node for this subject; attach the subscription.
	if sub.queue == nil {
		n.psubs = append(n.psubs, sub)
	} else {
		// This is a queue subscription
		if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
			n.qsubs[i] = append(n.qsubs[i], sub)
		} else {
			n.qsubs = append(n.qsubs, []*subscription{sub})
		}
	}

	s.count++
	s.inserts++

	// Patch any cached results this subject matches, and bump the
	// generation id so observers can detect the change.
	s.addToCache(subject, sub)
	atomic.AddUint64(&s.genid, 1)

	s.Unlock()
	return nil
}
|
||||
|
||||
// Deep copy
|
||||
func copyResult(r *SublistResult) *SublistResult {
|
||||
nr := &SublistResult{}
|
||||
nr.psubs = append([]*subscription(nil), r.psubs...)
|
||||
for _, qr := range r.qsubs {
|
||||
nqr := append([]*subscription(nil), qr...)
|
||||
nr.qsubs = append(nr.qsubs, nqr)
|
||||
}
|
||||
return nr
|
||||
}
|
||||
|
||||
// addToCache will add the new entry to existing cache
// entries if needed. Assumes write lock is held.
// Cached results may be referenced by readers, so a matching entry is
// deep-copied before the new subscription is appended.
func (s *Sublist) addToCache(subject string, sub *subscription) {
	for k, r := range s.cache {
		if matchLiteral(k, subject) {
			// Copy since others may have a reference.
			nr := copyResult(r)
			if sub.queue == nil {
				nr.psubs = append(nr.psubs, sub)
			} else {
				// Queue sub: append to the matching queue group or start one.
				if i := findQSliceForSub(sub, nr.qsubs); i >= 0 {
					nr.qsubs[i] = append(nr.qsubs[i], sub)
				} else {
					nr.qsubs = append(nr.qsubs, []*subscription{sub})
				}
			}
			s.cache[k] = nr
		}
	}
}
|
||||
|
||||
// removeFromCache will remove the sub from any active cache entries.
|
||||
// Assumes write lock is held.
|
||||
func (s *Sublist) removeFromCache(subject string, sub *subscription) {
|
||||
for k, _ := range s.cache {
|
||||
if !matchLiteral(k, subject) {
|
||||
continue
|
||||
}
|
||||
// Since someone else may be referecing, can't modify the list
|
||||
// safely, just let it re-populate.
|
||||
delete(s.cache, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Match will match all entries to the literal subject.
|
||||
// It will return a set of results for both normal and queue subscribers.
|
||||
func (s *Sublist) Match(subject string) *SublistResult {
|
||||
s.RLock()
|
||||
atomic.AddUint64(&s.matches, 1)
|
||||
rc, ok := s.cache[subject]
|
||||
s.RUnlock()
|
||||
if ok {
|
||||
atomic.AddUint64(&s.cacheHits, 1)
|
||||
return rc
|
||||
}
|
||||
|
||||
tsa := [32]string{}
|
||||
tokens := tsa[:0]
|
||||
start := 0
|
||||
for i := 0; i < len(subject); i++ {
|
||||
if subject[i] == btsep {
|
||||
tokens = append(tokens, subject[start:i])
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
tokens = append(tokens, subject[start:])
|
||||
|
||||
// FIXME(dlc) - Make shared pool between sublist and client readLoop?
|
||||
result := &SublistResult{}
|
||||
|
||||
s.Lock()
|
||||
matchLevel(s.root, tokens, result)
|
||||
|
||||
// Add to our cache
|
||||
s.cache[subject] = result
|
||||
// Bound the number of entries to sublistMaxCache
|
||||
if len(s.cache) > slCacheMax {
|
||||
for k, _ := range s.cache {
|
||||
delete(s.cache, k)
|
||||
break
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// This will add in a node's results to the total results.
// Plain subs are appended directly; queue subs are merged into the result's
// existing group for the same queue name, or added as a new group.
func addNodeToResults(n *node, results *SublistResult) {
	results.psubs = append(results.psubs, n.psubs...)
	for _, qr := range n.qsubs {
		if len(qr) == 0 {
			continue
		}
		// Need to find matching list in results
		// (qr[0] is representative: all subs in a group share a queue name).
		if i := findQSliceForSub(qr[0], results.qsubs); i >= 0 {
			results.qsubs[i] = append(results.qsubs[i], qr...)
		} else {
			results.qsubs = append(results.qsubs, qr)
		}
	}
}
|
||||
|
||||
// We do not use a map here since we want iteration to be past when
|
||||
// processing publishes in L1 on client. So we need to walk sequentially
|
||||
// for now. Keep an eye on this in case we start getting large number of
|
||||
// different queue subscribers for the same subject.
|
||||
func findQSliceForSub(sub *subscription, qsl [][]*subscription) int {
|
||||
if sub.queue == nil {
|
||||
return -1
|
||||
}
|
||||
for i, qr := range qsl {
|
||||
if len(qr) > 0 && bytes.Equal(sub.queue, qr[0].queue) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// matchLevel is used to recursively descend into the trie.
// At each level: the '>' branch matches everything remaining, the '*'
// branch is followed recursively with the rest of the tokens, and the
// literal branch is followed iteratively. Terminal literal and '*' nodes
// contribute their subscriptions after the loop.
func matchLevel(l *level, toks []string, results *SublistResult) {
	var pwc, n *node
	for i, t := range toks {
		if l == nil {
			return
		}
		// Full wildcard: matches this token and everything after it.
		if l.fwc != nil {
			addNodeToResults(l.fwc, results)
		}
		// Partial wildcard: matches this token; recurse for the rest.
		if pwc = l.pwc; pwc != nil {
			matchLevel(pwc.next, toks[i+1:], results)
		}
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	if n != nil {
		addNodeToResults(n, results)
	}
	if pwc != nil {
		// '*' as the last token also terminates here.
		addNodeToResults(pwc, results)
	}
}
|
||||
|
||||
// lnt is used to track descent into levels for a removal for pruning.
type lnt struct {
	l *level // level visited
	n *node  // node taken at that level
	t string // token that selected n (used for map deletion when pruning)
}
|
||||
|
||||
// Remove will remove a subscription.
// Walks the trie recording the path, removes the sub from its terminal
// node, then prunes now-empty nodes bottom-up and invalidates matching
// cache entries. Returns ErrInvalidSubject or ErrNotFound on failure.
func (s *Sublist) Remove(sub *subscription) error {
	subject := string(sub.subject)
	// Tokenize on '.' into a stack-backed slice.
	tsa := [32]string{}
	tokens := tsa[:0]
	start := 0
	for i := 0; i < len(subject); i++ {
		if subject[i] == btsep {
			tokens = append(tokens, subject[start:i])
			start = i + 1
		}
	}
	tokens = append(tokens, subject[start:])

	s.Lock()
	defer s.Unlock()

	// sfwc: a '>' was seen; any further token is invalid.
	sfwc := false
	l := s.root
	var n *node

	// Track levels for pruning
	var lnts [32]lnt
	levels := lnts[:0]

	for _, t := range tokens {
		if len(t) == 0 || sfwc {
			return ErrInvalidSubject
		}
		if l == nil {
			return ErrNotFound
		}
		switch t[0] {
		case pwc:
			n = l.pwc
		case fwc:
			n = l.fwc
			sfwc = true
		default:
			n = l.nodes[t]
		}
		if n != nil {
			levels = append(levels, lnt{l, n, t})
			l = n.next
		} else {
			l = nil
		}
	}
	if !s.removeFromNode(n, sub) {
		return ErrNotFound
	}

	s.count--
	s.removes++

	// Prune empty nodes from the leaf back toward the root.
	for i := len(levels) - 1; i >= 0; i-- {
		l, n, t := levels[i].l, levels[i].n, levels[i].t
		if n.isEmpty() {
			l.pruneNode(n, t)
		}
	}
	// Drop stale cache entries and bump the generation id.
	s.removeFromCache(subject, sub)
	atomic.AddUint64(&s.genid, 1)

	return nil
}
|
||||
|
||||
// pruneNode is used to prune an empty node from the tree.
|
||||
func (l *level) pruneNode(n *node, t string) {
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
if n == l.fwc {
|
||||
l.fwc = nil
|
||||
} else if n == l.pwc {
|
||||
l.pwc = nil
|
||||
} else {
|
||||
delete(l.nodes, t)
|
||||
}
|
||||
}
|
||||
|
||||
// isEmpty will test if the node has any entries. Used
|
||||
// in pruning.
|
||||
func (n *node) isEmpty() bool {
|
||||
if len(n.psubs) == 0 && len(n.qsubs) == 0 {
|
||||
if n.next == nil || n.next.numNodes() == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Return the number of nodes for the given level.
|
||||
func (l *level) numNodes() int {
|
||||
num := len(l.nodes)
|
||||
if l.pwc != nil {
|
||||
num++
|
||||
}
|
||||
if l.fwc != nil {
|
||||
num++
|
||||
}
|
||||
return num
|
||||
}
|
||||
|
||||
// Removes a sub from a list.
|
||||
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
|
||||
for i := 0; i < len(sl); i++ {
|
||||
if sl[i] == sub {
|
||||
last := len(sl) - 1
|
||||
sl[i] = sl[last]
|
||||
sl[last] = nil
|
||||
sl = sl[:last]
|
||||
return shrinkAsNeeded(sl), true
|
||||
}
|
||||
}
|
||||
return sl, false
|
||||
}
|
||||
|
||||
// removeFromNode removes sub from the given node, handling both plain and
// queue-group subscriptions. Returns whether the subscription was found.
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
	if n == nil {
		return false
	}
	// Plain subscription: remove it directly from the node's psubs list.
	if sub.queue == nil {
		n.psubs, found = removeSubFromList(sub, n.psubs)
		return found
	}

	// We have a queue group subscription here
	if i := findQSliceForSub(sub, n.qsubs); i >= 0 {
		n.qsubs[i], found = removeSubFromList(sub, n.qsubs[i])
		// If the queue group is now empty, drop the whole group by
		// swapping in the last group (group order is not significant).
		if len(n.qsubs[i]) == 0 {
			last := len(n.qsubs) - 1
			n.qsubs[i] = n.qsubs[last]
			n.qsubs[last] = nil
			n.qsubs = n.qsubs[:last]
			// Nil out the outer slice entirely once no groups remain,
			// so isEmpty() treats the node as prunable.
			if len(n.qsubs) == 0 {
				n.qsubs = nil
			}
		}
		return found
	}
	return false
}
|
||||
|
||||
// Checks if we need to do a resize. This is for very large growth then
|
||||
// subsequent return to a more normal size from unsubscribe.
|
||||
func shrinkAsNeeded(sl []*subscription) []*subscription {
|
||||
lsl := len(sl)
|
||||
csl := cap(sl)
|
||||
// Don't bother if list not too big
|
||||
if csl <= 8 {
|
||||
return sl
|
||||
}
|
||||
pFree := float32(csl-lsl) / float32(csl)
|
||||
if pFree > 0.50 {
|
||||
return append([]*subscription(nil), sl...)
|
||||
}
|
||||
return sl
|
||||
}
|
||||
|
||||
// Count returns the number of subscriptions currently held in the sublist.
// Safe for concurrent use; it takes the read lock.
func (s *Sublist) Count() uint32 {
	s.RLock()
	defer s.RUnlock()
	return s.count
}
|
||||
|
||||
// CacheCount returns the number of result sets in the match cache.
// Safe for concurrent use; it takes the read lock.
func (s *Sublist) CacheCount() int {
	s.RLock()
	defer s.RUnlock()
	return len(s.cache)
}
|
||||
|
||||
// SublistStats are public stats for the sublist.
type SublistStats struct {
	NumSubs      uint32  `json:"num_subscriptions"` // current subscription count
	NumCache     uint32  `json:"num_cache"`         // result sets held in the match cache
	NumInserts   uint64  `json:"num_inserts"`       // total inserts since creation
	NumRemoves   uint64  `json:"num_removes"`       // total removes since creation
	NumMatches   uint64  `json:"num_matches"`       // total match calls since creation
	CacheHitRate float64 `json:"cache_hit_rate"`    // fraction of matches served from the cache
	MaxFanout    uint32  `json:"max_fanout"`        // largest cached result-set size
	AvgFanout    float64 `json:"avg_fanout"`        // mean cached result-set size
}
|
||||
|
||||
// Stats will return a stats structure for the current state.
// It takes the full (write) lock so the snapshot is consistent.
func (s *Sublist) Stats() *SublistStats {
	s.Lock()
	defer s.Unlock()

	st := &SublistStats{}
	st.NumSubs = s.count
	st.NumCache = uint32(len(s.cache))
	st.NumInserts = s.inserts
	st.NumRemoves = s.removes
	st.NumMatches = s.matches
	// Guard the division: no matches yet means a 0 hit rate.
	if s.matches > 0 {
		st.CacheHitRate = float64(s.cacheHits) / float64(s.matches)
	}
	// whip through cache for fanout stats
	tot, max := 0, 0
	for _, r := range s.cache {
		l := len(r.psubs) + len(r.qsubs)
		tot += l
		if l > max {
			max = l
		}
	}
	st.MaxFanout = uint32(max)
	// tot > 0 implies len(s.cache) > 0, so the division is safe.
	if tot > 0 {
		st.AvgFanout = float64(tot) / float64(len(s.cache))
	}
	return st
}
|
||||
|
||||
// numLevels will return the maximum number of levels
// contained in the Sublist tree, computed by a recursive walk from the root.
func (s *Sublist) numLevels() int {
	return visitLevel(s.root, 0)
}
|
||||
|
||||
// visitLevel is used to descend the Sublist tree structure
|
||||
// recursively.
|
||||
func visitLevel(l *level, depth int) int {
|
||||
if l == nil || l.numNodes() == 0 {
|
||||
return depth
|
||||
}
|
||||
|
||||
depth++
|
||||
maxDepth := depth
|
||||
|
||||
for _, n := range l.nodes {
|
||||
if n == nil {
|
||||
continue
|
||||
}
|
||||
newDepth := visitLevel(n.next, depth)
|
||||
if newDepth > maxDepth {
|
||||
maxDepth = newDepth
|
||||
}
|
||||
}
|
||||
if l.pwc != nil {
|
||||
pwcDepth := visitLevel(l.pwc.next, depth)
|
||||
if pwcDepth > maxDepth {
|
||||
maxDepth = pwcDepth
|
||||
}
|
||||
}
|
||||
if l.fwc != nil {
|
||||
fwcDepth := visitLevel(l.fwc.next, depth)
|
||||
if fwcDepth > maxDepth {
|
||||
maxDepth = fwcDepth
|
||||
}
|
||||
}
|
||||
return maxDepth
|
||||
}
|
||||
|
||||
// IsValidLiteralSubject returns true if a subject is valid, false otherwise
|
||||
func IsValidLiteralSubject(subject string) bool {
|
||||
tokens := strings.Split(string(subject), tsep)
|
||||
for _, t := range tokens {
|
||||
if len(t) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(t) > 1 {
|
||||
continue
|
||||
}
|
||||
switch t[0] {
|
||||
case pwc, fwc:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// matchLiteral is used to test literal subjects, those that do not have any
|
||||
// wildcards, with a target subject. This is used in the cache layer.
|
||||
func matchLiteral(literal, subject string) bool {
|
||||
li := 0
|
||||
ll := len(literal)
|
||||
for i := 0; i < len(subject); i++ {
|
||||
if li >= ll {
|
||||
return false
|
||||
}
|
||||
b := subject[i]
|
||||
switch b {
|
||||
case pwc:
|
||||
// Skip token in literal
|
||||
ll := len(literal)
|
||||
for {
|
||||
if li >= ll || literal[li] == btsep {
|
||||
li--
|
||||
break
|
||||
}
|
||||
li++
|
||||
}
|
||||
case fwc:
|
||||
return true
|
||||
default:
|
||||
if b != literal[li] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
li++
|
||||
}
|
||||
// Make sure we have processed all of the literal's chars..
|
||||
if li < ll {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
56
vendor/github.com/nats-io/gnatsd/server/util.go
generated
vendored
Normal file
56
vendor/github.com/nats-io/gnatsd/server/util.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nuid"
|
||||
)
|
||||
|
||||
// genID returns a new unique identifier string, delegating to the
// global nuid generator.
func genID() string {
	return nuid.Next()
}
|
||||
|
||||
// Ascii numbers 0-9: the ASCII codes for '0' (48) and '9' (57), used by
// the byte-slice number parsers below.
const (
	asciiZero = 48
	asciiNine = 57
)
|
||||
|
||||
// parseSize expects decimal positive numbers. We
// return -1 to signal error (empty input or any non-digit byte).
//
// NOTE(review): overflow is not detected; inputs are assumed to fit in an
// int.
func parseSize(d []byte) (n int) {
	if len(d) == 0 {
		return -1
	}
	for _, dec := range d {
		// Byte literals '0'/'9' are clearer than raw ASCII codes.
		if dec < '0' || dec > '9' {
			return -1
		}
		n = n*10 + int(dec-'0')
	}
	return n
}
|
||||
|
||||
// parseInt64 expects decimal positive numbers. We
// return -1 to signal error (empty input or any non-digit byte).
//
// NOTE(review): overflow is not detected; inputs are assumed to fit in an
// int64.
func parseInt64(d []byte) (n int64) {
	if len(d) == 0 {
		return -1
	}
	for _, dec := range d {
		// Byte literals '0'/'9' are clearer than raw ASCII codes.
		if dec < '0' || dec > '9' {
			return -1
		}
		n = n*10 + int64(dec-'0')
	}
	return n
}
|
||||
|
||||
// secondsToDuration converts a floating point number of seconds into a
// time.Duration.
func secondsToDuration(seconds float64) time.Duration {
	return time.Duration(seconds * float64(time.Second))
}
|
||||
20
vendor/github.com/nats-io/gnatsd/test/LICENSE
generated
vendored
Normal file
20
vendor/github.com/nats-io/gnatsd/test/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
408
vendor/github.com/nats-io/gnatsd/test/test.go
generated
vendored
Normal file
408
vendor/github.com/nats-io/gnatsd/test/test.go
generated
vendored
Normal file
@@ -0,0 +1,408 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/auth"
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
)
|
||||
|
||||
const natsServerExe = "../gnatsd"
|
||||
|
||||
type natsServer struct {
|
||||
args []string
|
||||
cmd *exec.Cmd
|
||||
}
|
||||
|
||||
// tLogger is the minimal logging interface shared by testing.T and
// testing.B, so these helpers work in both tests and benchmarks.
type tLogger interface {
	Fatalf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
}
|
||||
|
||||
// DefaultTestOptions are default options for the unit tests.
// Logging and signal handling are disabled to keep test runs quiet and
// self-contained.
var DefaultTestOptions = server.Options{
	Host:   "localhost",
	Port:   4222,
	NoLog:  true,
	NoSigs: true,
}
|
||||
|
||||
// RunDefaultServer starts a new Go routine based server using the default
// test options (localhost:4222, no logging, no signal handling).
func RunDefaultServer() *server.Server {
	return RunServer(&DefaultTestOptions)
}
|
||||
|
||||
// RunServer starts a new Go routine based server with the given options
// and no authentication.
func RunServer(opts *server.Options) *server.Server {
	return RunServerWithAuth(opts, nil)
}
|
||||
|
||||
// LoadConfig loads a configuration from a filename
|
||||
func LoadConfig(configFile string) (opts *server.Options) {
|
||||
opts, err := server.ProcessConfigFile(configFile)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error processing configuration file: %v", err))
|
||||
}
|
||||
opts.NoSigs, opts.NoLog = true, true
|
||||
return
|
||||
}
|
||||
|
||||
// RunServerWithConfig starts a new Go routine based server with a configuration file.
|
||||
func RunServerWithConfig(configFile string) (srv *server.Server, opts *server.Options) {
|
||||
opts = LoadConfig(configFile)
|
||||
|
||||
// Check for auth
|
||||
var a server.Auth
|
||||
if opts.Authorization != "" {
|
||||
a = &auth.Token{Token: opts.Authorization}
|
||||
}
|
||||
if opts.Username != "" {
|
||||
a = &auth.Plain{Username: opts.Username, Password: opts.Password}
|
||||
}
|
||||
if opts.Users != nil {
|
||||
a = auth.NewMultiUser(opts.Users)
|
||||
}
|
||||
srv = RunServerWithAuth(opts, a)
|
||||
return
|
||||
}
|
||||
|
||||
// RunServerWithAuth starts a new Go routine based server with auth
|
||||
func RunServerWithAuth(opts *server.Options, auth server.Auth) *server.Server {
|
||||
if opts == nil {
|
||||
opts = &DefaultTestOptions
|
||||
}
|
||||
s := server.New(opts)
|
||||
if s == nil {
|
||||
panic("No NATS Server object returned.")
|
||||
}
|
||||
|
||||
if auth != nil {
|
||||
s.SetClientAuthMethod(auth)
|
||||
}
|
||||
|
||||
// Run server in Go routine.
|
||||
go s.Start()
|
||||
|
||||
end := time.Now().Add(10 * time.Second)
|
||||
for time.Now().Before(end) {
|
||||
addr := s.GetListenEndpoint()
|
||||
if addr == "" {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
// Retry. We might take a little while to open a connection.
|
||||
continue
|
||||
}
|
||||
conn, err := net.Dial("tcp", addr)
|
||||
if err != nil {
|
||||
// Retry after 50ms
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
conn.Close()
|
||||
// Wait a bit to give a chance to the server to remove this
|
||||
// "client" from its state, which may otherwise interfere with
|
||||
// some tests.
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
return s
|
||||
}
|
||||
panic("Unable to start NATS Server in Go Routine")
|
||||
}
|
||||
|
||||
func stackFatalf(t tLogger, f string, args ...interface{}) {
|
||||
lines := make([]string, 0, 32)
|
||||
msg := fmt.Sprintf(f, args...)
|
||||
lines = append(lines, msg)
|
||||
|
||||
// Ignore ourselves
|
||||
_, testFile, _, _ := runtime.Caller(0)
|
||||
|
||||
// Generate the Stack of callers:
|
||||
for i := 0; true; i++ {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if file == testFile {
|
||||
continue
|
||||
}
|
||||
msg := fmt.Sprintf("%d - %s:%d", i, file, line)
|
||||
lines = append(lines, msg)
|
||||
}
|
||||
|
||||
t.Fatalf("%s", strings.Join(lines, "\n"))
|
||||
}
|
||||
|
||||
func acceptRouteConn(t tLogger, host string, timeout time.Duration) net.Conn {
|
||||
l, e := net.Listen("tcp", host)
|
||||
if e != nil {
|
||||
stackFatalf(t, "Error listening for route connection on %v: %v", host, e)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
tl := l.(*net.TCPListener)
|
||||
tl.SetDeadline(time.Now().Add(timeout))
|
||||
conn, err := l.Accept()
|
||||
tl.SetDeadline(time.Time{})
|
||||
|
||||
if err != nil {
|
||||
stackFatalf(t, "Did not receive a route connection request: %v", err)
|
||||
}
|
||||
return conn
|
||||
}
|
||||
|
||||
// createRouteConn opens a raw TCP connection to a route port. Route
// connection setup uses the same dialing as a client connection, so the
// client helper is reused.
func createRouteConn(t tLogger, host string, port int) net.Conn {
	return createClientConn(t, host, port)
}
|
||||
|
||||
func createClientConn(t tLogger, host string, port int) net.Conn {
|
||||
addr := fmt.Sprintf("%s:%d", host, port)
|
||||
c, err := net.DialTimeout("tcp", addr, 1*time.Second)
|
||||
if err != nil {
|
||||
stackFatalf(t, "Could not connect to server: %v\n", err)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func checkSocket(t tLogger, addr string, wait time.Duration) {
|
||||
end := time.Now().Add(wait)
|
||||
for time.Now().Before(end) {
|
||||
conn, err := net.Dial("tcp", addr)
|
||||
if err != nil {
|
||||
// Retry after 50ms
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
conn.Close()
|
||||
// Wait a bit to give a chance to the server to remove this
|
||||
// "client" from its state, which may otherwise interfere with
|
||||
// some tests.
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
return
|
||||
}
|
||||
// We have failed to bind the socket in the time allowed.
|
||||
t.Fatalf("Failed to connect to the socket: %q", addr)
|
||||
}
|
||||
|
||||
func checkInfoMsg(t tLogger, c net.Conn) server.Info {
|
||||
buf := expectResult(t, c, infoRe)
|
||||
js := infoRe.FindAllSubmatch(buf, 1)[0][1]
|
||||
var sinfo server.Info
|
||||
err := json.Unmarshal(js, &sinfo)
|
||||
if err != nil {
|
||||
stackFatalf(t, "Could not unmarshal INFO json: %v\n", err)
|
||||
}
|
||||
return sinfo
|
||||
}
|
||||
|
||||
func doConnect(t tLogger, c net.Conn, verbose, pedantic, ssl bool) {
|
||||
checkInfoMsg(t, c)
|
||||
cs := fmt.Sprintf("CONNECT {\"verbose\":%v,\"pedantic\":%v,\"ssl_required\":%v}\r\n", verbose, pedantic, ssl)
|
||||
sendProto(t, c, cs)
|
||||
}
|
||||
|
||||
func doDefaultConnect(t tLogger, c net.Conn) {
|
||||
// Basic Connect
|
||||
doConnect(t, c, false, false, false)
|
||||
}
|
||||
|
||||
const connectProto = "CONNECT {\"verbose\":false,\"user\":\"%s\",\"pass\":\"%s\",\"name\":\"%s\"}\r\n"
|
||||
|
||||
func doRouteAuthConnect(t tLogger, c net.Conn, user, pass, id string) {
|
||||
cs := fmt.Sprintf(connectProto, user, pass, id)
|
||||
sendProto(t, c, cs)
|
||||
}
|
||||
|
||||
func setupRouteEx(t tLogger, c net.Conn, opts *server.Options, id string) (sendFun, expectFun) {
|
||||
user := opts.ClusterUsername
|
||||
pass := opts.ClusterPassword
|
||||
doRouteAuthConnect(t, c, user, pass, id)
|
||||
return sendCommand(t, c), expectCommand(t, c)
|
||||
}
|
||||
|
||||
func setupRoute(t tLogger, c net.Conn, opts *server.Options) (sendFun, expectFun) {
|
||||
u := make([]byte, 16)
|
||||
io.ReadFull(rand.Reader, u)
|
||||
id := fmt.Sprintf("ROUTER:%s", hex.EncodeToString(u))
|
||||
return setupRouteEx(t, c, opts, id)
|
||||
}
|
||||
|
||||
func setupConn(t tLogger, c net.Conn) (sendFun, expectFun) {
|
||||
doDefaultConnect(t, c)
|
||||
return sendCommand(t, c), expectCommand(t, c)
|
||||
}
|
||||
|
||||
type sendFun func(string)
|
||||
type expectFun func(*regexp.Regexp) []byte
|
||||
|
||||
// Closure version for easier reading
|
||||
func sendCommand(t tLogger, c net.Conn) sendFun {
|
||||
return func(op string) {
|
||||
sendProto(t, c, op)
|
||||
}
|
||||
}
|
||||
|
||||
// Closure version for easier reading
|
||||
func expectCommand(t tLogger, c net.Conn) expectFun {
|
||||
return func(re *regexp.Regexp) []byte {
|
||||
return expectResult(t, c, re)
|
||||
}
|
||||
}
|
||||
|
||||
// Send the protocol command to the server.
|
||||
func sendProto(t tLogger, c net.Conn, op string) {
|
||||
n, err := c.Write([]byte(op))
|
||||
if err != nil {
|
||||
stackFatalf(t, "Error writing command to conn: %v\n", err)
|
||||
}
|
||||
if n != len(op) {
|
||||
stackFatalf(t, "Partial write: %d vs %d\n", n, len(op))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
infoRe = regexp.MustCompile(`INFO\s+([^\r\n]+)\r\n`)
|
||||
pingRe = regexp.MustCompile(`PING\r\n`)
|
||||
pongRe = regexp.MustCompile(`PONG\r\n`)
|
||||
msgRe = regexp.MustCompile(`(?:(?:MSG\s+([^\s]+)\s+([^\s]+)\s+(([^\s]+)[^\S\r\n]+)?(\d+)\s*\r\n([^\\r\\n]*?)\r\n)+?)`)
|
||||
okRe = regexp.MustCompile(`\A\+OK\r\n`)
|
||||
errRe = regexp.MustCompile(`\A\-ERR\s+([^\r\n]+)\r\n`)
|
||||
subRe = regexp.MustCompile(`SUB\s+([^\s]+)((\s+)([^\s]+))?\s+([^\s]+)\r\n`)
|
||||
unsubRe = regexp.MustCompile(`UNSUB\s+([^\s]+)(\s+(\d+))?\r\n`)
|
||||
unsubmaxRe = regexp.MustCompile(`UNSUB\s+([^\s]+)(\s+(\d+))\r\n`)
|
||||
unsubnomaxRe = regexp.MustCompile(`UNSUB\s+([^\s]+)\r\n`)
|
||||
connectRe = regexp.MustCompile(`CONNECT\s+([^\r\n]+)\r\n`)
|
||||
)
|
||||
|
||||
const (
|
||||
subIndex = 1
|
||||
sidIndex = 2
|
||||
replyIndex = 4
|
||||
lenIndex = 5
|
||||
msgIndex = 6
|
||||
)
|
||||
|
||||
// Test result from server against regexp
|
||||
func expectResult(t tLogger, c net.Conn, re *regexp.Regexp) []byte {
|
||||
expBuf := make([]byte, 32768)
|
||||
// Wait for commands to be processed and results queued for read
|
||||
c.SetReadDeadline(time.Now().Add(2 * time.Second))
|
||||
n, err := c.Read(expBuf)
|
||||
c.SetReadDeadline(time.Time{})
|
||||
|
||||
if n <= 0 && err != nil {
|
||||
stackFatalf(t, "Error reading from conn: %v\n", err)
|
||||
}
|
||||
buf := expBuf[:n]
|
||||
|
||||
if !re.Match(buf) {
|
||||
stackFatalf(t, "Response did not match expected: \n\tReceived:'%q'\n\tExpected:'%s'\n", buf, re)
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
func expectNothing(t tLogger, c net.Conn) {
|
||||
expBuf := make([]byte, 32)
|
||||
c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
|
||||
n, err := c.Read(expBuf)
|
||||
c.SetReadDeadline(time.Time{})
|
||||
if err == nil && n > 0 {
|
||||
stackFatalf(t, "Expected nothing, received: '%q'\n", expBuf[:n])
|
||||
}
|
||||
}
|
||||
|
||||
// This will check that we got what we expected.
|
||||
func checkMsg(t tLogger, m [][]byte, subject, sid, reply, len, msg string) {
|
||||
if string(m[subIndex]) != subject {
|
||||
stackFatalf(t, "Did not get correct subject: expected '%s' got '%s'\n", subject, m[subIndex])
|
||||
}
|
||||
if sid != "" && string(m[sidIndex]) != sid {
|
||||
stackFatalf(t, "Did not get correct sid: expected '%s' got '%s'\n", sid, m[sidIndex])
|
||||
}
|
||||
if string(m[replyIndex]) != reply {
|
||||
stackFatalf(t, "Did not get correct reply: expected '%s' got '%s'\n", reply, m[replyIndex])
|
||||
}
|
||||
if string(m[lenIndex]) != len {
|
||||
stackFatalf(t, "Did not get correct msg length: expected '%s' got '%s'\n", len, m[lenIndex])
|
||||
}
|
||||
if string(m[msgIndex]) != msg {
|
||||
stackFatalf(t, "Did not get correct msg: expected '%s' got '%s'\n", msg, m[msgIndex])
|
||||
}
|
||||
}
|
||||
|
||||
// Closure for expectMsgs
|
||||
func expectMsgsCommand(t tLogger, ef expectFun) func(int) [][][]byte {
|
||||
return func(expected int) [][][]byte {
|
||||
buf := ef(msgRe)
|
||||
matches := msgRe.FindAllSubmatch(buf, -1)
|
||||
if len(matches) != expected {
|
||||
stackFatalf(t, "Did not get correct # msgs: %d vs %d\n", len(matches), expected)
|
||||
}
|
||||
return matches
|
||||
}
|
||||
}
|
||||
|
||||
// This will check that the matches include at least one of the sids. Useful for checking
|
||||
// that we received messages on a certain queue group.
|
||||
func checkForQueueSid(t tLogger, matches [][][]byte, sids []string) {
|
||||
seen := make(map[string]int, len(sids))
|
||||
for _, sid := range sids {
|
||||
seen[sid] = 0
|
||||
}
|
||||
for _, m := range matches {
|
||||
sid := string(m[sidIndex])
|
||||
if _, ok := seen[sid]; ok {
|
||||
seen[sid]++
|
||||
}
|
||||
}
|
||||
// Make sure we only see one and exactly one.
|
||||
total := 0
|
||||
for _, n := range seen {
|
||||
total += n
|
||||
}
|
||||
if total != 1 {
|
||||
stackFatalf(t, "Did not get a msg for queue sids group: expected 1 got %d\n", total)
|
||||
}
|
||||
}
|
||||
|
||||
// This will check that the matches include all of the sids. Useful for checking
|
||||
// that we received messages on all subscribers.
|
||||
func checkForPubSids(t tLogger, matches [][][]byte, sids []string) {
|
||||
seen := make(map[string]int, len(sids))
|
||||
for _, sid := range sids {
|
||||
seen[sid] = 0
|
||||
}
|
||||
for _, m := range matches {
|
||||
sid := string(m[sidIndex])
|
||||
if _, ok := seen[sid]; ok {
|
||||
seen[sid]++
|
||||
}
|
||||
}
|
||||
// Make sure we only see one and exactly one for each sid.
|
||||
for sid, n := range seen {
|
||||
if n != 1 {
|
||||
stackFatalf(t, "Did not get a msg for sid[%s]: expected 1 got %d\n", sid, n)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to generate next opts to make sure no port conflicts etc.
|
||||
func nextServerOpts(opts *server.Options) *server.Options {
|
||||
nopts := *opts
|
||||
nopts.Port++
|
||||
nopts.ClusterPort++
|
||||
nopts.HTTPPort++
|
||||
return &nopts
|
||||
}
|
||||
20
vendor/github.com/nats-io/gnatsd/vendor/github.com/nats-io/nuid/LICENSE
generated
vendored
Normal file
20
vendor/github.com/nats-io/gnatsd/vendor/github.com/nats-io/nuid/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
121
vendor/github.com/nats-io/gnatsd/vendor/github.com/nats-io/nuid/nuid.go
generated
vendored
Normal file
121
vendor/github.com/nats-io/gnatsd/vendor/github.com/nats-io/nuid/nuid.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly.
|
||||
package nuid
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
prand "math/rand"
|
||||
)
|
||||
|
||||
// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly.
|
||||
// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data
|
||||
// that is started at a pseudo random number and increments with a pseudo-random increment.
|
||||
// Total is 22 bytes of base 62 ascii text :)
|
||||
|
||||
const (
|
||||
digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||
base = 62
|
||||
preLen = 12
|
||||
seqLen = 10
|
||||
maxSeq = int64(839299365868340224) // base^seqLen == 62^10
|
||||
minInc = int64(33)
|
||||
maxInc = int64(333)
|
||||
totalLen = preLen + seqLen
|
||||
)
|
||||
|
||||
type NUID struct {
|
||||
pre []byte
|
||||
seq int64
|
||||
inc int64
|
||||
}
|
||||
|
||||
type lockedNUID struct {
|
||||
sync.Mutex
|
||||
*NUID
|
||||
}
|
||||
|
||||
// Global NUID
|
||||
var globalNUID *lockedNUID
|
||||
|
||||
// Seed sequential random with crypto or math/random and current time
|
||||
// and generate crypto prefix.
|
||||
func init() {
|
||||
r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
prand.Seed(time.Now().UnixNano())
|
||||
} else {
|
||||
prand.Seed(r.Int64())
|
||||
}
|
||||
globalNUID = &lockedNUID{NUID: New()}
|
||||
globalNUID.RandomizePrefix()
|
||||
}
|
||||
|
||||
// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment.
|
||||
func New() *NUID {
|
||||
n := &NUID{
|
||||
seq: prand.Int63n(maxSeq),
|
||||
inc: minInc + prand.Int63n(maxInc-minInc),
|
||||
pre: make([]byte, preLen),
|
||||
}
|
||||
n.RandomizePrefix()
|
||||
return n
|
||||
}
|
||||
|
||||
// Generate the next NUID string from the global locked NUID instance.
|
||||
func Next() string {
|
||||
globalNUID.Lock()
|
||||
nuid := globalNUID.Next()
|
||||
globalNUID.Unlock()
|
||||
return nuid
|
||||
}
|
||||
|
||||
// Next generates the next NUID string: the 12-byte random prefix followed
// by the 10-byte sequential portion.
func (n *NUID) Next() string {
	// Increment and capture.
	n.seq += n.inc
	if n.seq >= maxSeq {
		// Sequential space exhausted: pick a fresh random prefix and
		// restart the sequential portion.
		n.RandomizePrefix()
		n.resetSequential()
	}
	seq := n.seq

	// Copy prefix
	var b [totalLen]byte
	bs := b[:preLen]
	copy(bs, n.pre)

	// Encode the sequential portion in base 62 (not base 36 as a previous
	// comment claimed), filling the buffer from the right.
	for i, l := len(b), seq; i > preLen; l /= base {
		i -= 1
		b[i] = digits[l%base]
	}
	return string(b[:])
}
|
||||
|
||||
// Resets the sequential portion of the NUID.
|
||||
func (n *NUID) resetSequential() {
|
||||
n.seq = prand.Int63n(maxSeq)
|
||||
n.inc = minInc + prand.Int63n(maxInc-minInc)
|
||||
}
|
||||
|
||||
// Generate a new prefix from crypto/rand.
|
||||
// This call *can* drain entropy and will be called automatically when we exhaust the sequential range.
|
||||
// Will panic if it gets an error from rand.Int()
|
||||
func (n *NUID) RandomizePrefix() {
|
||||
var cb [preLen]byte
|
||||
cbs := cb[:]
|
||||
if nb, err := rand.Read(cbs); nb != preLen || err != nil {
|
||||
panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err))
|
||||
}
|
||||
|
||||
for i := 0; i < preLen; i++ {
|
||||
n.pre[i] = digits[int(cbs[i])%base]
|
||||
}
|
||||
}
|
||||
20
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/bcrypt/LICENSE
generated
vendored
Normal file
20
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/bcrypt/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
35
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/bcrypt/base64.go
generated
vendored
Normal file
35
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/bcrypt/base64.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bcrypt
|
||||
|
||||
import "encoding/base64"
|
||||
|
||||
const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
|
||||
var bcEncoding = base64.NewEncoding(alphabet)
|
||||
|
||||
func base64Encode(src []byte) []byte {
|
||||
n := bcEncoding.EncodedLen(len(src))
|
||||
dst := make([]byte, n)
|
||||
bcEncoding.Encode(dst, src)
|
||||
for dst[n-1] == '=' {
|
||||
n--
|
||||
}
|
||||
return dst[:n]
|
||||
}
|
||||
|
||||
func base64Decode(src []byte) ([]byte, error) {
|
||||
numOfEquals := 4 - (len(src) % 4)
|
||||
for i := 0; i < numOfEquals; i++ {
|
||||
src = append(src, '=')
|
||||
}
|
||||
|
||||
dst := make([]byte, bcEncoding.DecodedLen(len(src)))
|
||||
n, err := bcEncoding.Decode(dst, src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst[:n], nil
|
||||
}
|
||||
295
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
generated
vendored
Normal file
295
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
generated
vendored
Normal file
@@ -0,0 +1,295 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
|
||||
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
|
||||
package bcrypt // import "golang.org/x/crypto/bcrypt"
|
||||
|
||||
// The code is a port of Provos and Mazières's C implementation.
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/blowfish"
|
||||
)
|
||||
|
||||
const (
|
||||
MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
|
||||
MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
|
||||
DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
|
||||
)
|
||||
|
||||
// ErrMismatchedHashAndPassword is returned from CompareHashAndPassword when
// a password and hash do not match.
var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")

// ErrHashTooShort is returned from CompareHashAndPassword when a hash is too
// short (fewer than minHashSize bytes) to be a bcrypt hash.
var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
|
||||
|
||||
// HashVersionTooNewError is returned from CompareHashAndPassword when a hash
// was created with a bcrypt algorithm newer than this implementation. The
// underlying byte is the offending version character.
type HashVersionTooNewError byte

// Error implements the error interface.
func (hv HashVersionTooNewError) Error() string {
	return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
}
|
||||
|
||||
// InvalidHashPrefixError reports a hash whose first byte was not '$'; the
// underlying byte is the character that was seen instead.
type InvalidHashPrefixError byte

// Error implements the error interface.
func (ih InvalidHashPrefixError) Error() string {
	seen := byte(ih)
	return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", seen)
}
|
||||
|
||||
// InvalidCostError reports a cost outside the [MinCost, MaxCost] range; the
// underlying int is the rejected cost.
type InvalidCostError int

// Error implements the error interface.
func (ic InvalidCostError) Error() string {
	return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
}
|
||||
|
||||
// Layout constants for the "$2a$NN$<salt><hash>" hash string format.
const (
	majorVersion       = '2'  // bcrypt version character emitted in hashes
	minorVersion       = 'a'  // minor revision character
	maxSaltSize        = 16   // raw (pre-encoding) salt bytes generated
	maxCryptedHashSize = 23   // ciphertext bytes encoded (only 23 of 24; see bcrypt())
	encodedSaltSize    = 22   // base64 length of the 16-byte salt, unpadded
	encodedHashSize    = 31   // base64 length of the 23-byte hash, unpadded
	minHashSize        = 59   // shortest well-formed hash string accepted
)
|
||||
|
||||
// magicCipherData is an IV for the 64 Blowfish encryption calls in
// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
// Each 8-byte third is encrypted 64 times in place (see the loops in
// bcrypt()).
var magicCipherData = []byte{
	0x4f, 0x72, 0x70, 0x68,
	0x65, 0x61, 0x6e, 0x42,
	0x65, 0x68, 0x6f, 0x6c,
	0x64, 0x65, 0x72, 0x53,
	0x63, 0x72, 0x79, 0x44,
	0x6f, 0x75, 0x62, 0x74,
}
|
||||
|
||||
// hashed holds the parsed components of a bcrypt hash string.
type hashed struct {
	hash  []byte // base64-encoded ciphertext (encodedHashSize bytes when complete)
	salt  []byte // base64-encoded salt (encodedSaltSize bytes)
	cost  int    // allowed range is MinCost to MaxCost
	major byte   // version character, e.g. '2'
	minor byte   // revision character, e.g. 'a'; 0 when absent
}
|
||||
|
||||
// GenerateFromPassword returns the bcrypt hash of the password at the given
|
||||
// cost. If the cost given is less than MinCost, the cost will be set to
|
||||
// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
|
||||
// to compare the returned hashed password with its cleartext version.
|
||||
func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
|
||||
p, err := newFromPassword(password, cost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.Hash(), nil
|
||||
}
|
||||
|
||||
// CompareHashAndPassword compares a bcrypt hashed password with its possible
|
||||
// plaintext equivalent. Returns nil on success, or an error on failure.
|
||||
func CompareHashAndPassword(hashedPassword, password []byte) error {
|
||||
p, err := newFromHash(hashedPassword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
otherHash, err := bcrypt(password, p.cost, p.salt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
|
||||
if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrMismatchedHashAndPassword
|
||||
}
|
||||
|
||||
// Cost returns the hashing cost used to create the given hashed
|
||||
// password. When, in the future, the hashing cost of a password system needs
|
||||
// to be increased in order to adjust for greater computational power, this
|
||||
// function allows one to establish which passwords need to be updated.
|
||||
func Cost(hashedPassword []byte) (int, error) {
|
||||
p, err := newFromHash(hashedPassword)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return p.cost, nil
|
||||
}
|
||||
|
||||
func newFromPassword(password []byte, cost int) (*hashed, error) {
|
||||
if cost < MinCost {
|
||||
cost = DefaultCost
|
||||
}
|
||||
p := new(hashed)
|
||||
p.major = majorVersion
|
||||
p.minor = minorVersion
|
||||
|
||||
err := checkCost(cost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.cost = cost
|
||||
|
||||
unencodedSalt := make([]byte, maxSaltSize)
|
||||
_, err = io.ReadFull(rand.Reader, unencodedSalt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.salt = base64Encode(unencodedSalt)
|
||||
hash, err := bcrypt(password, p.cost, p.salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.hash = hash
|
||||
return p, err
|
||||
}
|
||||
|
||||
func newFromHash(hashedSecret []byte) (*hashed, error) {
|
||||
if len(hashedSecret) < minHashSize {
|
||||
return nil, ErrHashTooShort
|
||||
}
|
||||
p := new(hashed)
|
||||
n, err := p.decodeVersion(hashedSecret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashedSecret = hashedSecret[n:]
|
||||
n, err = p.decodeCost(hashedSecret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashedSecret = hashedSecret[n:]
|
||||
|
||||
// The "+2" is here because we'll have to append at most 2 '=' to the salt
|
||||
// when base64 decoding it in expensiveBlowfishSetup().
|
||||
p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
|
||||
copy(p.salt, hashedSecret[:encodedSaltSize])
|
||||
|
||||
hashedSecret = hashedSecret[encodedSaltSize:]
|
||||
p.hash = make([]byte, len(hashedSecret))
|
||||
copy(p.hash, hashedSecret)
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
|
||||
cipherData := make([]byte, len(magicCipherData))
|
||||
copy(cipherData, magicCipherData)
|
||||
|
||||
c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := 0; i < 24; i += 8 {
|
||||
for j := 0; j < 64; j++ {
|
||||
c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
|
||||
}
|
||||
}
|
||||
|
||||
// Bug compatibility with C bcrypt implementations. We only encode 23 of
|
||||
// the 24 bytes encrypted.
|
||||
hsh := base64Encode(cipherData[:maxCryptedHashSize])
|
||||
return hsh, nil
|
||||
}
|
||||
|
||||
func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
|
||||
|
||||
csalt, err := base64Decode(salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Bug compatibility with C bcrypt implementations. They use the trailing
|
||||
// NULL in the key string during expansion.
|
||||
ckey := append(key, 0)
|
||||
|
||||
c, err := blowfish.NewSaltedCipher(ckey, csalt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var i, rounds uint64
|
||||
rounds = 1 << cost
|
||||
for i = 0; i < rounds; i++ {
|
||||
blowfish.ExpandKey(ckey, c)
|
||||
blowfish.ExpandKey(csalt, c)
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Hash serializes p into the standard "$2a$NN$<salt><hash>" byte form.
// 60 bytes is the maximum length; the returned slice is trimmed to the
// bytes actually written.
func (p *hashed) Hash() []byte {
	arr := make([]byte, 60)
	arr[0] = '$'
	arr[1] = p.major
	n := 2
	if p.minor != 0 {
		// The minor version character is optional; emit it only when set.
		arr[2] = p.minor
		n = 3
	}
	arr[n] = '$'
	n += 1
	// Cost is always two decimal digits ("04".."31").
	copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
	n += 2
	arr[n] = '$'
	n += 1
	copy(arr[n:], p.salt)
	n += encodedSaltSize
	copy(arr[n:], p.hash)
	// NOTE(review): n advances by the constant encodedHashSize regardless of
	// len(p.hash) — assumes p.hash is exactly 31 bytes; confirm for partial
	// hashes.
	n += encodedHashSize
	return arr[:n]
}
|
||||
|
||||
func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
|
||||
if sbytes[0] != '$' {
|
||||
return -1, InvalidHashPrefixError(sbytes[0])
|
||||
}
|
||||
if sbytes[1] > majorVersion {
|
||||
return -1, HashVersionTooNewError(sbytes[1])
|
||||
}
|
||||
p.major = sbytes[1]
|
||||
n := 3
|
||||
if sbytes[2] != '$' {
|
||||
p.minor = sbytes[2]
|
||||
n++
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// sbytes should begin where decodeVersion left off.
|
||||
func (p *hashed) decodeCost(sbytes []byte) (int, error) {
|
||||
cost, err := strconv.Atoi(string(sbytes[0:2]))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
err = checkCost(cost)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
p.cost = cost
|
||||
return 3, nil
|
||||
}
|
||||
|
||||
// String renders the parsed fields for debugging; the hash is printed as a
// string while the salt is printed as raw bytes.
func (p *hashed) String() string {
	return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
}
|
||||
|
||||
func checkCost(cost int) error {
|
||||
if cost < MinCost || cost > MaxCost {
|
||||
return InvalidCostError(cost)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
20
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish/LICENSE
generated
vendored
Normal file
20
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
159
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish/block.go
generated
vendored
Normal file
159
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish/block.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blowfish
|
||||
|
||||
// getNextWord returns the next big-endian uint32 value from the byte slice
// at the given position in a circular manner, updating the position.
func getNextWord(b []byte, pos *int) uint32 {
	idx := *pos
	var word uint32
	for n := 0; n < 4; n++ {
		word = word<<8 | uint32(b[idx])
		if idx++; idx >= len(b) {
			idx = 0
		}
	}
	*pos = idx
	return word
}
|
||||
|
||||
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
// by the bcrypt package to reuse the Blowfish key schedule during its
// set up. It's unlikely that you need to use this directly.
func ExpandKey(key []byte, c *Cipher) {
	// XOR the key, cycled as big-endian words, into the 18-entry P-array.
	j := 0
	for i := 0; i < 18; i++ {
		// Using inlined getNextWord for performance.
		var d uint32
		for k := 0; k < 4; k++ {
			d = d<<8 | uint32(key[j])
			j++
			if j >= len(key) {
				j = 0
			}
		}
		c.p[i] ^= d
	}

	// Overwrite the P-array, then each S-box in turn, with successive
	// encryptions of the evolving (l, r) block — order matters: each
	// encryption uses the partially updated state.
	var l, r uint32
	for i := 0; i < 18; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.p[i], c.p[i+1] = l, r
	}

	for i := 0; i < 256; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.s0[i], c.s0[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.s1[i], c.s1[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.s2[i], c.s2[i+1] = l, r
	}
	for i := 0; i < 256; i += 2 {
		l, r = encryptBlock(l, r, c)
		c.s3[i], c.s3[i+1] = l, r
	}
}
|
||||
|
||||
// expandKeyWithSalt is similar to ExpandKey, but folds the salt during the
// key schedule. While ExpandKey is essentially expandKeyWithSalt with an
// all-zero salt passed in, reusing ExpandKey turns out to be a place of
// inefficiency and specializing it here is useful.
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
	j := 0
	for i := 0; i < 18; i++ {
		c.p[i] ^= getNextWord(key, &j)
	}

	// Unlike ExpandKey, XOR successive salt words into (l, r) before each
	// encryption; j cycles through the salt across all five loops.
	j = 0
	var l, r uint32
	for i := 0; i < 18; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.p[i], c.p[i+1] = l, r
	}

	for i := 0; i < 256; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.s0[i], c.s0[i+1] = l, r
	}

	for i := 0; i < 256; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.s1[i], c.s1[i+1] = l, r
	}

	for i := 0; i < 256; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.s2[i], c.s2[i+1] = l, r
	}

	for i := 0; i < 256; i += 2 {
		l ^= getNextWord(salt, &j)
		r ^= getNextWord(salt, &j)
		l, r = encryptBlock(l, r, c)
		c.s3[i], c.s3[i+1] = l, r
	}
}
|
||||
|
||||
// encryptBlock runs the 16-round Blowfish Feistel network forward over the
// two 32-bit halves (l, r) using c's key schedule, returning the halves
// swapped. The round function is manually unrolled and inlined:
// ((s0[a] + s1[b]) ^ s2[c]) + s3[d] over the active half's four bytes.
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
	xl, xr := l, r
	xl ^= c.p[0]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
	xr ^= c.p[17]
	// The final swap undoes the last exchange of the Feistel structure.
	return xr, xl
}
|
||||
|
||||
// decryptBlock is the inverse of encryptBlock: the same 16 Feistel rounds
// with the P-array subkeys applied in reverse order (p[17] down to p[0]).
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
	xl, xr := l, r
	xl ^= c.p[17]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
	xr ^= c.p[0]
	return xr, xl
}
|
||||
91
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish/cipher.go
generated
vendored
Normal file
91
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish/cipher.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
|
||||
package blowfish // import "golang.org/x/crypto/blowfish"
|
||||
|
||||
// The code is a port of Bruce Schneier's C implementation.
|
||||
// See http://www.schneier.com/blowfish.html.
|
||||
|
||||
import "strconv"
|
||||
|
||||
// BlockSize is the Blowfish block size in bytes.
const BlockSize = 8

// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
	p              [18]uint32 // the P-array of round subkeys
	s0, s1, s2, s3 [256]uint32 // the four substitution boxes
}
|
||||
|
||||
// KeySizeError reports a key length that the constructors reject; the
// underlying int is the offending length.
type KeySizeError int

// Error implements the error interface.
func (k KeySizeError) Error() string {
	size := int(k)
	return "crypto/blowfish: invalid key size " + strconv.Itoa(size)
}
|
||||
|
||||
// NewCipher creates and returns a Cipher.
|
||||
// The key argument should be the Blowfish key, from 1 to 56 bytes.
|
||||
func NewCipher(key []byte) (*Cipher, error) {
|
||||
var result Cipher
|
||||
if k := len(key); k < 1 || k > 56 {
|
||||
return nil, KeySizeError(k)
|
||||
}
|
||||
initCipher(&result)
|
||||
ExpandKey(key, &result)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
|
||||
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
|
||||
// sufficient and desirable. For bcrypt compatiblity, the key can be over 56
|
||||
// bytes.
|
||||
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
|
||||
if len(salt) == 0 {
|
||||
return NewCipher(key)
|
||||
}
|
||||
var result Cipher
|
||||
if k := len(key); k < 1 {
|
||||
return nil, KeySizeError(k)
|
||||
}
|
||||
initCipher(&result)
|
||||
expandKeyWithSalt(key, salt, &result)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher". The receiver is unused; the size is constant.
func (c *Cipher) BlockSize() int { return BlockSize }
|
||||
|
||||
// Encrypt encrypts the 8-byte buffer src using the key k
|
||||
// and stores the result in dst.
|
||||
// Note that for amounts of data larger than a block,
|
||||
// it is not safe to just call Encrypt on successive blocks;
|
||||
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
|
||||
func (c *Cipher) Encrypt(dst, src []byte) {
|
||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||
l, r = encryptBlock(l, r, c)
|
||||
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
|
||||
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
|
||||
}
|
||||
|
||||
// Decrypt decrypts the 8-byte buffer src using the key k
|
||||
// and stores the result in dst.
|
||||
func (c *Cipher) Decrypt(dst, src []byte) {
|
||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||
l, r = decryptBlock(l, r, c)
|
||||
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
|
||||
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
|
||||
}
|
||||
|
||||
func initCipher(c *Cipher) {
|
||||
copy(c.p[0:], p[0:])
|
||||
copy(c.s0[0:], s0[0:])
|
||||
copy(c.s1[0:], s1[0:])
|
||||
copy(c.s2[0:], s2[0:])
|
||||
copy(c.s3[0:], s3[0:])
|
||||
}
|
||||
199
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish/const.go
generated
vendored
Normal file
199
vendor/github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish/const.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The startup permutation array and substitution boxes.
|
||||
// They are the hexadecimal digits of PI; see:
|
||||
// http://www.schneier.com/code/constants.txt.
|
||||
|
||||
package blowfish
|
||||
|
||||
var s0 = [256]uint32{
|
||||
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
|
||||
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
|
||||
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
|
||||
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
|
||||
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
|
||||
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
|
||||
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
|
||||
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
|
||||
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
|
||||
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
|
||||
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
|
||||
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
|
||||
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
|
||||
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
|
||||
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
|
||||
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
|
||||
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
|
||||
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
|
||||
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
|
||||
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
|
||||
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
|
||||
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
|
||||
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
|
||||
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
|
||||
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
|
||||
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
|
||||
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
|
||||
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
|
||||
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
|
||||
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
|
||||
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
|
||||
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
|
||||
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
|
||||
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
|
||||
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
|
||||
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
|
||||
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
|
||||
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
|
||||
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
|
||||
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
|
||||
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
|
||||
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
|
||||
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
|
||||
}
|
||||
|
||||
var s1 = [256]uint32{
|
||||
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
|
||||
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
|
||||
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
|
||||
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
|
||||
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
|
||||
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
|
||||
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
|
||||
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
|
||||
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
|
||||
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
|
||||
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
|
||||
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
|
||||
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
|
||||
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
|
||||
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
|
||||
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
|
||||
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
|
||||
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
|
||||
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
|
||||
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
|
||||
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
|
||||
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
|
||||
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
|
||||
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
|
||||
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
|
||||
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
|
||||
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
|
||||
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
|
||||
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
|
||||
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
|
||||
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
|
||||
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
|
||||
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
|
||||
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
|
||||
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
|
||||
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
|
||||
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
|
||||
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
|
||||
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
|
||||
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
|
||||
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
|
||||
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
|
||||
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
|
||||
}
|
||||
|
||||
var s2 = [256]uint32{
|
||||
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
|
||||
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
|
||||
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
|
||||
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
|
||||
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
|
||||
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
|
||||
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
|
||||
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
|
||||
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
|
||||
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
|
||||
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
|
||||
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
|
||||
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
|
||||
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
|
||||
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
|
||||
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
|
||||
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
|
||||
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
|
||||
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
|
||||
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
|
||||
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
|
||||
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
|
||||
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
|
||||
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
|
||||
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
|
||||
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
|
||||
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
|
||||
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
|
||||
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
|
||||
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
|
||||
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
|
||||
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
|
||||
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
|
||||
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
|
||||
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
|
||||
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
|
||||
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
|
||||
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
|
||||
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
|
||||
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
|
||||
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
|
||||
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
|
||||
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
|
||||
}
|
||||
|
||||
var s3 = [256]uint32{
|
||||
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
|
||||
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
|
||||
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
|
||||
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
|
||||
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
|
||||
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
|
||||
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
|
||||
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
|
||||
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
|
||||
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
|
||||
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
|
||||
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
|
||||
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
|
||||
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
|
||||
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
|
||||
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
|
||||
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
|
||||
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
|
||||
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
|
||||
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
|
||||
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
|
||||
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
|
||||
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
|
||||
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
|
||||
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
|
||||
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
|
||||
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
|
||||
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
|
||||
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
|
||||
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
|
||||
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
|
||||
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
|
||||
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
|
||||
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
|
||||
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
|
||||
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
|
||||
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
|
||||
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
|
||||
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
|
||||
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
|
||||
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
|
||||
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
|
||||
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
|
||||
}
|
||||
|
||||
var p = [18]uint32{
|
||||
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
|
||||
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
|
||||
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
|
||||
}
|
||||
20
vendor/github.com/nats-io/nats/LICENSE
generated
vendored
Normal file
20
vendor/github.com/nats-io/nats/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
249
vendor/github.com/nats-io/nats/enc.go
generated
vendored
Normal file
249
vendor/github.com/nats-io/nats/enc.go
generated
vendored
Normal file
@@ -0,0 +1,249 @@
|
||||
// Copyright 2012-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package nats
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// Default Encoders
|
||||
. "github.com/nats-io/nats/encoders/builtin"
|
||||
)
|
||||
|
||||
// Encoder interface is for all register encoders
|
||||
type Encoder interface {
|
||||
Encode(subject string, v interface{}) ([]byte, error)
|
||||
Decode(subject string, data []byte, vPtr interface{}) error
|
||||
}
|
||||
|
||||
var encMap map[string]Encoder
|
||||
var encLock sync.Mutex
|
||||
|
||||
// Indexe names into the Registered Encoders.
|
||||
const (
|
||||
JSON_ENCODER = "json"
|
||||
GOB_ENCODER = "gob"
|
||||
DEFAULT_ENCODER = "default"
|
||||
)
|
||||
|
||||
func init() {
|
||||
encMap = make(map[string]Encoder)
|
||||
// Register json, gob and default encoder
|
||||
RegisterEncoder(JSON_ENCODER, &JsonEncoder{})
|
||||
RegisterEncoder(GOB_ENCODER, &GobEncoder{})
|
||||
RegisterEncoder(DEFAULT_ENCODER, &DefaultEncoder{})
|
||||
}
|
||||
|
||||
// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to
|
||||
// a nats server and have an extendable encoder system that will encode and decode messages
|
||||
// from raw Go types.
|
||||
type EncodedConn struct {
|
||||
Conn *Conn
|
||||
Enc Encoder
|
||||
}
|
||||
|
||||
// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered
|
||||
// encoder.
|
||||
func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) {
|
||||
if c == nil {
|
||||
return nil, errors.New("nats: Nil Connection")
|
||||
}
|
||||
if c.IsClosed() {
|
||||
return nil, ErrConnectionClosed
|
||||
}
|
||||
ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)}
|
||||
if ec.Enc == nil {
|
||||
return nil, fmt.Errorf("No encoder registered for '%s'", encType)
|
||||
}
|
||||
return ec, nil
|
||||
}
|
||||
|
||||
// RegisterEncoder will register the encType with the given Encoder. Useful for customization.
|
||||
func RegisterEncoder(encType string, enc Encoder) {
|
||||
encLock.Lock()
|
||||
defer encLock.Unlock()
|
||||
encMap[encType] = enc
|
||||
}
|
||||
|
||||
// EncoderForType will return the registered Encoder for the encType.
|
||||
func EncoderForType(encType string) Encoder {
|
||||
encLock.Lock()
|
||||
defer encLock.Unlock()
|
||||
return encMap[encType]
|
||||
}
|
||||
|
||||
// Publish publishes the data argument to the given subject. The data argument
|
||||
// will be encoded using the associated encoder.
|
||||
func (c *EncodedConn) Publish(subject string, v interface{}) error {
|
||||
b, err := c.Enc.Encode(subject, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Conn.publish(subject, _EMPTY_, b)
|
||||
}
|
||||
|
||||
// PublishRequest will perform a Publish() expecting a response on the
|
||||
// reply subject. Use Request() for automatically waiting for a response
|
||||
// inline.
|
||||
func (c *EncodedConn) PublishRequest(subject, reply string, v interface{}) error {
|
||||
b, err := c.Enc.Encode(subject, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Conn.publish(subject, reply, b)
|
||||
}
|
||||
|
||||
// Request will create an Inbox and perform a Request() call
|
||||
// with the Inbox reply for the data v. A response will be
|
||||
// decoded into the vPtrResponse.
|
||||
func (c *EncodedConn) Request(subject string, v interface{}, vPtr interface{}, timeout time.Duration) error {
|
||||
b, err := c.Enc.Encode(subject, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m, err := c.Conn.Request(subject, b, timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if reflect.TypeOf(vPtr) == emptyMsgType {
|
||||
mPtr := vPtr.(*Msg)
|
||||
*mPtr = *m
|
||||
} else {
|
||||
err = c.Enc.Decode(m.Subject, m.Data, vPtr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Handler is a specific callback used for Subscribe. It is generalized to
|
||||
// an interface{}, but we will discover its format and arguments at runtime
|
||||
// and perform the correct callback, including de-marshalling JSON strings
|
||||
// back into the appropriate struct based on the signature of the Handler.
|
||||
//
|
||||
// Handlers are expected to have one of four signatures.
|
||||
//
|
||||
// type person struct {
|
||||
// Name string `json:"name,omitempty"`
|
||||
// Age uint `json:"age,omitempty"`
|
||||
// }
|
||||
//
|
||||
// handler := func(m *Msg)
|
||||
// handler := func(p *person)
|
||||
// handler := func(subject string, o *obj)
|
||||
// handler := func(subject, reply string, o *obj)
|
||||
//
|
||||
// These forms allow a callback to request a raw Msg ptr, where the processing
|
||||
// of the message from the wire is untouched. Process a JSON representation
|
||||
// and demarshal it into the given struct, e.g. person.
|
||||
// There are also variants where the callback wants either the subject, or the
|
||||
// subject and the reply subject.
|
||||
type Handler interface{}
|
||||
|
||||
// Dissect the cb Handler's signature
|
||||
func argInfo(cb Handler) (reflect.Type, int) {
|
||||
cbType := reflect.TypeOf(cb)
|
||||
if cbType.Kind() != reflect.Func {
|
||||
panic("nats: Handler needs to be a func")
|
||||
}
|
||||
numArgs := cbType.NumIn()
|
||||
if numArgs == 0 {
|
||||
return nil, numArgs
|
||||
}
|
||||
return cbType.In(numArgs - 1), numArgs
|
||||
}
|
||||
|
||||
var emptyMsgType = reflect.TypeOf(&Msg{})
|
||||
|
||||
// Subscribe will create a subscription on the given subject and process incoming
|
||||
// messages using the specified Handler. The Handler should be a func that matches
|
||||
// a signature from the description of Handler from above.
|
||||
func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) {
|
||||
return c.subscribe(subject, _EMPTY_, cb)
|
||||
}
|
||||
|
||||
// QueueSubscribe will create a queue subscription on the given subject and process
|
||||
// incoming messages using the specified Handler. The Handler should be a func that
|
||||
// matches a signature from the description of Handler from above.
|
||||
func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) {
|
||||
return c.subscribe(subject, queue, cb)
|
||||
}
|
||||
|
||||
// Internal implementation that all public functions will use.
|
||||
func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) {
|
||||
if cb == nil {
|
||||
return nil, errors.New("nats: Handler required for EncodedConn Subscription")
|
||||
}
|
||||
argType, numArgs := argInfo(cb)
|
||||
if argType == nil {
|
||||
return nil, errors.New("nats: Handler requires at least one argument")
|
||||
}
|
||||
|
||||
cbValue := reflect.ValueOf(cb)
|
||||
wantsRaw := (argType == emptyMsgType)
|
||||
|
||||
natsCB := func(m *Msg) {
|
||||
var oV []reflect.Value
|
||||
if wantsRaw {
|
||||
oV = []reflect.Value{reflect.ValueOf(m)}
|
||||
} else {
|
||||
var oPtr reflect.Value
|
||||
if argType.Kind() != reflect.Ptr {
|
||||
oPtr = reflect.New(argType)
|
||||
} else {
|
||||
oPtr = reflect.New(argType.Elem())
|
||||
}
|
||||
if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
|
||||
if c.Conn.Opts.AsyncErrorCB != nil {
|
||||
c.Conn.ach <- func() {
|
||||
c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error()))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
if argType.Kind() != reflect.Ptr {
|
||||
oPtr = reflect.Indirect(oPtr)
|
||||
}
|
||||
|
||||
// Callback Arity
|
||||
switch numArgs {
|
||||
case 1:
|
||||
oV = []reflect.Value{oPtr}
|
||||
case 2:
|
||||
subV := reflect.ValueOf(m.Subject)
|
||||
oV = []reflect.Value{subV, oPtr}
|
||||
case 3:
|
||||
subV := reflect.ValueOf(m.Subject)
|
||||
replyV := reflect.ValueOf(m.Reply)
|
||||
oV = []reflect.Value{subV, replyV, oPtr}
|
||||
}
|
||||
|
||||
}
|
||||
cbValue.Call(oV)
|
||||
}
|
||||
|
||||
return c.Conn.subscribe(subject, queue, natsCB, nil)
|
||||
}
|
||||
|
||||
// FlushTimeout allows a Flush operation to have an associated timeout.
|
||||
func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) {
|
||||
return c.Conn.FlushTimeout(timeout)
|
||||
}
|
||||
|
||||
// Flush will perform a round trip to the server and return when it
|
||||
// receives the internal reply.
|
||||
func (c *EncodedConn) Flush() error {
|
||||
return c.Conn.Flush()
|
||||
}
|
||||
|
||||
// Close will close the connection to the server. This call will release
|
||||
// all blocking calls, such as Flush(), etc.
|
||||
func (c *EncodedConn) Close() {
|
||||
c.Conn.Close()
|
||||
}
|
||||
|
||||
// LastError reports the last error encountered via the Connection.
|
||||
func (c *EncodedConn) LastError() error {
|
||||
return c.Conn.err
|
||||
}
|
||||
106
vendor/github.com/nats-io/nats/encoders/builtin/default_enc.go
generated
vendored
Normal file
106
vendor/github.com/nats-io/nats/encoders/builtin/default_enc.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
// Copyright 2012-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package builtin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// DefaultEncoder implementation for EncodedConn.
|
||||
// This encoder will leave []byte and string untouched, but will attempt to
|
||||
// turn numbers into appropriate strings that can be decoded. It will also
|
||||
// propely encoded and decode bools. If will encode a struct, but if you want
|
||||
// to properly handle structures you should use JsonEncoder.
|
||||
type DefaultEncoder struct {
|
||||
// Empty
|
||||
}
|
||||
|
||||
var trueB = []byte("true")
|
||||
var falseB = []byte("false")
|
||||
var nilB = []byte("")
|
||||
|
||||
// Encode
|
||||
func (je *DefaultEncoder) Encode(subject string, v interface{}) ([]byte, error) {
|
||||
switch arg := v.(type) {
|
||||
case string:
|
||||
bytes := *(*[]byte)(unsafe.Pointer(&arg))
|
||||
return bytes, nil
|
||||
case []byte:
|
||||
return arg, nil
|
||||
case bool:
|
||||
if arg {
|
||||
return trueB, nil
|
||||
} else {
|
||||
return falseB, nil
|
||||
}
|
||||
case nil:
|
||||
return nilB, nil
|
||||
default:
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "%+v", arg)
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Decode
|
||||
func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr interface{}) error {
|
||||
// Figure out what it's pointing to...
|
||||
sData := *(*string)(unsafe.Pointer(&data))
|
||||
switch arg := vPtr.(type) {
|
||||
case *string:
|
||||
*arg = sData
|
||||
return nil
|
||||
case *[]byte:
|
||||
*arg = data
|
||||
return nil
|
||||
case *int:
|
||||
n, err := strconv.ParseInt(sData, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*arg = int(n)
|
||||
return nil
|
||||
case *int32:
|
||||
n, err := strconv.ParseInt(sData, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*arg = int32(n)
|
||||
return nil
|
||||
case *int64:
|
||||
n, err := strconv.ParseInt(sData, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*arg = int64(n)
|
||||
return nil
|
||||
case *float32:
|
||||
n, err := strconv.ParseFloat(sData, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*arg = float32(n)
|
||||
return nil
|
||||
case *float64:
|
||||
n, err := strconv.ParseFloat(sData, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*arg = float64(n)
|
||||
return nil
|
||||
case *bool:
|
||||
b, err := strconv.ParseBool(sData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*arg = b
|
||||
return nil
|
||||
default:
|
||||
vt := reflect.TypeOf(arg).Elem()
|
||||
return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt)
|
||||
}
|
||||
}
|
||||
34
vendor/github.com/nats-io/nats/encoders/builtin/gob_enc.go
generated
vendored
Normal file
34
vendor/github.com/nats-io/nats/encoders/builtin/gob_enc.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright 2013-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package builtin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
)
|
||||
|
||||
// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn.
|
||||
// This encoder will use the builtin encoding/gob to Marshal
|
||||
// and Unmarshal most types, including structs.
|
||||
type GobEncoder struct {
|
||||
// Empty
|
||||
}
|
||||
|
||||
// FIXME(dlc) - This could probably be more efficient.
|
||||
|
||||
// Encode
|
||||
func (ge *GobEncoder) Encode(subject string, v interface{}) ([]byte, error) {
|
||||
b := new(bytes.Buffer)
|
||||
enc := gob.NewEncoder(b)
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// Decode
|
||||
func (ge *GobEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) {
|
||||
dec := gob.NewDecoder(bytes.NewBuffer(data))
|
||||
err = dec.Decode(vPtr)
|
||||
return
|
||||
}
|
||||
45
vendor/github.com/nats-io/nats/encoders/builtin/json_enc.go
generated
vendored
Normal file
45
vendor/github.com/nats-io/nats/encoders/builtin/json_enc.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright 2012-2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package builtin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// JsonEncoder is a JSON Encoder implementation for EncodedConn.
|
||||
// This encoder will use the builtin encoding/json to Marshal
|
||||
// and Unmarshal most types, including structs.
|
||||
type JsonEncoder struct {
|
||||
// Empty
|
||||
}
|
||||
|
||||
// Encode
|
||||
func (je *JsonEncoder) Encode(subject string, v interface{}) ([]byte, error) {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Decode
|
||||
func (je *JsonEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) {
|
||||
switch arg := vPtr.(type) {
|
||||
case *string:
|
||||
// If they want a string and it is a JSON string, strip quotes
|
||||
// This allows someone to send a struct but receive as a plain string
|
||||
// This cast should be efficient for Go 1.3 and beyond.
|
||||
str := string(data)
|
||||
if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) {
|
||||
*arg = str[1 : len(str)-1]
|
||||
} else {
|
||||
*arg = str
|
||||
}
|
||||
case *[]byte:
|
||||
*arg = data
|
||||
default:
|
||||
err = json.Unmarshal(data, arg)
|
||||
}
|
||||
return
|
||||
}
|
||||
66
vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_enc.go
generated
vendored
Normal file
66
vendor/github.com/nats-io/nats/encoders/protobuf/protobuf_enc.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package protobuf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// Additional index for registered Encoders.
|
||||
const (
|
||||
PROTOBUF_ENCODER = "protobuf"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Register protobuf encoder
|
||||
nats.RegisterEncoder(PROTOBUF_ENCODER, &ProtobufEncoder{})
|
||||
}
|
||||
|
||||
// ProtobufEncoder is a protobuf implementation for EncodedConn
|
||||
// This encoder will use the builtin protobuf lib to Marshal
|
||||
// and Unmarshal structs.
|
||||
type ProtobufEncoder struct {
|
||||
// Empty
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidProtoMsgEncode = errors.New("nats: Invalid protobuf proto.Message object passed to encode")
|
||||
ErrInvalidProtoMsgDecode = errors.New("nats: Invalid protobuf proto.Message object passed to decode")
|
||||
)
|
||||
|
||||
// Encode
|
||||
func (pb *ProtobufEncoder) Encode(subject string, v interface{}) ([]byte, error) {
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
i, found := v.(proto.Message)
|
||||
if !found {
|
||||
return nil, ErrInvalidProtoMsgEncode
|
||||
}
|
||||
|
||||
b, err := proto.Marshal(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Decode
|
||||
func (pb *ProtobufEncoder) Decode(subject string, data []byte, vPtr interface{}) error {
|
||||
if _, ok := vPtr.(*interface{}); ok {
|
||||
return nil
|
||||
}
|
||||
i, found := vPtr.(proto.Message)
|
||||
if !found {
|
||||
return ErrInvalidProtoMsgDecode
|
||||
}
|
||||
|
||||
err := proto.Unmarshal(data, i)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
149
vendor/github.com/nats-io/nats/examples/nats-bench.go
generated
vendored
Normal file
149
vendor/github.com/nats-io/nats/examples/nats-bench.go
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// Some sane defaults
|
||||
const (
|
||||
DefaultNumMsgs = 100000
|
||||
DefaultNumPubs = 1
|
||||
DefaultNumSubs = 0
|
||||
HashModulo = 1000
|
||||
)
|
||||
|
||||
func usage() {
|
||||
log.Fatalf("Usage: nats-bench [-s server (%s)] [--tls] [-np NUM_PUBLISHERS] [-ns NUM_SUBSCRIBERS] [-n NUM_MSGS] <subject> <msg> \n", nats.DefaultURL)
|
||||
}
|
||||
|
||||
func main() {
|
||||
var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
|
||||
var tls = flag.Bool("tls", false, "Use TLS Secure Connection")
|
||||
var numPubs = flag.Int("np", DefaultNumPubs, "Number of Concurrent Publishers")
|
||||
var numSubs = flag.Int("ns", DefaultNumSubs, "Number of Concurrent Subscribers")
|
||||
var numMsgs = flag.Int("n", DefaultNumMsgs, "Number of Messages to Publish")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) < 2 {
|
||||
usage()
|
||||
}
|
||||
|
||||
// Setup the option block
|
||||
opts := nats.DefaultOptions
|
||||
opts.Servers = strings.Split(*urls, ",")
|
||||
for i, s := range opts.Servers {
|
||||
opts.Servers[i] = strings.Trim(s, " ")
|
||||
}
|
||||
opts.Secure = *tls
|
||||
|
||||
var startwg sync.WaitGroup
|
||||
var donewg sync.WaitGroup
|
||||
|
||||
donewg.Add(*numPubs + *numSubs)
|
||||
|
||||
// Run Subscribers first
|
||||
startwg.Add(*numSubs)
|
||||
for i := 0; i < *numSubs; i++ {
|
||||
go runSubscriber(&startwg, &donewg, opts, (*numMsgs)*(*numPubs))
|
||||
}
|
||||
startwg.Wait()
|
||||
|
||||
// Now Publishers
|
||||
startwg.Add(*numPubs)
|
||||
for i := 0; i < *numPubs; i++ {
|
||||
go runPublisher(&startwg, &donewg, opts, *numMsgs)
|
||||
}
|
||||
|
||||
log.Printf("Starting benchmark\n")
|
||||
log.Printf("msgs=%d, pubs=%d, subs=%d\n", *numMsgs, *numPubs, *numSubs)
|
||||
|
||||
startwg.Wait()
|
||||
|
||||
start := time.Now()
|
||||
donewg.Wait()
|
||||
delta := time.Since(start).Seconds()
|
||||
total := float64((*numMsgs) * (*numPubs))
|
||||
if *numSubs > 0 {
|
||||
total *= float64(*numSubs)
|
||||
}
|
||||
fmt.Printf("\nNATS throughput is %s msgs/sec\n", commaFormat(int64(total/delta)))
|
||||
}
|
||||
|
||||
func runPublisher(startwg, donewg *sync.WaitGroup, opts nats.Options, numMsgs int) {
|
||||
nc, err := opts.Connect()
|
||||
if err != nil {
|
||||
log.Fatalf("Can't connect: %v\n", err)
|
||||
}
|
||||
defer nc.Close()
|
||||
startwg.Done()
|
||||
|
||||
args := flag.Args()
|
||||
subj, msg := args[0], []byte(args[1])
|
||||
|
||||
for i := 0; i < numMsgs; i++ {
|
||||
nc.Publish(subj, msg)
|
||||
if i%HashModulo == 0 {
|
||||
fmt.Fprintf(os.Stderr, "#")
|
||||
}
|
||||
}
|
||||
nc.Flush()
|
||||
donewg.Done()
|
||||
}
|
||||
|
||||
func runSubscriber(startwg, donewg *sync.WaitGroup, opts nats.Options, numMsgs int) {
|
||||
nc, err := opts.Connect()
|
||||
if err != nil {
|
||||
log.Fatalf("Can't connect: %v\n", err)
|
||||
}
|
||||
|
||||
args := flag.Args()
|
||||
subj := args[0]
|
||||
|
||||
received := 0
|
||||
nc.Subscribe(subj, func(msg *nats.Msg) {
|
||||
received++
|
||||
if received%HashModulo == 0 {
|
||||
fmt.Fprintf(os.Stderr, "*")
|
||||
}
|
||||
if received >= numMsgs {
|
||||
donewg.Done()
|
||||
nc.Close()
|
||||
}
|
||||
})
|
||||
nc.Flush()
|
||||
startwg.Done()
|
||||
}
|
||||
|
||||
func commaFormat(n int64) string {
|
||||
in := strconv.FormatInt(n, 10)
|
||||
out := make([]byte, len(in)+(len(in)-2+int(in[0]/'0'))/3)
|
||||
if in[0] == '-' {
|
||||
in, out[0] = in[1:], '-'
|
||||
}
|
||||
for i, j, k := len(in)-1, len(out)-1, 0; ; i, j = i-1, j-1 {
|
||||
out[j] = in[i]
|
||||
if i == 0 {
|
||||
return string(out)
|
||||
}
|
||||
if k++; k == 3 {
|
||||
j, k = j-1, 0
|
||||
out[j] = ','
|
||||
}
|
||||
}
|
||||
}
|
||||
42
vendor/github.com/nats-io/nats/examples/nats-pub.go
generated
vendored
Normal file
42
vendor/github.com/nats-io/nats/examples/nats-pub.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// NOTE: Use tls scheme for TLS, e.g. nats-pub -s tls://demo.nats.io:4443 foo hello
|
||||
func usage() {
|
||||
log.Fatalf("Usage: nats-pub [-s server (%s)] <subject> <msg> \n", nats.DefaultURL)
|
||||
}
|
||||
|
||||
func main() {
|
||||
var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) < 1 {
|
||||
usage()
|
||||
}
|
||||
|
||||
nc, err := nats.Connect(*urls)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer nc.Close()
|
||||
|
||||
subj, msg := args[0], []byte(args[1])
|
||||
|
||||
nc.Publish(subj, msg)
|
||||
nc.Flush()
|
||||
|
||||
log.Printf("Published [%s] : '%s'\n", subj, msg)
|
||||
}
|
||||
55
vendor/github.com/nats-io/nats/examples/nats-qsub.go
generated
vendored
Normal file
55
vendor/github.com/nats-io/nats/examples/nats-qsub.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// NOTE: Use tls scheme for TLS, e.g. nats-qsub -s tls://demo.nats.io:4443 foo
|
||||
func usage() {
|
||||
log.Fatalf("Usage: nats-sub [-s server] [-t] <subject> <queue-group>\n")
|
||||
}
|
||||
|
||||
func printMsg(m *nats.Msg, i int) {
|
||||
log.Printf("[#%d] Received on [%s] Queue[%s] Pid[%d]: '%s'\n", i, m.Subject, m.Sub.Queue, os.Getpid(), string(m.Data))
|
||||
}
|
||||
|
||||
func main() {
|
||||
var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
|
||||
var showTime = flag.Bool("t", false, "Display timestamps")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) < 2 {
|
||||
usage()
|
||||
}
|
||||
|
||||
nc, err := nats.Connect(*urls)
|
||||
if err != nil {
|
||||
log.Fatalf("Can't connect: %v\n", err)
|
||||
}
|
||||
|
||||
subj, queue, i := args[0], args[1], 0
|
||||
|
||||
nc.QueueSubscribe(subj, queue, func(msg *nats.Msg) {
|
||||
i++
|
||||
printMsg(msg, i)
|
||||
})
|
||||
|
||||
log.Printf("Listening on [%s]\n", subj)
|
||||
if *showTime {
|
||||
log.SetFlags(log.LstdFlags)
|
||||
}
|
||||
|
||||
runtime.Goexit()
|
||||
}
|
||||
44
vendor/github.com/nats-io/nats/examples/nats-req.go
generated
vendored
Normal file
44
vendor/github.com/nats-io/nats/examples/nats-req.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// NOTE: Use tls scheme for TLS, e.g. nats-req -s tls://demo.nats.io:4443 foo hello
|
||||
func usage() {
|
||||
log.Fatalf("Usage: nats-req [-s server (%s)] <subject> <msg> \n", nats.DefaultURL)
|
||||
}
|
||||
|
||||
func main() {
|
||||
var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) < 2 {
|
||||
usage()
|
||||
}
|
||||
|
||||
nc, err := nats.Connect(*urls)
|
||||
if err != nil {
|
||||
log.Fatalf("Can't connect: %v\n", err)
|
||||
}
|
||||
defer nc.Close()
|
||||
subj, payload := args[0], []byte(args[1])
|
||||
|
||||
msg, err := nc.Request(subj, []byte(payload), 1000*time.Millisecond)
|
||||
if err != nil {
|
||||
log.Fatalf("Error in Request: %v\n", err)
|
||||
}
|
||||
log.Printf("Published [%s] : '%s'\n", subj, payload)
|
||||
log.Printf("Received [%v] : '%s'\n", msg.Subject, string(msg.Data))
|
||||
}
|
||||
55
vendor/github.com/nats-io/nats/examples/nats-rply.go
generated
vendored
Normal file
55
vendor/github.com/nats-io/nats/examples/nats-rply.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"runtime"
|
||||
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// NOTE: Use tls scheme for TLS, e.g. nats-rply -s tls://demo.nats.io:4443 foo hello
|
||||
func usage() {
|
||||
log.Fatalf("Usage: nats-rply [-s server][-t] <subject> <reponse>\n")
|
||||
}
|
||||
|
||||
func printMsg(m *nats.Msg, i int) {
|
||||
log.Printf("[#%d] Received on [%s]: '%s'\n", i, m.Subject, string(m.Data))
|
||||
}
|
||||
|
||||
func main() {
|
||||
var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
|
||||
var showTime = flag.Bool("t", false, "Display timestamps")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) < 2 {
|
||||
usage()
|
||||
}
|
||||
|
||||
nc, err := nats.Connect(*urls)
|
||||
if err != nil {
|
||||
log.Fatalf("Can't connect: %v\n", err)
|
||||
}
|
||||
|
||||
subj, reply, i := args[0], args[1], 0
|
||||
|
||||
nc.Subscribe(subj, func(msg *nats.Msg) {
|
||||
i++
|
||||
printMsg(msg, i)
|
||||
nc.Publish(msg.Reply, []byte(reply))
|
||||
})
|
||||
|
||||
log.Printf("Listening on [%s]\n", subj)
|
||||
if *showTime {
|
||||
log.SetFlags(log.LstdFlags)
|
||||
}
|
||||
|
||||
runtime.Goexit()
|
||||
}
|
||||
54
vendor/github.com/nats-io/nats/examples/nats-sub.go
generated
vendored
Normal file
54
vendor/github.com/nats-io/nats/examples/nats-sub.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2012-2016 Apcera Inc. All rights reserved.
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"runtime"
|
||||
|
||||
"github.com/nats-io/nats"
|
||||
)
|
||||
|
||||
// NOTE: Use tls scheme for TLS, e.g. nats-sub -s tls://demo.nats.io:4443 foo
|
||||
func usage() {
|
||||
log.Fatalf("Usage: nats-sub [-s server] [-t] <subject> \n")
|
||||
}
|
||||
|
||||
func printMsg(m *nats.Msg, i int) {
|
||||
log.Printf("[#%d] Received on [%s]: '%s'\n", i, m.Subject, string(m.Data))
|
||||
}
|
||||
|
||||
func main() {
|
||||
var urls = flag.String("s", nats.DefaultURL, "The nats server URLs (separated by comma)")
|
||||
var showTime = flag.Bool("t", false, "Display timestamps")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) < 1 {
|
||||
usage()
|
||||
}
|
||||
|
||||
nc, err := nats.Connect(*urls)
|
||||
if err != nil {
|
||||
log.Fatalf("Can't connect: %v\n", err)
|
||||
}
|
||||
|
||||
subj, i := args[0], 0
|
||||
|
||||
nc.Subscribe(subj, func(msg *nats.Msg) {
|
||||
i += 1
|
||||
printMsg(msg, i)
|
||||
})
|
||||
|
||||
log.Printf("Listening on [%s]\n", subj)
|
||||
if *showTime {
|
||||
log.SetFlags(log.LstdFlags)
|
||||
}
|
||||
|
||||
runtime.Goexit()
|
||||
}
|
||||
2460
vendor/github.com/nats-io/nats/nats.go
generated
vendored
Normal file
2460
vendor/github.com/nats-io/nats/nats.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
100
vendor/github.com/nats-io/nats/netchan.go
generated
vendored
Normal file
100
vendor/github.com/nats-io/nats/netchan.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
// Copyright 2013-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
package nats
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// This allows the functionality for network channels by binding send and receive Go chans
|
||||
// to subjects and optionally queue groups.
|
||||
// Data will be encoded and decoded via the EncodedConn and its associated encoders.
|
||||
|
||||
// BindSendChan binds a channel for send operations to NATS.
|
||||
func (c *EncodedConn) BindSendChan(subject string, channel interface{}) error {
|
||||
chVal := reflect.ValueOf(channel)
|
||||
if chVal.Kind() != reflect.Chan {
|
||||
return ErrChanArg
|
||||
}
|
||||
go chPublish(c, chVal, subject)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Publish all values that arrive on the channel until it is closed or we
|
||||
// encounter an error.
|
||||
func chPublish(c *EncodedConn, chVal reflect.Value, subject string) {
|
||||
for {
|
||||
val, ok := chVal.Recv()
|
||||
if !ok {
|
||||
// Channel has most likely been closed.
|
||||
return
|
||||
}
|
||||
if e := c.Publish(subject, val.Interface()); e != nil {
|
||||
// Do this under lock.
|
||||
c.Conn.mu.Lock()
|
||||
defer c.Conn.mu.Unlock()
|
||||
|
||||
if c.Conn.Opts.AsyncErrorCB != nil {
|
||||
// FIXME(dlc) - Not sure this is the right thing to do.
|
||||
// FIXME(ivan) - If the connection is not yet closed, try to schedule the callback
|
||||
if c.Conn.isClosed() {
|
||||
go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e)
|
||||
} else {
|
||||
c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) }
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BindRecvChan binds a channel for receive operations from NATS.
|
||||
func (c *EncodedConn) BindRecvChan(subject string, channel interface{}) (*Subscription, error) {
|
||||
return c.bindRecvChan(subject, _EMPTY_, channel)
|
||||
}
|
||||
|
||||
// BindRecvQueueChan binds a channel for queue-based receive operations from NATS.
|
||||
func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel interface{}) (*Subscription, error) {
|
||||
return c.bindRecvChan(subject, queue, channel)
|
||||
}
|
||||
|
||||
// Internal function to bind receive operations for a channel.
|
||||
func (c *EncodedConn) bindRecvChan(subject, queue string, channel interface{}) (*Subscription, error) {
|
||||
chVal := reflect.ValueOf(channel)
|
||||
if chVal.Kind() != reflect.Chan {
|
||||
return nil, ErrChanArg
|
||||
}
|
||||
argType := chVal.Type().Elem()
|
||||
|
||||
cb := func(m *Msg) {
|
||||
var oPtr reflect.Value
|
||||
if argType.Kind() != reflect.Ptr {
|
||||
oPtr = reflect.New(argType)
|
||||
} else {
|
||||
oPtr = reflect.New(argType.Elem())
|
||||
}
|
||||
if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
|
||||
c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error())
|
||||
if c.Conn.Opts.AsyncErrorCB != nil {
|
||||
c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) }
|
||||
}
|
||||
return
|
||||
}
|
||||
if argType.Kind() != reflect.Ptr {
|
||||
oPtr = reflect.Indirect(oPtr)
|
||||
}
|
||||
// This is a bit hacky, but in this instance we may be trying to send to a closed channel.
|
||||
// and the user does not know when it is safe to close the channel.
|
||||
defer func() {
|
||||
// If we have panicked, recover and close the subscription.
|
||||
if r := recover(); r != nil {
|
||||
m.Sub.Unsubscribe()
|
||||
}
|
||||
}()
|
||||
// Actually do the send to the channel.
|
||||
chVal.Send(oPtr)
|
||||
}
|
||||
|
||||
return c.Conn.subscribe(subject, queue, cb, nil)
|
||||
}
|
||||
407
vendor/github.com/nats-io/nats/parser.go
generated
vendored
Normal file
407
vendor/github.com/nats-io/nats/parser.go
generated
vendored
Normal file
@@ -0,0 +1,407 @@
|
||||
// Copyright 2012-2014 Apcera Inc. All rights reserved.
|
||||
|
||||
package nats
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type msgArg struct {
|
||||
subject []byte
|
||||
reply []byte
|
||||
sid int64
|
||||
size int
|
||||
}
|
||||
|
||||
const MAX_CONTROL_LINE_SIZE = 1024
|
||||
|
||||
type parseState struct {
|
||||
state int
|
||||
as int
|
||||
drop int
|
||||
ma msgArg
|
||||
argBuf []byte
|
||||
msgBuf []byte
|
||||
scratch [MAX_CONTROL_LINE_SIZE]byte
|
||||
}
|
||||
|
||||
const (
|
||||
OP_START = iota
|
||||
OP_PLUS
|
||||
OP_PLUS_O
|
||||
OP_PLUS_OK
|
||||
OP_MINUS
|
||||
OP_MINUS_E
|
||||
OP_MINUS_ER
|
||||
OP_MINUS_ERR
|
||||
OP_MINUS_ERR_SPC
|
||||
MINUS_ERR_ARG
|
||||
OP_M
|
||||
OP_MS
|
||||
OP_MSG
|
||||
OP_MSG_SPC
|
||||
MSG_ARG
|
||||
MSG_PAYLOAD
|
||||
MSG_END
|
||||
OP_P
|
||||
OP_PI
|
||||
OP_PIN
|
||||
OP_PING
|
||||
OP_PO
|
||||
OP_PON
|
||||
OP_PONG
|
||||
)
|
||||
|
||||
// parse is the fast protocol parser engine.
|
||||
func (nc *Conn) parse(buf []byte) error {
|
||||
var i int
|
||||
var b byte
|
||||
|
||||
// Move to loop instead of range syntax to allow jumping of i
|
||||
for i = 0; i < len(buf); i++ {
|
||||
b = buf[i]
|
||||
|
||||
switch nc.ps.state {
|
||||
case OP_START:
|
||||
switch b {
|
||||
case 'M', 'm':
|
||||
nc.ps.state = OP_M
|
||||
case 'P', 'p':
|
||||
nc.ps.state = OP_P
|
||||
case '+':
|
||||
nc.ps.state = OP_PLUS
|
||||
case '-':
|
||||
nc.ps.state = OP_MINUS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_M:
|
||||
switch b {
|
||||
case 'S', 's':
|
||||
nc.ps.state = OP_MS
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MS:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
nc.ps.state = OP_MSG
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MSG:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
nc.ps.state = OP_MSG_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MSG_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
nc.ps.state = MSG_ARG
|
||||
nc.ps.as = i
|
||||
}
|
||||
case MSG_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
nc.ps.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if nc.ps.argBuf != nil {
|
||||
arg = nc.ps.argBuf
|
||||
} else {
|
||||
arg = buf[nc.ps.as : i-nc.ps.drop]
|
||||
}
|
||||
if err := nc.processMsgArgs(arg); err != nil {
|
||||
return err
|
||||
}
|
||||
nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD
|
||||
|
||||
// jump ahead with the index. If this overruns
|
||||
// what is left we fall out and process split
|
||||
// buffer.
|
||||
i = nc.ps.as + nc.ps.ma.size - 1
|
||||
default:
|
||||
if nc.ps.argBuf != nil {
|
||||
nc.ps.argBuf = append(nc.ps.argBuf, b)
|
||||
}
|
||||
}
|
||||
case MSG_PAYLOAD:
|
||||
if nc.ps.msgBuf != nil {
|
||||
if len(nc.ps.msgBuf) >= nc.ps.ma.size {
|
||||
nc.processMsg(nc.ps.msgBuf)
|
||||
nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END
|
||||
} else {
|
||||
// copy as much as we can to the buffer and skip ahead.
|
||||
toCopy := nc.ps.ma.size - len(nc.ps.msgBuf)
|
||||
avail := len(buf) - i
|
||||
|
||||
if avail < toCopy {
|
||||
toCopy = avail
|
||||
}
|
||||
|
||||
if toCopy > 0 {
|
||||
start := len(nc.ps.msgBuf)
|
||||
// This is needed for copy to work.
|
||||
nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy]
|
||||
copy(nc.ps.msgBuf[start:], buf[i:i+toCopy])
|
||||
// Update our index
|
||||
i = (i + toCopy) - 1
|
||||
} else {
|
||||
nc.ps.msgBuf = append(nc.ps.msgBuf, b)
|
||||
}
|
||||
}
|
||||
} else if i-nc.ps.as >= nc.ps.ma.size {
|
||||
nc.processMsg(buf[nc.ps.as:i])
|
||||
nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END
|
||||
}
|
||||
case MSG_END:
|
||||
switch b {
|
||||
case '\n':
|
||||
nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START
|
||||
default:
|
||||
continue
|
||||
}
|
||||
case OP_PLUS:
|
||||
switch b {
|
||||
case 'O', 'o':
|
||||
nc.ps.state = OP_PLUS_O
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PLUS_O:
|
||||
switch b {
|
||||
case 'K', 'k':
|
||||
nc.ps.state = OP_PLUS_OK
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PLUS_OK:
|
||||
switch b {
|
||||
case '\n':
|
||||
nc.processOK()
|
||||
nc.ps.drop, nc.ps.state = 0, OP_START
|
||||
}
|
||||
case OP_MINUS:
|
||||
switch b {
|
||||
case 'E', 'e':
|
||||
nc.ps.state = OP_MINUS_E
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_E:
|
||||
switch b {
|
||||
case 'R', 'r':
|
||||
nc.ps.state = OP_MINUS_ER
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ER:
|
||||
switch b {
|
||||
case 'R', 'r':
|
||||
nc.ps.state = OP_MINUS_ERR
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ERR:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
nc.ps.state = OP_MINUS_ERR_SPC
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_MINUS_ERR_SPC:
|
||||
switch b {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
nc.ps.state = MINUS_ERR_ARG
|
||||
nc.ps.as = i
|
||||
}
|
||||
case MINUS_ERR_ARG:
|
||||
switch b {
|
||||
case '\r':
|
||||
nc.ps.drop = 1
|
||||
case '\n':
|
||||
var arg []byte
|
||||
if nc.ps.argBuf != nil {
|
||||
arg = nc.ps.argBuf
|
||||
nc.ps.argBuf = nil
|
||||
} else {
|
||||
arg = buf[nc.ps.as : i-nc.ps.drop]
|
||||
}
|
||||
nc.processErr(string(arg))
|
||||
nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START
|
||||
default:
|
||||
if nc.ps.argBuf != nil {
|
||||
nc.ps.argBuf = append(nc.ps.argBuf, b)
|
||||
}
|
||||
}
|
||||
case OP_P:
|
||||
switch b {
|
||||
case 'I', 'i':
|
||||
nc.ps.state = OP_PI
|
||||
case 'O', 'o':
|
||||
nc.ps.state = OP_PO
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PO:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
nc.ps.state = OP_PON
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PON:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
nc.ps.state = OP_PONG
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PONG:
|
||||
switch b {
|
||||
case '\n':
|
||||
nc.processPong()
|
||||
nc.ps.drop, nc.ps.state = 0, OP_START
|
||||
}
|
||||
case OP_PI:
|
||||
switch b {
|
||||
case 'N', 'n':
|
||||
nc.ps.state = OP_PIN
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PIN:
|
||||
switch b {
|
||||
case 'G', 'g':
|
||||
nc.ps.state = OP_PING
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
case OP_PING:
|
||||
switch b {
|
||||
case '\n':
|
||||
nc.processPing()
|
||||
nc.ps.drop, nc.ps.state = 0, OP_START
|
||||
}
|
||||
default:
|
||||
goto parseErr
|
||||
}
|
||||
}
|
||||
// Check for split buffer scenarios
|
||||
if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG) && nc.ps.argBuf == nil {
|
||||
nc.ps.argBuf = nc.ps.scratch[:0]
|
||||
nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...)
|
||||
// FIXME, check max len
|
||||
}
|
||||
// Check for split msg
|
||||
if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil {
|
||||
// We need to clone the msgArg if it is still referencing the
|
||||
// read buffer and we are not able to process the msg.
|
||||
if nc.ps.argBuf == nil {
|
||||
nc.cloneMsgArg()
|
||||
}
|
||||
|
||||
// If we will overflow the scratch buffer, just create a
|
||||
// new buffer to hold the split message.
|
||||
if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) {
|
||||
lrem := len(buf[nc.ps.as:])
|
||||
|
||||
nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size)
|
||||
copy(nc.ps.msgBuf, buf[nc.ps.as:])
|
||||
} else {
|
||||
nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)]
|
||||
nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
parseErr:
|
||||
return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:])
|
||||
}
|
||||
|
||||
// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but
|
||||
// we need to hold onto it into the next read.
|
||||
func (nc *Conn) cloneMsgArg() {
|
||||
nc.ps.argBuf = nc.ps.scratch[:0]
|
||||
nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...)
|
||||
nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...)
|
||||
nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)]
|
||||
if nc.ps.ma.reply != nil {
|
||||
nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):]
|
||||
}
|
||||
}
|
||||
|
||||
const argsLenMax = 4
|
||||
|
||||
func (nc *Conn) processMsgArgs(arg []byte) error {
|
||||
// Unroll splitArgs to avoid runtime/heap issues
|
||||
a := [argsLenMax][]byte{}
|
||||
args := a[:0]
|
||||
start := -1
|
||||
for i, b := range arg {
|
||||
switch b {
|
||||
case ' ', '\t', '\r', '\n':
|
||||
if start >= 0 {
|
||||
args = append(args, arg[start:i])
|
||||
start = -1
|
||||
}
|
||||
default:
|
||||
if start < 0 {
|
||||
start = i
|
||||
}
|
||||
}
|
||||
}
|
||||
if start >= 0 {
|
||||
args = append(args, arg[start:])
|
||||
}
|
||||
|
||||
switch len(args) {
|
||||
case 3:
|
||||
nc.ps.ma.subject = args[0]
|
||||
nc.ps.ma.sid = parseInt64(args[1])
|
||||
nc.ps.ma.reply = nil
|
||||
nc.ps.ma.size = int(parseInt64(args[2]))
|
||||
case 4:
|
||||
nc.ps.ma.subject = args[0]
|
||||
nc.ps.ma.sid = parseInt64(args[1])
|
||||
nc.ps.ma.reply = args[2]
|
||||
nc.ps.ma.size = int(parseInt64(args[3]))
|
||||
default:
|
||||
return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg)
|
||||
}
|
||||
if nc.ps.ma.sid < 0 {
|
||||
return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg)
|
||||
}
|
||||
if nc.ps.ma.size < 0 {
|
||||
return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ascii numbers 0-9
|
||||
const (
|
||||
ascii_0 = 48
|
||||
ascii_9 = 57
|
||||
)
|
||||
|
||||
// parseInt64 expects decimal positive numbers. We
|
||||
// return -1 to signal error
|
||||
func parseInt64(d []byte) (n int64) {
|
||||
if len(d) == 0 {
|
||||
return -1
|
||||
}
|
||||
for _, dec := range d {
|
||||
if dec < ascii_0 || dec > ascii_9 {
|
||||
return -1
|
||||
}
|
||||
n = n*10 + (int64(dec) - ascii_0)
|
||||
}
|
||||
return n
|
||||
}
|
||||
93
vendor/github.com/nats-io/nats/test/test.go
generated
vendored
Normal file
93
vendor/github.com/nats-io/nats/test/test.go
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
// Copyright 2015 Apcera Inc. All rights reserved.
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/gnatsd/server"
|
||||
"github.com/nats-io/nats"
|
||||
|
||||
gnatsd "github.com/nats-io/gnatsd/test"
|
||||
)
|
||||
|
||||
// So that we can pass tests and benchmarks...
|
||||
type tLogger interface {
|
||||
Fatalf(format string, args ...interface{})
|
||||
Errorf(format string, args ...interface{})
|
||||
}
|
||||
|
||||
// TestLogger
|
||||
type TestLogger tLogger
|
||||
|
||||
// Dumb wait program to sync on callbacks, etc... Will timeout
|
||||
func Wait(ch chan bool) error {
|
||||
return WaitTime(ch, 5*time.Second)
|
||||
}
|
||||
|
||||
// Wait for a chan with a timeout.
|
||||
func WaitTime(ch chan bool, timeout time.Duration) error {
|
||||
select {
|
||||
case <-ch:
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
}
|
||||
return errors.New("timeout")
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Creating client connections
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// NewDefaultConnection
|
||||
func NewDefaultConnection(t tLogger) *nats.Conn {
|
||||
return NewConnection(t, nats.DefaultPort)
|
||||
}
|
||||
|
||||
// NewConnection forms connection on a given port.
|
||||
func NewConnection(t tLogger, port int) *nats.Conn {
|
||||
url := fmt.Sprintf("nats://localhost:%d", port)
|
||||
nc, err := nats.Connect(url)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create default connection: %v\n", err)
|
||||
return nil
|
||||
}
|
||||
return nc
|
||||
}
|
||||
|
||||
// NewEConn
|
||||
func NewEConn(t tLogger) *nats.EncodedConn {
|
||||
ec, err := nats.NewEncodedConn(NewDefaultConnection(t), nats.DEFAULT_ENCODER)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create an encoded connection: %v\n", err)
|
||||
}
|
||||
return ec
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Running gnatsd server in separate Go routines
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// RunDefaultServer will run a server on the default port.
|
||||
func RunDefaultServer() *server.Server {
|
||||
return RunServerOnPort(nats.DefaultPort)
|
||||
}
|
||||
|
||||
// RunServerOnPort will run a server on the given port.
|
||||
func RunServerOnPort(port int) *server.Server {
|
||||
opts := gnatsd.DefaultTestOptions
|
||||
opts.Port = port
|
||||
return RunServerWithOptions(opts)
|
||||
}
|
||||
|
||||
// RunServerWithOptions will run a server with the given options.
|
||||
func RunServerWithOptions(opts server.Options) *server.Server {
|
||||
return gnatsd.RunServer(&opts)
|
||||
}
|
||||
|
||||
// RunServerWithConfig will run a server with the given configuration file.
|
||||
func RunServerWithConfig(configFile string) (*server.Server, *server.Options) {
|
||||
return gnatsd.RunServerWithConfig(configFile)
|
||||
}
|
||||
21
vendor/github.com/nats-io/nuid/LICENSE
generated
vendored
Normal file
21
vendor/github.com/nats-io/nuid/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2016 Apcera Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
121
vendor/github.com/nats-io/nuid/nuid.go
generated
vendored
Normal file
121
vendor/github.com/nats-io/nuid/nuid.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2016 Apcera Inc. All rights reserved.
|
||||
|
||||
// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly.
|
||||
package nuid
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
prand "math/rand"
|
||||
)
|
||||
|
||||
// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly.
|
||||
// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data
|
||||
// that is started at a pseudo random number and increments with a pseudo-random increment.
|
||||
// Total is 22 bytes of base 62 ascii text :)
|
||||
|
||||
const (
|
||||
digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||
base = 62
|
||||
preLen = 12
|
||||
seqLen = 10
|
||||
maxSeq = int64(839299365868340224) // base^seqLen == 62^10
|
||||
minInc = int64(33)
|
||||
maxInc = int64(333)
|
||||
totalLen = preLen + seqLen
|
||||
)
|
||||
|
||||
type NUID struct {
|
||||
pre []byte
|
||||
seq int64
|
||||
inc int64
|
||||
}
|
||||
|
||||
type lockedNUID struct {
|
||||
sync.Mutex
|
||||
*NUID
|
||||
}
|
||||
|
||||
// Global NUID
|
||||
var globalNUID *lockedNUID
|
||||
|
||||
// Seed sequential random with crypto or math/random and current time
|
||||
// and generate crypto prefix.
|
||||
func init() {
|
||||
r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
prand.Seed(time.Now().UnixNano())
|
||||
} else {
|
||||
prand.Seed(r.Int64())
|
||||
}
|
||||
globalNUID = &lockedNUID{NUID: New()}
|
||||
globalNUID.RandomizePrefix()
|
||||
}
|
||||
|
||||
// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment.
|
||||
func New() *NUID {
|
||||
n := &NUID{
|
||||
seq: prand.Int63n(maxSeq),
|
||||
inc: minInc + prand.Int63n(maxInc-minInc),
|
||||
pre: make([]byte, preLen),
|
||||
}
|
||||
n.RandomizePrefix()
|
||||
return n
|
||||
}
|
||||
|
||||
// Generate the next NUID string from the global locked NUID instance.
|
||||
func Next() string {
|
||||
globalNUID.Lock()
|
||||
nuid := globalNUID.Next()
|
||||
globalNUID.Unlock()
|
||||
return nuid
|
||||
}
|
||||
|
||||
// Generate the next NUID string.
|
||||
func (n *NUID) Next() string {
|
||||
// Increment and capture.
|
||||
n.seq += n.inc
|
||||
if n.seq >= maxSeq {
|
||||
n.RandomizePrefix()
|
||||
n.resetSequential()
|
||||
}
|
||||
seq := n.seq
|
||||
|
||||
// Copy prefix
|
||||
var b [totalLen]byte
|
||||
bs := b[:preLen]
|
||||
copy(bs, n.pre)
|
||||
|
||||
// copy in the seq in base36.
|
||||
for i, l := len(b), seq; i > preLen; l /= base {
|
||||
i -= 1
|
||||
b[i] = digits[l%base]
|
||||
}
|
||||
return string(b[:])
|
||||
}
|
||||
|
||||
// Resets the sequential portion of the NUID.
|
||||
func (n *NUID) resetSequential() {
|
||||
n.seq = prand.Int63n(maxSeq)
|
||||
n.inc = minInc + prand.Int63n(maxInc-minInc)
|
||||
}
|
||||
|
||||
// Generate a new prefix from crypto/rand.
|
||||
// This call *can* drain entropy and will be called automatically when we exhaust the sequential range.
|
||||
// Will panic if it gets an error from rand.Int()
|
||||
func (n *NUID) RandomizePrefix() {
|
||||
var cb [preLen]byte
|
||||
cbs := cb[:]
|
||||
if nb, err := rand.Read(cbs); nb != preLen || err != nil {
|
||||
panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err))
|
||||
}
|
||||
|
||||
for i := 0; i < preLen; i++ {
|
||||
n.pre[i] = digits[int(cbs[i])%base]
|
||||
}
|
||||
}
|
||||
119
vendor/manifest
vendored
119
vendor/manifest
vendored
@@ -23,14 +23,6 @@
|
||||
"revision": "a0146f2f931611b8bfe40f07018c97a7c881c76a",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/PuerkitoBio/ghost/handlers",
|
||||
"repository": "https://github.com/PuerkitoBio/ghost",
|
||||
"vcs": "",
|
||||
"revision": "a0146f2f931611b8bfe40f07018c97a7c881c76a",
|
||||
"branch": "master",
|
||||
"path": "/handlers"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/Sirupsen/logrus",
|
||||
"repository": "https://github.com/Sirupsen/logrus",
|
||||
@@ -535,14 +527,6 @@
|
||||
"revision": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/davecgh/go-spew/spew",
|
||||
"repository": "https://github.com/davecgh/go-spew",
|
||||
"vcs": "",
|
||||
"revision": "2df174808ee097f90d259e432cc04442cf60be21",
|
||||
"branch": "master",
|
||||
"path": "/spew"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/docker/docker/pkg/homedir",
|
||||
"repository": "https://github.com/docker/docker",
|
||||
@@ -598,14 +582,6 @@
|
||||
"branch": "master",
|
||||
"path": "/cgroups"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/docker/libcontainer/cgroups/fs",
|
||||
"repository": "https://github.com/docker/libcontainer",
|
||||
"vcs": "",
|
||||
"revision": "83a102cc68a09d890cce3b6c2e5c14c49e6373a0",
|
||||
"branch": "master",
|
||||
"path": "/cgroups/fs"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/docker/libcontainer/configs",
|
||||
"repository": "https://github.com/docker/libcontainer",
|
||||
@@ -629,14 +605,6 @@
|
||||
"revision": "f3b10ff408486b3e248197254514778285fbdea1",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/emicklei/go-restful/swagger",
|
||||
"repository": "https://github.com/emicklei/go-restful",
|
||||
"vcs": "",
|
||||
"revision": "f3b10ff408486b3e248197254514778285fbdea1",
|
||||
"branch": "master",
|
||||
"path": "/swagger"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/fsouza/go-dockerclient",
|
||||
"repository": "https://github.com/fsouza/go-dockerclient",
|
||||
@@ -833,6 +801,85 @@
|
||||
"revision": "35fef6f28be7e47a87d8a71ef5b80cbf2c4c167a",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/gnatsd/auth",
|
||||
"repository": "https://github.com/nats-io/gnatsd",
|
||||
"vcs": "git",
|
||||
"revision": "f2c17eb159e1fcc5859b25b632a60c26506f0665",
|
||||
"branch": "master",
|
||||
"path": "auth",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/gnatsd/conf",
|
||||
"repository": "https://github.com/nats-io/gnatsd",
|
||||
"vcs": "git",
|
||||
"revision": "f2c17eb159e1fcc5859b25b632a60c26506f0665",
|
||||
"branch": "master",
|
||||
"path": "conf",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/gnatsd/server",
|
||||
"repository": "https://github.com/nats-io/gnatsd",
|
||||
"vcs": "git",
|
||||
"revision": "f2c17eb159e1fcc5859b25b632a60c26506f0665",
|
||||
"branch": "master",
|
||||
"path": "/server",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/gnatsd/test",
|
||||
"repository": "https://github.com/nats-io/gnatsd",
|
||||
"vcs": "git",
|
||||
"revision": "f2c17eb159e1fcc5859b25b632a60c26506f0665",
|
||||
"branch": "master",
|
||||
"path": "test",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/gnatsd/vendor/github.com/nats-io/nuid",
|
||||
"repository": "https://github.com/nats-io/gnatsd",
|
||||
"vcs": "git",
|
||||
"revision": "f2c17eb159e1fcc5859b25b632a60c26506f0665",
|
||||
"branch": "master",
|
||||
"path": "vendor/github.com/nats-io/nuid",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/bcrypt",
|
||||
"repository": "https://github.com/nats-io/gnatsd",
|
||||
"vcs": "git",
|
||||
"revision": "f2c17eb159e1fcc5859b25b632a60c26506f0665",
|
||||
"branch": "master",
|
||||
"path": "vendor/golang.org/x/crypto/bcrypt",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/gnatsd/vendor/golang.org/x/crypto/blowfish",
|
||||
"repository": "https://github.com/nats-io/gnatsd",
|
||||
"vcs": "git",
|
||||
"revision": "f2c17eb159e1fcc5859b25b632a60c26506f0665",
|
||||
"branch": "master",
|
||||
"path": "vendor/golang.org/x/crypto/blowfish",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/nats",
|
||||
"repository": "https://github.com/nats-io/nats",
|
||||
"vcs": "git",
|
||||
"revision": "ce9cdc9addff268b4b75b72f7b6dcca012954c6a",
|
||||
"branch": "master",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nats-io/nuid",
|
||||
"repository": "https://github.com/nats-io/nuid",
|
||||
"vcs": "git",
|
||||
"revision": "a5152d67cf63cbfb5d992a395458722a45194715",
|
||||
"branch": "master",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/nu7hatch/gouuid",
|
||||
"repository": "https://github.com/nu7hatch/gouuid",
|
||||
@@ -848,14 +895,6 @@
|
||||
"branch": "master",
|
||||
"path": "/libcontainer/cgroups"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
|
||||
"repository": "https://github.com/opencontainers/runc",
|
||||
"vcs": "",
|
||||
"revision": "361f9b7921665b5894faef36fc8430aec573dfa4",
|
||||
"branch": "master",
|
||||
"path": "/libcontainer/cgroups/fs"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/opencontainers/runc/libcontainer/configs",
|
||||
"repository": "https://github.com/opencontainers/runc",
|
||||
|
||||
Reference in New Issue
Block a user