Collector and analyzer for RRD data

This commit is contained in:
divolgin
2020-10-31 00:07:23 +00:00
parent 014a716949
commit 5a1321da02
27 changed files with 2323 additions and 174 deletions

View File

@@ -11,6 +11,11 @@ on:
jobs:
build:
runs-on: ubuntu-18.04
container:
image: replicated/troubleshoot-builder:18.04
credentials:
username: repldeploy2
password: ${{ secrets.DOCKERHUB_PASSWORD }}
steps:
- uses: actions/setup-go@v1
with:
@@ -24,10 +29,15 @@ jobs:
- uses: actions/checkout@v2
- run: make
- run: make
compile-preflight:
runs-on: ubuntu-latest
runs-on: ubuntu-18.04
container:
image: replicated/troubleshoot-builder:18.04
credentials:
username: repldeploy2
password: ${{ secrets.DOCKERHUB_PASSWORD }}
needs: build
steps:
- uses: actions/setup-go@v1
@@ -46,7 +56,7 @@ jobs:
path: bin/preflight
validate-preflight:
runs-on: ubuntu-latest
runs-on: ubuntu-18.04
needs: compile-preflight
steps:
- name: Download preflight binary
@@ -54,12 +64,17 @@ jobs:
with:
name: preflight
path: bin/
- uses: engineerd/setup-kind@v0.2.0
- uses: engineerd/setup-kind@v0.4.0
- run: chmod +x bin/preflight
- run: ./bin/preflight --interactive=false --format=json https://preflight.replicated.com
compile-supportbundle:
runs-on: ubuntu-latest
runs-on: ubuntu-18.04
container:
image: replicated/troubleshoot-builder:18.04
credentials:
username: repldeploy2
password: ${{ secrets.DOCKERHUB_PASSWORD }}
needs: build
steps:
- uses: actions/setup-go@v1
@@ -78,23 +93,28 @@ jobs:
path: bin/support-bundle
validate-supportbundle:
runs-on: ubuntu-latest
runs-on: ubuntu-18.04
needs: compile-supportbundle
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@master
- name: Download support-bundle binary
uses: actions/download-artifact@v1
with:
name: support-bundle
path: bin/
- uses: engineerd/setup-kind@v0.2.0
- uses: engineerd/setup-kind@v0.4.0
- run: chmod +x bin/support-bundle
- run: ./bin/support-bundle ./examples/support-bundle/sample-collectors.yaml
- run: ./bin/support-bundle ./examples/support-bundle/sample-supportbundle.yaml
- run: ./bin/support-bundle https://kots.io
goreleaser:
runs-on: ubuntu-latest
runs-on: ubuntu-18.04
container:
image: replicated/troubleshoot-builder:18.04
credentials:
username: repldeploy2
password: ${{ secrets.DOCKERHUB_PASSWORD }}
needs:
- validate-preflight
if: startsWith(github.ref, 'refs/tags/v')
@@ -109,7 +129,7 @@ jobs:
- name: Unshallow
run: git fetch --prune --unshallow
- uses: actions/setup-go@v1
with:
go-version: "1.14"

View File

@@ -29,6 +29,8 @@ endif
define LDFLAGS
-ldflags "\
-s -w \
-extldflags \"-static\" \
-X ${VERSION_PACKAGE}.version=${VERSION} \
-X ${VERSION_PACKAGE}.gitSHA=${GIT_SHA} \
-X ${VERSION_PACKAGE}.buildTime=${DATE} \
@@ -47,15 +49,15 @@ test: generate fmt vet
.PHONY: support-bundle
support-bundle: generate fmt vet
go build ${LDFLAGS} -o bin/support-bundle github.com/replicatedhq/troubleshoot/cmd/troubleshoot
go build -tags netgo ${LDFLAGS} -o bin/support-bundle github.com/replicatedhq/troubleshoot/cmd/troubleshoot
.PHONY: preflight
preflight: generate fmt vet
go build ${LDFLAGS} -o bin/preflight github.com/replicatedhq/troubleshoot/cmd/preflight
go build -tags netgo ${LDFLAGS} -o bin/preflight github.com/replicatedhq/troubleshoot/cmd/preflight
.PHONY: analyze
analyze: generate fmt vet
go build ${LDFLAGS} -o bin/analyze github.com/replicatedhq/troubleshoot/cmd/analyze
go build -tags netgo ${LDFLAGS} -o bin/analyze github.com/replicatedhq/troubleshoot/cmd/analyze
.PHONY: fmt
fmt:
@@ -118,7 +120,7 @@ snapshot-release:
.PHONY: local-release
local-release:
curl -sL https://git.io/goreleaser | bash -s -- --rm-dist --snapshot --config deploy/.goreleaser.local.yml
curl -sL https://git.io/goreleaser | bash -s -- --rm-dist --snapshot --config deploy/.goreleaser.yaml
docker tag replicated/troubleshoot:alpha localhost:32000/troubleshoot:alpha
docker tag replicated/preflight:alpha localhost:32000/preflight:alpha
docker push localhost:32000/troubleshoot:alpha

View File

@@ -40,3 +40,19 @@ For details on creating the custom resource files that drive support-bundle coll
For questions about using Troubleshoot, there's a [Replicated Community](https://help.replicated.com/community) forum, and a [#app-troubleshoot channel in Kubernetes Slack](https://kubernetes.slack.com/channels/app-troubleshoot).
# Building
The following packages are required for building the project from source code:
pkg-config
librrd-dev
libglib2.0-dev
libcairo2-dev
libpango1.0-dev
libpixman-1-dev
libpng-dev
libsdl-pango-dev
libthai-dev
libpcre3-dev
There are known issues with libc6 2.27-3ubuntu1.2 on Ubuntu 18. Upgrading to 2.27-3ubuntu1.3 (apt-get install libc6) resolves these.

View File

@@ -3,6 +3,7 @@ package cli
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
@@ -21,7 +22,6 @@ import (
"github.com/fatih/color"
"github.com/manifoldco/promptui"
"github.com/mattn/go-isatty"
"github.com/mholt/archiver"
"github.com/pkg/errors"
"github.com/replicatedhq/troubleshoot/cmd/util"
analyzer "github.com/replicatedhq/troubleshoot/pkg/analyze"
@@ -669,14 +669,70 @@ func callbackSupportBundleAPI(r *troubleshootv1beta2.ResultRequest, archivePath
}
func tarSupportBundleDir(inputDir, outputFilename string) error {
tarGz := archiver.TarGz{
Tar: &archiver.Tar{
ImplicitTopLevelFolder: false,
},
fileWriter, err := os.Create(outputFilename)
if err != nil {
return errors.Wrap(err, "failed to create output file")
}
defer fileWriter.Close()
if err := tarGz.Archive([]string{inputDir}, outputFilename); err != nil {
return errors.Wrap(err, "create archive")
gzipWriter := gzip.NewWriter(fileWriter)
defer gzipWriter.Close()
tarWriter := tar.NewWriter(gzipWriter)
defer tarWriter.Close()
err = filepath.Walk(inputDir, func(filename string, info os.FileInfo, err error) error {
if err != nil {
return err
}
fileMode := info.Mode()
if !fileMode.IsRegular() { // support bundle can have only files
return nil
}
nameInArchive, err := filepath.Rel(inputDir, filename)
if err != nil {
return errors.Wrap(err, "failed to create relative file name")
}
// tar.FileInfoHeader call causes a crash in static builds
// https://github.com/golang/go/issues/24787
hdr := &tar.Header{
Name: nameInArchive,
ModTime: info.ModTime(),
Mode: int64(fileMode.Perm()),
Typeflag: tar.TypeReg,
Size: info.Size(),
}
err = tarWriter.WriteHeader(hdr)
if err != nil {
return errors.Wrap(err, "failed to write tar header")
}
err = func() error {
fileReader, err := os.Open(filename)
if err != nil {
return errors.Wrap(err, "failed to open source file")
}
defer fileReader.Close()
_, err = io.Copy(tarWriter, fileReader)
if err != nil {
return errors.Wrap(err, "failed to copy file into archive")
}
return nil
}()
if err != nil {
return err
}
return nil
})
if err != nil {
return errors.Wrap(err, "failed to walk source dir")
}
return nil

View File

@@ -19,10 +19,25 @@ builds:
- id: preflight
goos:
- linux
- darwin
- windows
goarch:
- amd64
env:
- CGO_ENABLED=1
main: cmd/preflight/main.go
ldflags: -s -w
-X github.com/replicatedhq/troubleshoot/pkg/version.version={{.Version}}
-X github.com/replicatedhq/troubleshoot/pkg/version.gitSHA={{.Commit}}
-X github.com/replicatedhq/troubleshoot/pkg/version.buildTime={{.Date}}
-extldflags "-static"
flags: -tags netgo -installsuffix netgo
binary: preflight
hooks: {}
- id: preflight-darwin
goos:
- darwin
goarch:
- amd64
env:
- CGO_ENABLED=0
main: cmd/preflight/main.go
@@ -37,10 +52,25 @@ builds:
- id: support-bundle
goos:
- linux
- darwin
- windows
goarch:
- amd64
env:
- CGO_ENABLED=1
main: cmd/troubleshoot/main.go
ldflags: -s -w
-X github.com/replicatedhq/troubleshoot/pkg/version.version={{.Version}}
-X github.com/replicatedhq/troubleshoot/pkg/version.gitSHA={{.Commit}}
-X github.com/replicatedhq/troubleshoot/pkg/version.buildTime={{.Date}}
-extldflags "-static"
flags: -tags netgo -installsuffix netgo
binary: support-bundle
hooks: {}
- id: support-bundle-darwin
goos:
- darwin
goarch:
- amd64
env:
- CGO_ENABLED=0
main: cmd/troubleshoot/main.go

10
go.mod
View File

@@ -8,16 +8,14 @@ require (
github.com/blang/semver v3.5.1+incompatible
github.com/chzyer/logex v1.1.11-0.20160617073814-96a4d311aa9b // indirect
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
github.com/emicklei/go-restful v2.9.6+incompatible // indirect
github.com/fatih/color v1.7.0
github.com/frankban/quicktest v1.7.2 // indirect
github.com/go-openapi/spec v0.19.4 // indirect
github.com/go-redis/redis/v7 v7.2.0
github.com/go-sql-driver/mysql v1.5.0
github.com/gobwas/glob v0.2.3
github.com/golang/snappy v0.0.1 // indirect
github.com/google/go-cmp v0.3.1 // indirect
github.com/google/gofuzz v1.1.0
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e
@@ -27,21 +25,19 @@ require (
github.com/manifoldco/promptui v0.3.2
github.com/mattn/go-colorable v0.1.4 // indirect
github.com/mattn/go-isatty v0.0.9
github.com/mholt/archiver v3.1.1+incompatible
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/nicksnyder/go-i18n v1.10.1 // indirect
github.com/nwaples/rardecode v1.0.0 // indirect
github.com/onsi/gomega v1.9.0 // indirect
github.com/pierrec/lz4 v2.2.6+incompatible // indirect
github.com/pkg/errors v0.9.1
github.com/prometheus/procfs v0.0.5 // indirect
github.com/replicatedhq/termui/v3 v3.1.1-0.20200811145416-f40076d26851
github.com/segmentio/ksuid v1.0.3
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.4.0
github.com/stretchr/testify v1.5.1
github.com/tj/go-spin v1.1.0
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/ulikunitz/xz v0.5.6 // indirect
go.opencensus.io v0.22.0 // indirect
go.undefinedlabs.com/scopeagent v0.1.7
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 // indirect

21
go.sum
View File

@@ -102,9 +102,6 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s=
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
@@ -124,8 +121,6 @@ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -217,8 +212,6 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -305,8 +298,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -345,8 +336,6 @@ github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU=
github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -374,8 +363,6 @@ github.com/nicksnyder/go-i18n v1.10.1 h1:isfg77E/aCD7+0lD/D00ebR2MV5vgeQ276WYyDa
github.com/nicksnyder/go-i18n v1.10.1/go.mod h1:e4Di5xjP9oTVrC6y3C7C0HoSYXjSbhh/dU0eUV32nB4=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/nwaples/rardecode v1.0.0 h1:r7vGuS5akxOnR4JQSkko62RJ1ReCMXxQRPtxsiFMBOs=
github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -398,8 +385,6 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -434,6 +419,8 @@ github.com/replicatedhq/termui/v3 v3.1.1-0.20200811145416-f40076d26851/go.mod h1
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/segmentio/ksuid v1.0.3 h1:FoResxvleQwYiPAVKe1tMUlEirodZqlqglIuFsdDntY=
github.com/segmentio/ksuid v1.0.3/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
@@ -508,8 +495,6 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@@ -728,10 +713,12 @@ k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=

View File

@@ -251,6 +251,20 @@ func Analyze(analyzer *troubleshootv1beta2.Analyze, getFile getCollectedFileCont
}
return []*AnalyzeResult{result}, nil
}
if analyzer.Collectd != nil {
isExcluded, err := isExcluded(analyzer.Collectd.Exclude)
if err != nil {
return nil, err
}
if isExcluded {
return nil, nil
}
result, err := analyzeCollectd(analyzer.Collectd, findFiles)
if err != nil {
return nil, err
}
return []*AnalyzeResult{result}, nil
}
return nil, errors.New("invalid analyzer")
}

316
pkg/analyze/collectd.go Normal file
View File

@@ -0,0 +1,316 @@
package analyzer
import (
"archive/tar"
"bytes"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/rrd"
)
// CollectdSummary aggregates metrics derived from collectd RRD files across
// all hosts found in a support bundle.
type CollectdSummary struct {
	// Load is the highest CPU-count-normalized midterm load average observed
	// on any host (see getHostLoad).
	Load float64
}
// analyzeCollectd extracts the collected RRD tar archives into a temporary
// directory, computes a per-host load summary from each host's load/load.rrd
// file, and evaluates the analyzer's outcomes against the worst observed load.
//
// getCollectedFileContents is expected to glob bundle paths and return a map
// of archive name to raw tar bytes.
func analyzeCollectd(analyzer *troubleshootv1beta2.CollectdAnalyze, getCollectedFileContents func(string) (map[string][]byte, error)) (*AnalyzeResult, error) {
	rrdArchives, err := getCollectedFileContents("/collectd/rrd/*.tar")
	if err != nil {
		return nil, errors.Wrap(err, "failed to find rrd archives")
	}

	tmpDir, err := ioutil.TempDir("", "rrd")
	if err != nil {
		return nil, errors.Wrap(err, "failed to create temp rrd dir")
	}
	defer os.RemoveAll(tmpDir)

	for name, data := range rrdArchives {
		destDir := filepath.Join(tmpDir, filepath.Base(name))
		if err := extractRRDFiles(data, destDir); err != nil {
			return nil, errors.Wrap(err, "failed to extract rrd file")
		}
	}

	loadFiles, err := findRRDLoadFiles(tmpDir)
	if err != nil {
		return nil, errors.Wrap(err, "failed to find load files")
	}

	collectdSummary := CollectdSummary{
		Load: 0,
	}

	// Load files are always present, so this loop can be used for all host metrics.
	for _, loadFile := range loadFiles {
		pathParts := strings.Split(loadFile, string(filepath.Separator))
		if len(pathParts) < 3 {
			continue
		}

		// Expected layout: .../<hostname>/load/load.rrd
		hostname := pathParts[len(pathParts)-3]
		// Derive the host directory with filepath.Dir instead of trimming a
		// hard-coded "/load/load.rrd" suffix so it works with any OS separator.
		hostDir := filepath.Dir(filepath.Dir(loadFile))

		hostLoad, err := getHostLoad(analyzer, loadFile, hostDir)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to analyze %s files", hostname)
		}

		// Report the worst (highest) normalized load across all hosts.
		collectdSummary.Load = math.Max(collectdSummary.Load, hostLoad)
	}

	result, err := getCollectdAnalyzerOutcome(analyzer, collectdSummary)
	if err != nil {
		return nil, errors.Wrap(err, "failed to generate outcome")
	}

	return result, nil
}
// extractRRDFiles unpacks an in-memory tar archive of RRD files into dst.
// Only regular-file entries are extracted; directories and other entry types
// are skipped. Entry names that would escape dst are rejected.
func extractRRDFiles(archiveData []byte, dst string) error {
	tarReader := tar.NewReader(bytes.NewReader(archiveData))
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			return nil
		} else if err != nil {
			return errors.Wrap(err, "failed to read rrd archive")
		}

		if header.Typeflag != tar.TypeReg {
			continue
		}

		dstFileName := filepath.Join(dst, header.Name)

		// Guard against path traversal ("zip slip"): a crafted entry name such
		// as "../../etc/x" must not be allowed to escape the destination dir.
		if !strings.HasPrefix(dstFileName, filepath.Clean(dst)+string(os.PathSeparator)) {
			return errors.Errorf("refusing to extract entry with invalid path %q", header.Name)
		}

		if err := os.MkdirAll(filepath.Dir(dstFileName), 0755); err != nil {
			return errors.Wrap(err, "failed to create dest path")
		}

		// Copy inside a closure so each file handle is closed at the end of its
		// iteration instead of accumulating deferred closes until return.
		err = func() error {
			f, err := os.Create(dstFileName)
			if err != nil {
				return errors.Wrap(err, "failed to create dest file")
			}
			defer f.Close()

			if _, err := io.Copy(f, tarReader); err != nil {
				return errors.Wrap(err, "failed to copy")
			}
			return nil
		}()
		if err != nil {
			return errors.Wrap(err, "failed to write dest file")
		}
	}
}
// findRRDLoadFiles walks rootDir recursively and returns the paths of every
// file named "load.rrd" found beneath it.
func findRRDLoadFiles(rootDir string) ([]string, error) {
	loadFiles := make([]string, 0)

	walkFn := func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if filepath.Base(path) == "load.rrd" {
			loadFiles = append(loadFiles, path)
		}
		return nil
	}

	if err := filepath.Walk(rootDir, walkFn); err != nil {
		return nil, errors.Wrap(err, "failed to find rrd load files")
	}

	return loadFiles, nil
}
// getHostLoad returns the peak midterm load average for one host over the last
// week, normalized by the host's CPU count. loadFile is the host's
// load/load.rrd file and hostRoot is the host's collectd directory, which is
// walked to count per-CPU subdirectories ("cpu-0", "cpu-1", ...).
func getHostLoad(analyzer *troubleshootv1beta2.CollectdAnalyze, loadFile string, hostRoot string) (float64, error) {
	numberOfCPUs := 0
	err := filepath.Walk(hostRoot, func(filename string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			return nil
		}
		if strings.HasPrefix(filepath.Base(filename), "cpu-") {
			numberOfCPUs++
		}
		return nil
	})
	if err != nil {
		return 0, errors.Wrap(err, "failed to find rrd files")
	}
	if numberOfCPUs == 0 {
		// Avoid dividing by zero below; report un-normalized load rather than failing.
		numberOfCPUs = 1
	}

	fileInfo, err := rrd.Info(loadFile)
	if err != nil {
		return 0, errors.Wrap(err, "failed to get rrd info")
	}

	// Guard the type assertion: a bare .(uint) would panic on unexpected rrd
	// info and take down the entire analysis.
	lastUpdateValue, ok := fileInfo["last_update"].(uint)
	if !ok {
		return 0, errors.New("rrd info has no usable last_update value")
	}

	// Query RRD data. Start and end have to be multiples of step.
	window := 7 * 24 * time.Hour
	step := 1 * time.Hour
	lastUpdate := int64(lastUpdateValue)
	endSeconds := lastUpdate / int64(step.Seconds()) * int64(step.Seconds())
	end := time.Unix(endSeconds, 0)
	start := end.Add(-window)

	fetchResult, err := rrd.Fetch(loadFile, "MAX", start, end, step)
	if err != nil {
		return 0, errors.Wrap(err, "failed to fetch load data")
	}
	defer fetchResult.FreeValues()

	values := fetchResult.Values()
	maxLoad := float64(0)
	// Values come in triples: "shortterm", "midterm", "longterm". The i+1 < len
	// bound protects against a trailing partial triple.
	for i := 0; i+1 < len(values); i += 3 {
		v := values[i+1] // midterm
		if math.IsNaN(v) {
			continue
		}
		maxLoad = math.Max(maxLoad, v)
	}

	return maxLoad / float64(numberOfCPUs), nil
}
// getCollectdAnalyzerOutcome evaluates the analyzer's outcomes in order
// against the collected summary and returns the first matching result. An
// outcome whose "when" clause is empty always matches. If nothing matches,
// the result is returned with no pass/warn/fail flag set.
func getCollectdAnalyzerOutcome(analyzer *troubleshootv1beta2.CollectdAnalyze, collectdSummary CollectdSummary) (*AnalyzeResult, error) {
	collectorName := analyzer.CollectorName
	if collectorName == "" {
		collectorName = "rrd"
	}

	title := analyzer.CheckName
	if title == "" {
		title = collectorName
	}

	result := &AnalyzeResult{
		Title:   title,
		IconKey: "host_load_analyze",
		IconURI: "https://troubleshoot.sh/images/analyzer-icons/rrd-analyze.svg",
	}

	for _, outcome := range analyzer.Outcomes {
		if outcome.Fail != nil {
			if outcome.Fail.When == "" {
				result.IsFail = true
				result.Message = outcome.Fail.Message
				result.URI = outcome.Fail.URI
				return result, nil
			}

			isMatch, err := compareCollectdConditionalToActual(outcome.Fail.When, collectdSummary)
			if err != nil {
				return result, errors.Wrap(err, "failed to compare rrd fail conditional")
			}
			if isMatch {
				result.IsFail = true
				result.Message = outcome.Fail.Message
				result.URI = outcome.Fail.URI
				return result, nil
			}
		} else if outcome.Warn != nil {
			// Bug fix: this previously checked outcome.Pass.When, which panics
			// with a nil dereference when a warn outcome is defined without a
			// pass outcome, and wrongly gated the warn on the pass condition.
			if outcome.Warn.When == "" {
				result.IsWarn = true
				result.Message = outcome.Warn.Message
				result.URI = outcome.Warn.URI
				return result, nil
			}

			isMatch, err := compareCollectdConditionalToActual(outcome.Warn.When, collectdSummary)
			if err != nil {
				return result, errors.Wrap(err, "failed to compare rrd warn conditional")
			}
			if isMatch {
				result.IsWarn = true
				result.Message = outcome.Warn.Message
				result.URI = outcome.Warn.URI
				return result, nil
			}
		} else if outcome.Pass != nil {
			if outcome.Pass.When == "" {
				result.IsPass = true
				result.Message = outcome.Pass.Message
				result.URI = outcome.Pass.URI
				return result, nil
			}

			isMatch, err := compareCollectdConditionalToActual(outcome.Pass.When, collectdSummary)
			if err != nil {
				return result, errors.Wrap(err, "failed to compare rrd pass conditional")
			}
			if isMatch {
				result.IsPass = true
				result.Message = outcome.Pass.Message
				result.URI = outcome.Pass.URI
				return result, nil
			}
		}
	}

	return result, nil
}
// compareCollectdConditionalToActual evaluates a conditional of the form
// "<metric> <operator> <value>" (e.g. "load > 2.5") against the summary.
// An unknown metric evaluates to false without an error; an unknown operator
// for a known metric is an error.
func compareCollectdConditionalToActual(conditional string, collectdSummary CollectdSummary) (bool, error) {
	// strings.Fields tolerates repeated/mixed whitespace between tokens, which
	// strings.Split(s, " ") did not.
	parts := strings.Fields(conditional)
	if len(parts) != 3 {
		return false, errors.New("unable to parse conditional")
	}

	switch parts[0] {
	case "load":
		expected, err := strconv.ParseFloat(parts[2], 64)
		if err != nil {
			return false, errors.Wrap(err, "failed to parse float")
		}

		switch parts[1] {
		case "=", "==", "===":
			return collectdSummary.Load == expected, nil
		case "!=", "!==":
			return collectdSummary.Load != expected, nil
		case "<":
			return collectdSummary.Load < expected, nil
		case ">":
			return collectdSummary.Load > expected, nil
		case "<=":
			return collectdSummary.Load <= expected, nil
		case ">=":
			return collectdSummary.Load >= expected, nil
		}

		// Bug fix: report the comparator (parts[1]), not the metric name
		// (parts[0]) — the old message always printed "load".
		return false, errors.Errorf("unknown rrd comparator: %q", parts[1])
	}

	return false, nil
}

View File

@@ -150,6 +150,12 @@ func ExtractTroubleshootBundle(reader io.Reader, destDir string) error {
}
case tar.TypeReg:
name := filepath.Join(destDir, header.Name)
dirName := filepath.Dir(name)
if err := os.MkdirAll(dirName, 0755); err != nil {
return errors.Wrapf(err, "failed to mkdir for file %s", header.Name)
}
file, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.FileMode(header.Mode))
if err != nil {
return errors.Wrap(err, "failed to open tar file")

View File

@@ -333,7 +333,7 @@ func Test_textAnalyze(t *testing.T) {
for _, v := range actual {
unPointered = append(unPointered, *v)
}
assert.Equal(t, test.expectResult, unPointered)
assert.ElementsMatch(t, test.expectResult, unPointered)
})
}
}

View File

@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// autogenerated by controller-gen object, do not modify manually
// Code generated by controller-gen. DO NOT EDIT.
package v1beta1
@@ -862,6 +862,28 @@ func (in *ImagePullSecret) DeepCopy() *ImagePullSecret {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullSecrets) DeepCopyInto(out *ImagePullSecrets) {
*out = *in
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullSecrets.
func (in *ImagePullSecrets) DeepCopy() *ImagePullSecrets {
if in == nil {
return nil
}
out := new(ImagePullSecrets)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ingress) DeepCopyInto(out *Ingress) {
*out = *in
@@ -1385,6 +1407,11 @@ func (in *Run) DeepCopyInto(out *Run) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ImagePullSecret != nil {
in, out := &in.ImagePullSecret, &out.ImagePullSecret
*out = new(ImagePullSecrets)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Run.

View File

@@ -115,6 +115,12 @@ type DatabaseAnalyze struct {
FileName string `json:"fileName,omitempty" yaml:"fileName,omitempty"`
}
// CollectdAnalyze configures the collectd/RRD analyzer: which collector's
// output to analyze and the outcomes to evaluate against the host load data.
type CollectdAnalyze struct {
	AnalyzeMeta `json:",inline" yaml:",inline"`
	// Outcomes are evaluated in order; the first matching fail/warn/pass wins.
	Outcomes []*Outcome `json:"outcomes" yaml:"outcomes"`
	// CollectorName names the collectd collector output; defaults to "rrd"
	// when empty.
	CollectorName string `json:"collectorName" yaml:"collectorName"`
}
type AnalyzeMeta struct {
CheckName string `json:"checkName,omitempty" yaml:"checkName,omitempty"`
Exclude multitype.BoolOrString `json:"exclude,omitempty" yaml:"exclude,omitempty"`
@@ -136,4 +142,5 @@ type Analyze struct {
Postgres *DatabaseAnalyze `json:"postgres,omitempty" yaml:"postgres,omitempty"`
Mysql *DatabaseAnalyze `json:"mysql,omitempty" yaml:"mysql,omitempty"`
Redis *DatabaseAnalyze `json:"redis,omitempty" yaml:"redis,omitempty"`
Collectd *CollectdAnalyze `json:"collectd,omitempty" yaml:"collectd,omitempty"`
}

View File

@@ -123,6 +123,16 @@ type Database struct {
URI string `json:"uri" yaml:"uri"`
}
// Collectd configures collection of collectd RRD data files from cluster
// nodes.
type Collectd struct {
	CollectorMeta `json:",inline" yaml:",inline"`
	// Namespace the collection workload runs in — presumably a Kubernetes
	// namespace; confirm against the collector implementation.
	Namespace string `json:"namespace" yaml:"namespace"`
	// Image run to perform the collection.
	Image string `json:"image" yaml:"image"`
	// ImagePullPolicy for Image (e.g. Always/IfNotPresent) — assumed to follow
	// Kubernetes semantics; TODO confirm.
	ImagePullPolicy string `json:"imagePullPolicy,omitempty" yaml:"imagePullPolicy,omitempty"`
	// ImagePullSecret optionally supplies registry credentials for Image.
	ImagePullSecret *ImagePullSecrets `json:"imagePullSecret,omitempty" yaml:"imagePullSecret,omitempty"`
	// Timeout bounds the collection; format not shown here — presumably a Go
	// duration string, verify against the collector.
	Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"`
	// HostPath is the node filesystem path where collectd writes RRD files.
	HostPath string `json:"hostPath" yaml:"hostPath"`
}
type Collect struct {
ClusterInfo *ClusterInfo `json:"clusterInfo,omitempty" yaml:"clusterInfo,omitempty"`
ClusterResources *ClusterResources `json:"clusterResources,omitempty" yaml:"clusterResources,omitempty"`
@@ -136,6 +146,7 @@ type Collect struct {
Postgres *Database `json:"postgres,omitempty" yaml:"postgres,omitempty"`
Mysql *Database `json:"mysql,omitempty" yaml:"mysql,omitempty"`
Redis *Database `json:"redis,omitempty" yaml:"redis,omitempty"`
Collectd *Collectd `json:"collectd,omitempty" yaml:"collectd,omitempty"`
}
func (c *Collect) AccessReviewSpecs(overrideNS string) []authorizationv1.SelfSubjectAccessReviewSpec {
@@ -335,6 +346,10 @@ func (c *Collect) GetName() string {
collector = "http"
name = c.HTTP.CollectorName
}
if c.Collectd != nil {
collector = "rrd"
name = c.Collectd.CollectorName
}
if collector == "" {
return "<none>"

View File

@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// autogenerated by controller-gen object, do not modify manually
// Code generated by controller-gen. DO NOT EDIT.
package v1beta2
@@ -127,6 +127,11 @@ func (in *Analyze) DeepCopyInto(out *Analyze) {
*out = new(DatabaseAnalyze)
(*in).DeepCopyInto(*out)
}
if in.Collectd != nil {
in, out := &in.Collectd, &out.Collectd
*out = new(CollectdAnalyze)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Analyze.
@@ -404,6 +409,11 @@ func (in *Collect) DeepCopyInto(out *Collect) {
*out = new(Database)
**out = **in
}
if in.Collectd != nil {
in, out := &in.Collectd, &out.Collectd
*out = new(Collectd)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Collect.
@@ -416,6 +426,54 @@ func (in *Collect) DeepCopy() *Collect {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Collectd) DeepCopyInto(out *Collectd) {
*out = *in
out.CollectorMeta = in.CollectorMeta
if in.ImagePullSecret != nil {
in, out := &in.ImagePullSecret, &out.ImagePullSecret
*out = new(ImagePullSecrets)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Collectd.
func (in *Collectd) DeepCopy() *Collectd {
if in == nil {
return nil
}
out := new(Collectd)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CollectdAnalyze) DeepCopyInto(out *CollectdAnalyze) {
*out = *in
out.AnalyzeMeta = in.AnalyzeMeta
if in.Outcomes != nil {
in, out := &in.Outcomes, &out.Outcomes
*out = make([]*Outcome, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Outcome)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectdAnalyze.
func (in *CollectdAnalyze) DeepCopy() *CollectdAnalyze {
if in == nil {
return nil
}
out := new(CollectdAnalyze)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Collector) DeepCopyInto(out *Collector) {
*out = *in
@@ -862,6 +920,28 @@ func (in *ImagePullSecret) DeepCopy() *ImagePullSecret {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullSecrets) DeepCopyInto(out *ImagePullSecrets) {
*out = *in
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullSecrets.
func (in *ImagePullSecrets) DeepCopy() *ImagePullSecrets {
if in == nil {
return nil
}
out := new(ImagePullSecrets)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ingress) DeepCopyInto(out *Ingress) {
*out = *in
@@ -892,6 +972,7 @@ func (in *Ingress) DeepCopy() *Ingress {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LogLimits) DeepCopyInto(out *LogLimits) {
*out = *in
in.SinceTime.DeepCopyInto(&out.SinceTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogLimits.
@@ -921,7 +1002,7 @@ func (in *Logs) DeepCopyInto(out *Logs) {
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = new(LogLimits)
**out = **in
(*in).DeepCopyInto(*out)
}
}
@@ -1385,6 +1466,11 @@ func (in *Run) DeepCopyInto(out *Run) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ImagePullSecret != nil {
in, out := &in.ImagePullSecret, &out.ImagePullSecret
*out = new(ImagePullSecrets)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Run.

216
pkg/collect/collectd.go Normal file
View File

@@ -0,0 +1,216 @@
package collect
import (
"context"
"path"
"path/filepath"
"time"
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/logger"
"github.com/segmentio/ksuid"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
)
// Collectd collects RRD files from every node in the cluster. It deploys a
// short-lived daemonset that mounts the configured hostPath at /rrd, tars up
// that directory from each pod, and returns the archives keyed by bundle
// path. The daemonset (and any pull secret created from inline data) is
// deleted before returning.
func Collectd(c *Collector, collectdCollector *troubleshootv1beta2.Collectd) (map[string][]byte, error) {
	ctx := context.Background()

	// A random label ties the daemonset's pods to this collection run so
	// collectRRDFiles can find them.
	label := ksuid.New().String()
	namespace := collectdCollector.Namespace

	client, err := kubernetes.NewForConfig(c.ClientConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create client from config")
	}

	dsName, err := createDaemonSet(ctx, client, collectdCollector, namespace, label)
	// Register cleanup before inspecting err: createDaemonSet can return a
	// non-empty name together with an error (created but never became ready),
	// and the daemonset must still be deleted in that case.
	if dsName != "" {
		defer func() {
			if err := client.AppsV1().DaemonSets(namespace).Delete(ctx, dsName, metav1.DeleteOptions{}); err != nil {
				logger.Printf("Failed to delete daemonset %s: %v\n", dsName, err)
			}
		}()

		// Inline Data means createDaemonSet created a secret; remove it too.
		if collectdCollector.ImagePullSecret != nil && collectdCollector.ImagePullSecret.Data != nil {
			defer func() {
				err := client.CoreV1().Secrets(namespace).Delete(ctx, collectdCollector.ImagePullSecret.Name, metav1.DeleteOptions{})
				if err != nil && !kuberneteserrors.IsNotFound(err) {
					logger.Printf("Failed to delete secret %s: %v\n", collectdCollector.ImagePullSecret.Name, err)
				}
			}()
		}
	}
	if err != nil {
		return nil, errors.Wrap(err, "failed to create daemonset")
	}

	// No timeout configured: collect synchronously.
	if collectdCollector.Timeout == "" {
		return collectRRDFiles(ctx, client, c, collectdCollector, label, namespace)
	}

	timeout, err := time.ParseDuration(collectdCollector.Timeout)
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse timeout")
	}

	// Race the collection against the timeout. Both channels are buffered
	// (size 1) so the goroutine can always complete its send and exit even
	// when the timeout wins the select below.
	childCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	errCh := make(chan error, 1)
	resultCh := make(chan map[string][]byte, 1)
	go func() {
		b, err := collectRRDFiles(childCtx, client, c, collectdCollector, label, namespace)
		if err != nil {
			errCh <- err
		} else {
			resultCh <- b
		}
	}()

	select {
	case <-time.After(timeout):
		return nil, errors.New("timeout")
	case result := <-resultCh:
		return result, nil
	case err := <-errCh:
		return nil, err
	}
}
// createDaemonSet deploys a daemonset whose pods mount rrdCollector.HostPath
// from each node at /rrd (the container just sleeps; files are pulled out
// later via exec). It then waits up to 30 seconds for every scheduled pod to
// become ready. The daemonset name is returned even on error so the caller
// can clean it up.
func createDaemonSet(ctx context.Context, client *kubernetes.Clientset, rrdCollector *troubleshootv1beta2.Collectd, namespace string, label string) (string, error) {
	pullPolicy := corev1.PullIfNotPresent
	volumeType := corev1.HostPathDirectory
	if rrdCollector.ImagePullPolicy != "" {
		pullPolicy = corev1.PullPolicy(rrdCollector.ImagePullPolicy)
	}

	// This label is how collectRRDFiles finds the pods afterwards.
	dsLabels := map[string]string{
		"rrd-collector": label,
	}

	ds := appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "troubleshoot",
			Namespace:    namespace,
			Labels:       dsLabels,
		},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: dsLabels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: dsLabels,
				},
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyAlways,
					Containers: []corev1.Container{
						{
							Image:           rrdCollector.Image,
							ImagePullPolicy: pullPolicy,
							Name:            "collector",
							// Keep the pod alive so files can be exec'd out.
							Command: []string{"sleep"},
							Args:    []string{"1000000"},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "rrd",
									MountPath: "/rrd",
								},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "rrd",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: rrdCollector.HostPath,
									Type: &volumeType,
								},
							},
						},
					},
				},
			},
		},
	}

	if rrdCollector.ImagePullSecret != nil && rrdCollector.ImagePullSecret.Name != "" {
		err := createSecret(ctx, client, namespace, rrdCollector.ImagePullSecret)
		if err != nil {
			return "", errors.Wrap(err, "failed to create secret")
		}
		ds.Spec.Template.Spec.ImagePullSecrets = append(ds.Spec.Template.Spec.ImagePullSecrets, corev1.LocalObjectReference{Name: rrdCollector.ImagePullSecret.Name})
	}

	createdDS, err := client.AppsV1().DaemonSets(namespace).Create(ctx, &ds, metav1.CreateOptions{})
	if err != nil {
		return "", errors.Wrap(err, "failed to create daemonset")
	}

	// This timeout is different from collector timeout.
	// Time it takes to pull images should not count towards collector timeout.
	childCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	for {
		select {
		case <-time.After(1 * time.Second):
		case <-childCtx.Done():
			// BUG FIX: wrap childCtx.Err(), not ctx.Err(). When only the 30s
			// child deadline fires, the parent ctx.Err() is nil and
			// errors.Wrap(nil, ...) returns nil, which made a wait timeout
			// look like success to the caller.
			return createdDS.Name, errors.Wrap(childCtx.Err(), "failed to wait for daemonset")
		}

		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, createdDS.Name, metav1.GetOptions{})
		if err != nil {
			// Retry transient errors; a NotFound after we just created the
			// daemonset means it was removed out from under us — fatal.
			if !kuberneteserrors.IsNotFound(err) {
				continue
			}
			return createdDS.Name, errors.Wrap(err, "failed to get daemonset")
		}
		if ds.Status.DesiredNumberScheduled != ds.Status.NumberReady {
			continue
		}
		break
	}

	return createdDS.Name, nil
}
// collectRRDFiles tars up /rrd from each collector pod (matched by the
// "rrd-collector" label) and returns the archives keyed by
// collectd/rrd/<nodeName>.tar. Per-pod failures do not abort the run; they
// are recorded as .error (plus .stdout/.stderr when present) entries.
func collectRRDFiles(ctx context.Context, client *kubernetes.Clientset, c *Collector, rrdCollector *troubleshootv1beta2.Collectd, label string, namespace string) (map[string][]byte, error) {
	labelSelector := map[string]string{
		"rrd-collector": label,
	}
	opts := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(labelSelector).String(),
	}
	pods, err := client.CoreV1().Pods(namespace).List(ctx, opts)
	if err != nil {
		return nil, errors.Wrap(err, "list rrd collector pods")
	}

	// Keys are bundle paths (always slash-separated), hence path.Join.
	pathPrefix := path.Join("collectd", "rrd")
	runOutput := map[string][]byte{}
	for _, pod := range pods.Items {
		// Empty container name: getFilesFromPod defaults to the first container.
		stdout, stderr, err := getFilesFromPod(ctx, client, c, pod.Name, "", namespace, "/rrd")
		if err != nil {
			runOutput[path.Join(pathPrefix, pod.Spec.NodeName)+".error"] = []byte(err.Error())
			// NOTE(review): these two keys use filepath.Join while the rest of
			// the function uses path.Join — consider unifying on path.Join for
			// bundle paths (identical on Unix, differs on Windows).
			if len(stdout) > 0 {
				runOutput[filepath.Join(pathPrefix, pod.Spec.NodeName)+".stdout"] = stdout
			}
			if len(stderr) > 0 {
				runOutput[filepath.Join(pathPrefix, pod.Spec.NodeName)+".stderr"] = stderr
			}
			continue
		}
		runOutput[path.Join(pathPrefix, pod.Spec.NodeName)+".tar"] = stdout
	}

	return runOutput, nil
}

View File

@@ -42,138 +42,154 @@ func isExcluded(excludeVal multitype.BoolOrString) (bool, error) {
return parsed, nil
}
func (c *Collector) RunCollectorSync(globalRedactors []*troubleshootv1beta2.Redact) (map[string][]byte, error) {
var unRedacted map[string][]byte
func (c *Collector) RunCollectorSync(globalRedactors []*troubleshootv1beta2.Redact) (result map[string][]byte, err error) {
defer func() {
if r := recover(); r != nil {
err = errors.Errorf("recovered from panic: %v", r)
}
}()
var isExcludedResult bool
var err error
if c.Collect.ClusterInfo != nil {
isExcludedResult, err = isExcluded(c.Collect.ClusterInfo.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = ClusterInfo(c)
result, err = ClusterInfo(c)
} else if c.Collect.ClusterResources != nil {
isExcludedResult, err = isExcluded(c.Collect.ClusterResources.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = ClusterResources(c)
result, err = ClusterResources(c)
} else if c.Collect.Secret != nil {
isExcludedResult, err = isExcluded(c.Collect.Secret.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Secret(c, c.Collect.Secret)
result, err = Secret(c, c.Collect.Secret)
} else if c.Collect.Logs != nil {
isExcludedResult, err = isExcluded(c.Collect.Logs.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Logs(c, c.Collect.Logs)
result, err = Logs(c, c.Collect.Logs)
} else if c.Collect.Run != nil {
isExcludedResult, err = isExcluded(c.Collect.Run.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Run(c, c.Collect.Run)
result, err = Run(c, c.Collect.Run)
} else if c.Collect.Exec != nil {
isExcludedResult, err = isExcluded(c.Collect.Exec.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Exec(c, c.Collect.Exec)
result, err = Exec(c, c.Collect.Exec)
} else if c.Collect.Data != nil {
isExcludedResult, err = isExcluded(c.Collect.Data.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Data(c, c.Collect.Data)
result, err = Data(c, c.Collect.Data)
} else if c.Collect.Copy != nil {
isExcludedResult, err = isExcluded(c.Collect.Copy.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Copy(c, c.Collect.Copy)
result, err = Copy(c, c.Collect.Copy)
} else if c.Collect.HTTP != nil {
isExcludedResult, err = isExcluded(c.Collect.HTTP.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = HTTP(c, c.Collect.HTTP)
result, err = HTTP(c, c.Collect.HTTP)
} else if c.Collect.Postgres != nil {
isExcludedResult, err = isExcluded(c.Collect.Postgres.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Postgres(c, c.Collect.Postgres)
result, err = Postgres(c, c.Collect.Postgres)
} else if c.Collect.Mysql != nil {
isExcludedResult, err = isExcluded(c.Collect.Mysql.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Mysql(c, c.Collect.Mysql)
result, err = Mysql(c, c.Collect.Mysql)
} else if c.Collect.Redis != nil {
isExcludedResult, err = isExcluded(c.Collect.Redis.Exclude)
if err != nil {
return nil, err
return
}
if isExcludedResult {
return nil, nil
return
}
unRedacted, err = Redis(c, c.Collect.Redis)
result, err = Redis(c, c.Collect.Redis)
} else if c.Collect.Collectd != nil {
// TODO: see if redaction breaks these
isExcludedResult, err = isExcluded(c.Collect.Collectd.Exclude)
if err != nil {
return
}
if isExcludedResult {
return
}
result, err = Collectd(c, c.Collect.Collectd)
} else {
return nil, errors.New("no spec found to run")
err = errors.New("no spec found to run")
return
}
if err != nil {
return nil, err
return
}
if c.PathPrefix != "" {
// prefix file paths
prefixed := map[string][]byte{}
for k, v := range unRedacted {
for k, v := range result {
prefixed[filepath.Join(c.PathPrefix, k)] = v
}
unRedacted = prefixed
result = prefixed
}
if c.Redact {
return redactMap(unRedacted, globalRedactors)
result, err = redactMap(result, globalRedactors)
}
return unRedacted, nil
return
}
func (c *Collector) GetDisplayName() string {

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"path/filepath"
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -37,7 +38,7 @@ func Copy(c *Collector, copyCollector *troubleshootv1beta2.Copy) (map[string][]b
for _, pod := range pods {
bundlePath := filepath.Join(copyCollector.Name, pod.Namespace, pod.Name, copyCollector.ContainerName)
files, copyErrors := copyFiles(c, client, pod, copyCollector)
files, copyErrors := copyFiles(ctx, client, c, pod, copyCollector)
if len(copyErrors) > 0 {
key := filepath.Join(bundlePath, copyCollector.ContainerPath+"-errors.json")
copyOutput[key], err = marshalNonNil(copyErrors)
@@ -56,24 +57,43 @@ func Copy(c *Collector, copyCollector *troubleshootv1beta2.Copy) (map[string][]b
return copyOutput, nil
}
func copyFiles(c *Collector, client *kubernetes.Clientset, pod corev1.Pod, copyCollector *troubleshootv1beta2.Copy) (map[string][]byte, map[string]string) {
container := pod.Spec.Containers[0].Name
func copyFiles(ctx context.Context, client *kubernetes.Clientset, c *Collector, pod corev1.Pod, copyCollector *troubleshootv1beta2.Copy) (map[string][]byte, map[string]string) {
containerName := pod.Spec.Containers[0].Name
if copyCollector.ContainerName != "" {
container = copyCollector.ContainerName
containerName = copyCollector.ContainerName
}
command := []string{"tar", "-C", filepath.Dir(copyCollector.ContainerPath), "-cf", "-", filepath.Base(copyCollector.ContainerPath)}
req := client.CoreV1().RESTClient().Post().Resource("pods").Name(pod.Name).Namespace(pod.Namespace).SubResource("exec")
scheme := runtime.NewScheme()
if err := corev1.AddToScheme(scheme); err != nil {
return nil, map[string]string{
stdout, stderr, err := getFilesFromPod(ctx, client, c, pod.Name, containerName, pod.Namespace, copyCollector.ContainerPath)
if err != nil {
errors := map[string]string{
filepath.Join(copyCollector.ContainerPath, "error"): err.Error(),
}
if len(stdout) > 0 {
errors[filepath.Join(copyCollector.ContainerPath, "stdout")] = string(stdout)
}
if len(stderr) > 0 {
errors[filepath.Join(copyCollector.ContainerPath, "stderr")] = string(stderr)
}
return nil, errors
}
return map[string][]byte{
filepath.Base(copyCollector.ContainerPath) + ".tar": stdout,
}, nil
}
func getFilesFromPod(ctx context.Context, client *kubernetes.Clientset, c *Collector, podName string, containerName string, namespace string, containerPath string) ([]byte, []byte, error) {
command := []string{"tar", "-C", filepath.Dir(containerPath), "-cf", "-", filepath.Base(containerPath)}
req := client.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(namespace).SubResource("exec")
scheme := runtime.NewScheme()
if err := corev1.AddToScheme(scheme); err != nil {
return nil, nil, errors.Wrap(err, "failed to add runtime scheme")
}
parameterCodec := runtime.NewParameterCodec(scheme)
req.VersionedParams(&corev1.PodExecOptions{
Command: command,
Container: container,
Container: containerName,
Stdin: true,
Stdout: false,
Stderr: true,
@@ -82,9 +102,7 @@ func copyFiles(c *Collector, client *kubernetes.Clientset, pod corev1.Pod, copyC
exec, err := remotecommand.NewSPDYExecutor(c.ClientConfig, "POST", req.URL())
if err != nil {
return nil, map[string]string{
filepath.Join(copyCollector.ContainerPath, "error"): err.Error(),
}
return nil, nil, errors.Wrap(err, "failed to create SPDY executor")
}
output := new(bytes.Buffer)
@@ -96,21 +114,10 @@ func copyFiles(c *Collector, client *kubernetes.Clientset, pod corev1.Pod, copyC
Tty: false,
})
if err != nil {
errors := map[string]string{
filepath.Join(copyCollector.ContainerPath, "error"): err.Error(),
}
if s := output.String(); len(s) > 0 {
errors[filepath.Join(copyCollector.ContainerPath, "stdout")] = s
}
if s := stderr.String(); len(s) > 0 {
errors[filepath.Join(copyCollector.ContainerPath, "stderr")] = s
}
return nil, errors
return output.Bytes(), stderr.Bytes(), errors.Wrap(err, "failed to stream command output")
}
return map[string][]byte{
filepath.Base(copyCollector.ContainerPath) + ".tar": output.Bytes(),
}, nil
return output.Bytes(), stderr.Bytes(), nil
}
func getCopyErrosFileName(copyCollector *troubleshootv1beta2.Copy) string {

View File

@@ -155,11 +155,12 @@ func runPod(ctx context.Context, client *kubernetes.Clientset, runCollector *tro
},
}
if runCollector.ImagePullSecret != nil {
err := createSecret(ctx, client, runCollector.ImagePullSecret, &pod)
if runCollector.ImagePullSecret != nil && runCollector.ImagePullSecret.Name != "" {
err := createSecret(ctx, client, pod.Namespace, runCollector.ImagePullSecret)
if err != nil {
return nil, errors.Wrap(err, "failed to create secret")
}
pod.Spec.ImagePullSecrets = append(pod.Spec.ImagePullSecrets, corev1.LocalObjectReference{Name: runCollector.ImagePullSecret.Name})
}
created, err := client.CoreV1().Pods(namespace).Create(ctx, &pod, metav1.CreateOptions{})
if err != nil {
@@ -168,58 +169,55 @@ func runPod(ctx context.Context, client *kubernetes.Clientset, runCollector *tro
return created, nil
}
func createSecret(ctx context.Context, client *kubernetes.Clientset, imagePullSecret *troubleshootv1beta2.ImagePullSecrets, pod *corev1.Pod) error {
//In case a new secret needs to be created
if imagePullSecret.Data != nil {
var out bytes.Buffer
data := make(map[string][]byte)
if imagePullSecret.SecretType == "kubernetes.io/dockerconfigjson" {
//Check if required field in data exists
v, found := imagePullSecret.Data[".dockerconfigjson"]
if !found {
return errors.Errorf("Secret type kubernetes.io/dockerconfigjson requires argument \".dockerconfigjson\"")
}
if len(imagePullSecret.Data) > 1 {
return errors.Errorf("Secret type kubernetes.io/dockerconfigjson accepts only one argument \".dockerconfigjson\"")
}
//K8s client accepts only Json formated files as data, provided data must be decoded and indented
parsedConfig, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return errors.Wrap(err, "Unable to decode data.")
}
err = json.Indent(&out, parsedConfig, "", "\t")
if err != nil {
return errors.Wrap(err, "Unable to parse encoded data.")
}
data[".dockerconfigjson"] = out.Bytes()
} else {
return errors.Errorf("ImagePullSecret must be of type: kubernetes.io/dockerconfigjson")
}
secret := corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: imagePullSecret.Name,
GenerateName: "troubleshoot",
Namespace: pod.Namespace,
},
Data: data,
Type: corev1.SecretType(imagePullSecret.SecretType),
}
created, err := client.CoreV1().Secrets(pod.Namespace).Create(ctx, &secret, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create secret")
}
pod.Spec.ImagePullSecrets = append(pod.Spec.ImagePullSecrets, corev1.LocalObjectReference{Name: created.Name})
func createSecret(ctx context.Context, client *kubernetes.Clientset, namespace string, imagePullSecret *troubleshootv1beta2.ImagePullSecrets) error {
if imagePullSecret.Data == nil {
return nil
}
//In case secret must only be added to the specs.
if imagePullSecret.Name != "" {
pod.Spec.ImagePullSecrets = append(pod.Spec.ImagePullSecrets, corev1.LocalObjectReference{Name: imagePullSecret.Name})
return nil
var out bytes.Buffer
data := make(map[string][]byte)
if imagePullSecret.SecretType != "kubernetes.io/dockerconfigjson" {
return errors.Errorf("ImagePullSecret must be of type: kubernetes.io/dockerconfigjson")
}
return errors.Errorf("Secret must at least have a Name")
// Check if required field in data exists
v, found := imagePullSecret.Data[".dockerconfigjson"]
if !found {
return errors.Errorf("Secret type kubernetes.io/dockerconfigjson requires argument \".dockerconfigjson\"")
}
if len(imagePullSecret.Data) > 1 {
return errors.Errorf("Secret type kubernetes.io/dockerconfigjson accepts only one argument \".dockerconfigjson\"")
}
// K8s client accepts only JSON-formatted files as data; provided data must be decoded and indented
parsedConfig, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return errors.Wrap(err, "Unable to decode data.")
}
err = json.Indent(&out, parsedConfig, "", "\t")
if err != nil {
return errors.Wrap(err, "Unable to parse encoded data.")
}
data[".dockerconfigjson"] = out.Bytes()
secret := corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: imagePullSecret.Name,
GenerateName: "troubleshoot",
Namespace: namespace,
},
Data: data,
Type: corev1.SecretType(imagePullSecret.SecretType),
}
_, err = client.CoreV1().Secrets(namespace).Create(ctx, &secret, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create secret")
}
return nil
}

24
pkg/rrd/LICENSE Normal file
View File

@@ -0,0 +1,24 @@
Copyright (c) 2012, Michal Derkacz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

18
pkg/rrd/README.md Normal file
View File

@@ -0,0 +1,18 @@
# Go bindings to rrdtool C library (rrdtool)
This package implements [Go](http://golang.org) (golang) bindings for the [rrdtool](http://oss.oetiker.ch/rrdtool/) C API.
## Installing
rrd currently supports rrdtool-1.4.x
Install rrd with:
go get github.com/ziutek/rrd
## Usage
See [GoDoc](http://godoc.org/github.com/ziutek/rrd) for documentation.
## Example
See [rrd_test.go](https://github.com/ziutek/rrd/blob/master/rrd_test.go) for an example of using this package.

505
pkg/rrd/rrd.go Normal file
View File

@@ -0,0 +1,505 @@
// Copied from "github.com/ziutek/rrd"
// Simple wrapper for rrdtool C library
package rrd
import (
"fmt"
"math"
"os"
"runtime"
"strings"
"time"
)
// Error is the string-backed error type returned by this package's rrd calls.
type Error string

// Error implements the error interface.
func (e Error) Error() string {
	return string(e)
}
/*
type cstring []byte
func newCstring(s string) cstring {
cs := make(cstring, len(s)+1)
copy(cs, s)
return cs
}
func (cs cstring) p() unsafe.Pointer {
if len(cs) == 0 {
return nil
}
return unsafe.Pointer(&cs[0])
}
func (cs cstring) String() string {
return string(cs[:len(cs)-1])
}
*/
// join renders args as a single ":"-separated string. time.Time values are
// encoded as their Unix timestamp (via i64toa); everything else is formatted
// with fmt.Sprint.
func join(args []interface{}) string {
	parts := make([]string, 0, len(args))
	for _, arg := range args {
		if t, ok := arg.(time.Time); ok {
			parts = append(parts, i64toa(t.Unix()))
			continue
		}
		parts = append(parts, fmt.Sprint(arg))
	}
	return strings.Join(parts, ":")
}
type Creator struct {
filename string
start time.Time
step uint
args []string
}
// NewCreator returns new Creator object. You need to call Create to really
// create database file.
// filename - name of database file
// start - don't accept any data timed before or at time specified
// step - base interval in seconds with which data will be fed into RRD
func NewCreator(filename string, start time.Time, step uint) *Creator {
return &Creator{
filename: filename,
start: start,
step: step,
}
}
// DS formats a DS argument and appends it to the list of arguments to be
// passed to rrdcreate(). Each element of args is formatted with fmt.Sprint().
// Please see the rrdcreate(1) manual page for in-depth documentation.
func (c *Creator) DS(name, compute string, args ...interface{}) {
	arg := fmt.Sprintf("DS:%s:%s:%s", name, compute, join(args))
	c.args = append(c.args, arg)
}

// RRA formats an RRA argument and appends it to the list of arguments to be
// passed to rrdcreate(). Each element of args is formatted with fmt.Sprint().
// Please see the rrdcreate(1) manual page for in-depth documentation.
func (c *Creator) RRA(cf string, args ...interface{}) {
	c.args = append(c.args, fmt.Sprintf("RRA:%s:%s", cf, join(args)))
}
// Create creates the database file. If overwrite is true it overwrites the
// database file if it exists. If overwrite is false it returns an error when
// the file already exists (use os.IsExist to detect that case).
func (c *Creator) Create(overwrite bool) error {
	if overwrite {
		return c.create()
	}
	// O_EXCL makes the open fail when the file already exists, which is how
	// the no-overwrite contract is enforced before handing off to rrdcreate.
	f, err := os.OpenFile(c.filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		return err
	}
	f.Close()
	return c.create()
}
// Use cstring and unsafe.Pointer to avoid allocations for C calls
//
// Updater batches update strings for one RRD file. The cstring values are
// C-allocated (cstring lives on the cgo side of this package — see the
// commented-out sketch above) and are released by the cfree finalizer.
type Updater struct {
	filename *cstring   // C copy of the RRD file name
	template *cstring   // optional DS-name template set via SetTemplate
	args     []*cstring // updates buffered by Cache, flushed by Update
}

// NewUpdater returns an Updater for the given RRD file. A finalizer is
// registered so the C-allocated strings are released when the Updater is
// garbage collected.
func NewUpdater(filename string) *Updater {
	u := &Updater{filename: newCstring(filename)}
	runtime.SetFinalizer(u, cfree)
	return u
}

// cfree releases every C-allocated string held by u; it runs as a finalizer.
func cfree(u *Updater) {
	u.filename.Free()
	u.template.Free()
	for _, a := range u.args {
		a.Free()
	}
}

// SetTemplate replaces the update template with the ":"-joined DS names,
// freeing the previous template's C string first.
func (u *Updater) SetTemplate(dsName ...string) {
	u.template.Free()
	u.template = newCstring(strings.Join(dsName, ":"))
}
// Cache caches data for later save using Update(). Use it to avoid
// open/read/write/close for every update.
func (u *Updater) Cache(args ...interface{}) {
	u.args = append(u.args, newCstring(join(args)))
}

// Update saves data in RRDB.
// Without args Update saves all subsequent updates buffered by Cache method.
// If you specify args it saves them immediately.
func (u *Updater) Update(args ...interface{}) error {
	if len(args) != 0 {
		// Immediate mode: write only this update; the buffer is untouched.
		cs := newCstring(join(args))
		err := u.update([]*cstring{cs})
		cs.Free()
		return err
	} else if len(u.args) != 0 {
		// Flush mode: write everything buffered by Cache, then free and
		// reset the buffer regardless of the update result.
		err := u.update(u.args)
		for _, a := range u.args {
			a.Free()
		}
		u.args = nil
		return err
	}
	// Nothing to do: no immediate args and an empty buffer.
	return nil
}
// GraphInfo describes the result of a graph operation: the PRINT output
// lines and the rendered image's dimensions and y-axis range.
type GraphInfo struct {
	Print         []string
	Width, Height uint
	Ymin, Ymax    float64
}

// Grapher accumulates options and graph statements for an rrd graph call.
// Option fields are set through the Set* methods; statements (DEF, CDEF,
// LINE, AREA, ...) accumulate in args.
type Grapher struct {
	title           string
	vlabel          string
	width, height   uint
	borderWidth     uint
	upperLimit      float64 // sentinel -math.MaxFloat64 means "unset" (see NewGrapher)
	lowerLimit      float64 // sentinel math.MaxFloat64 means "unset" (see NewGrapher)
	rigid           bool
	altAutoscale    bool
	altAutoscaleMin bool
	altAutoscaleMax bool
	noGridFit       bool
	logarithmic     bool
	unitsExponent   int // sentinel minInt means "unset" (see NewGrapher)
	unitsLength     uint
	rightAxisScale  float64
	rightAxisShift  float64
	rightAxisLabel  string
	noLegend        bool
	lazy            bool
	colors          map[string]string
	slopeMode       bool
	watermark       string
	base            uint
	imageFormat     string
	interlaced      bool
	daemon          string
	args            []string // accumulated graph statements, in insertion order
}
const (
	maxUint = ^uint(0)          // all bits set
	maxInt  = int(maxUint >> 1) // largest int on this platform
	minInt  = -maxInt - 1       // used by NewGrapher as the "unitsExponent unset" sentinel
	defWidth = 2                // default border width for new Graphers
)
// NewGrapher returns a Grapher initialized with sentinel values so that
// unset limits/exponent can be distinguished from explicitly-set ones, plus
// the default border width and an empty color map ready for SetColor.
func NewGrapher() *Grapher {
	return &Grapher{
		upperLimit:    -math.MaxFloat64, // "no upper limit set"
		lowerLimit:    math.MaxFloat64,  // "no lower limit set"
		unitsExponent: minInt,           // "no units exponent set"
		borderWidth:   defWidth,
		colors:        make(map[string]string),
	}
}
// SetTitle sets the graph title.
func (g *Grapher) SetTitle(title string) {
	g.title = title
}

// SetVLabel sets the vertical-axis label.
func (g *Grapher) SetVLabel(vlabel string) {
	g.vlabel = vlabel
}

// SetSize sets the graph width and height.
func (g *Grapher) SetSize(width, height uint) {
	g.width = width
	g.height = height
}

// SetBorder sets the border width.
func (g *Grapher) SetBorder(width uint) {
	g.borderWidth = width
}

// SetLowerLimit sets the lower y-axis limit.
func (g *Grapher) SetLowerLimit(limit float64) {
	g.lowerLimit = limit
}

// SetUpperLimit sets the upper y-axis limit.
func (g *Grapher) SetUpperLimit(limit float64) {
	g.upperLimit = limit
}

// SetRigid enables rigid y-axis boundaries.
func (g *Grapher) SetRigid() {
	g.rigid = true
}

// SetAltAutoscale enables alternative autoscaling.
func (g *Grapher) SetAltAutoscale() {
	g.altAutoscale = true
}

// SetAltAutoscaleMin enables alternative autoscaling of the minimum only.
func (g *Grapher) SetAltAutoscaleMin() {
	g.altAutoscaleMin = true
}

// SetAltAutoscaleMax enables alternative autoscaling of the maximum only.
func (g *Grapher) SetAltAutoscaleMax() {
	g.altAutoscaleMax = true
}

// SetNoGridFit disables fitting the grid to the data.
func (g *Grapher) SetNoGridFit() {
	g.noGridFit = true
}

// SetLogarithmic enables a logarithmic y-axis.
func (g *Grapher) SetLogarithmic() {
	g.logarithmic = true
}

// SetUnitsExponent fixes the y-axis units exponent.
func (g *Grapher) SetUnitsExponent(e int) {
	g.unitsExponent = e
}

// SetUnitsLength sets the width reserved for y-axis labels.
func (g *Grapher) SetUnitsLength(l uint) {
	g.unitsLength = l
}

// SetRightAxis sets the scale and shift applied to the right-hand axis.
func (g *Grapher) SetRightAxis(scale, shift float64) {
	g.rightAxisScale = scale
	g.rightAxisShift = shift
}

// SetRightAxisLabel sets the right-hand axis label.
func (g *Grapher) SetRightAxisLabel(label string) {
	g.rightAxisLabel = label
}

// SetNoLegend suppresses the legend.
func (g *Grapher) SetNoLegend() {
	g.noLegend = true
}

// SetLazy enables lazy graph generation.
func (g *Grapher) SetLazy() {
	g.lazy = true
}

// SetColor sets the color for the given color tag.
func (g *Grapher) SetColor(colortag, color string) {
	g.colors[colortag] = color
}

// SetSlopeMode enables slope mode rendering.
func (g *Grapher) SetSlopeMode() {
	g.slopeMode = true
}

// SetImageFormat sets the output image format.
func (g *Grapher) SetImageFormat(format string) {
	g.imageFormat = format
}

// SetInterlaced enables interlaced image output.
func (g *Grapher) SetInterlaced() {
	g.interlaced = true
}

// SetBase sets the base value (e.g. 1000 vs 1024) for unit scaling.
func (g *Grapher) SetBase(base uint) {
	g.base = base
}

// SetWatermark sets the watermark string.
func (g *Grapher) SetWatermark(watermark string) {
	g.watermark = watermark
}

// SetDaemon sets the rrdcached daemon address.
func (g *Grapher) SetDaemon(daemon string) {
	g.daemon = daemon
}

// AddOptions appends raw graph options verbatim.
func (g *Grapher) AddOptions(options ...string) {
	g.args = append(g.args, options...)
}
// push appends cmd to the argument list, attaching any options as additional
// ":"-separated fields.
func (g *Grapher) push(cmd string, options []string) {
	if len(options) != 0 {
		cmd = cmd + ":" + strings.Join(options, ":")
	}
	g.args = append(g.args, cmd)
}
// Def adds a DEF statement binding vname to data source dsname from rrdfile,
// consolidated with cf.
func (g *Grapher) Def(vname, rrdfile, dsname, cf string, options ...string) {
	g.push(
		fmt.Sprintf("DEF:%s=%s:%s:%s", vname, rrdfile, dsname, cf),
		options,
	)
}

// VDef adds a VDEF statement computing vname from the RPN expression rpn.
func (g *Grapher) VDef(vname, rpn string) {
	g.push("VDEF:"+vname+"="+rpn, nil)
}

// CDef adds a CDEF statement computing vname from the RPN expression rpn.
func (g *Grapher) CDef(vname, rpn string) {
	g.push("CDEF:"+vname+"="+rpn, nil)
}

// Print adds a PRINT statement for vname with the given format.
func (g *Grapher) Print(vname, format string) {
	g.push("PRINT:"+vname+":"+format, nil)
}

// PrintT adds a PRINT statement with strftime time formatting.
func (g *Grapher) PrintT(vname, format string) {
	g.push("PRINT:"+vname+":"+format+":strftime", nil)
}

// GPrint adds a GPRINT (in-graph print) statement for vname.
func (g *Grapher) GPrint(vname, format string) {
	g.push("GPRINT:"+vname+":"+format, nil)
}

// GPrintT adds a GPRINT statement with strftime time formatting.
func (g *Grapher) GPrintT(vname, format string) {
	g.push("GPRINT:"+vname+":"+format+":strftime", nil)
}

// Comment adds a COMMENT statement with the literal text s.
func (g *Grapher) Comment(s string) {
	g.push("COMMENT:"+s, nil)
}
// VRule adds a vertical rule at t in the given color. A time.Time is encoded
// as its Unix timestamp; any other value is formatted with %v.
func (g *Grapher) VRule(t interface{}, color string, options ...string) {
	if when, ok := t.(time.Time); ok {
		t = when.Unix()
	}
	g.push(fmt.Sprintf("VRULE:%v#%s", t, color), options)
}
func (g *Grapher) HRule(value, color string, options ...string) {
hr := "HRULE:" + value + "#" + color
g.push(hr, options)
}
func (g *Grapher) Line(width float32, value, color string, options ...string) {
line := fmt.Sprintf("LINE%f:%s", width, value)
if color != "" {
line += "#" + color
}
g.push(line, options)
}
func (g *Grapher) Area(value, color string, options ...string) {
area := "AREA:" + value
if color != "" {
area += "#" + color
}
g.push(area, options)
}
func (g *Grapher) Tick(vname, color string, options ...string) {
tick := "TICK:" + vname
if color != "" {
tick += "#" + color
}
g.push(tick, options)
}
func (g *Grapher) Shift(vname string, offset interface{}) {
if v, ok := offset.(time.Duration); ok {
offset = int64((v + time.Second/2) / time.Second)
}
shift := fmt.Sprintf("SHIFT:%s:%v", vname, offset)
g.push(shift, nil)
}
func (g *Grapher) TextAlign(align string) {
g.push("TEXTALIGN:"+align, nil)
}
// Graph renders the configured graph for the [start, end] interval in memory
// and returns its GraphInfo together with the image bytes, or an error.
func (g *Grapher) Graph(start, end time.Time) (GraphInfo, []byte, error) {
	return g.graph("-", start, end)
}

// SaveGraph renders the configured graph for the [start, end] interval into
// filename and returns its GraphInfo, or an error.
func (g *Grapher) SaveGraph(filename string, start, end time.Time) (GraphInfo, error) {
	gi, _, err := g.graph(filename, start, end)
	return gi, err
}
// FetchResult holds the time series returned by Fetch.
type FetchResult struct {
	Filename string        // RRD file the data came from
	Cf       string        // consolidation function used
	Start    time.Time     // timestamp of the first row
	End      time.Time     // timestamp of the last row
	Step     time.Duration // interval between rows
	DsNames  []string      // data-source names, one per column
	RowCnt   int           // number of rows
	values   []float64     // row-major data, len == len(DsNames)*RowCnt
}

// ValueAt returns the value of data source dsIndex at row rowIndex.
func (r *FetchResult) ValueAt(dsIndex, rowIndex int) float64 {
	offset := rowIndex*len(r.DsNames) + dsIndex
	return r.values[offset]
}
// Exporter accumulates arguments for an rrdtool xport invocation.
type Exporter struct {
	maxRows uint     // -m flag; 0 means unset
	daemon  string   // --daemon address; empty means unset
	args    []string // DEF/CDEF/XPORT directives, in order
}

// NewExporter returns a new, empty Exporter.
func NewExporter() *Exporter {
	return new(Exporter)
}

// SetMaxRows sets the maximum number of rows to export (-m).
func (e *Exporter) SetMaxRows(maxRows uint) {
	e.maxRows = maxRows
}

// push appends cmd to the argument list, extending it first with any
// options, each separated by a colon.
func (e *Exporter) push(cmd string, options []string) {
	for _, opt := range options {
		cmd += ":" + opt
	}
	e.args = append(e.args, cmd)
}

// Def adds a DEF directive binding vname to data source dsname of rrdfile,
// consolidated with cf. Extra options are appended colon-separated.
func (e *Exporter) Def(vname, rrdfile, dsname, cf string, options ...string) {
	e.push("DEF:"+vname+"="+rrdfile+":"+dsname+":"+cf, options)
}

// CDef adds a CDEF directive computing vname from the RPN expression rpn.
func (e *Exporter) CDef(vname, rpn string) {
	e.push("CDEF:"+vname+"="+rpn, nil)
}

// XportDef adds an XPORT directive exporting vname under the given legend.
func (e *Exporter) XportDef(vname, label string) {
	e.push("XPORT:"+vname+":"+label, nil)
}

// Xport runs the export over [start, end] with the given step and returns
// the resulting table.
func (e *Exporter) Xport(start, end time.Time, step time.Duration) (XportResult, error) {
	return e.xport(start, end, step)
}

// SetDaemon sets the rrdcached address to use (--daemon).
func (e *Exporter) SetDaemon(daemon string) {
	e.daemon = daemon
}
// XportResult holds the tabular data returned by Exporter.Xport.
type XportResult struct {
	Start   time.Time     // timestamp of the first row
	End     time.Time     // timestamp of the last row
	Step    time.Duration // interval between rows
	Legends []string      // column legends from the XPORT directives
	RowCnt  int           // number of rows
	values  []float64     // row-major data, len == len(Legends)*RowCnt
}

// ValueAt returns the value of column legendIndex at row rowIndex.
func (r *XportResult) ValueAt(legendIndex, rowIndex int) float64 {
	offset := rowIndex*len(r.Legends) + legendIndex
	return r.values[offset]
}

85
pkg/rrd/rrd_darwin.go Normal file
View File

@@ -0,0 +1,85 @@
package rrd
import (
"errors"
"strconv"
"time"
)
// cstring is a placeholder for the cgo-backed C string type; no C memory is
// managed on this platform.
type cstring byte

// newCstring is a stub; it always returns nil on this platform.
func newCstring(s string) *cstring {
	return nil
}

// Free is a no-op on this platform.
func (cs *cstring) Free() {
	return
}

// String always returns the empty string on this platform.
func (cs *cstring) String() string {
	return ""
}
// create is a stub; creating RRD files requires librrd, which is not linked
// on this platform.
func (c *Creator) create() error {
	return errors.New("not implemented")
}

// update is a stub; updating RRD files requires librrd, which is not linked
// on this platform.
func (u *Updater) update(args []*cstring) error {
	return errors.New("not implemented")
}
// ftoa formats f in scientific notation with 10 digits of precision.
func ftoa(f float64) string {
	return strconv.FormatFloat(f, 'e', 10, 64)
}

// i64toa formats i in base 10.
func i64toa(i int64) string {
	return strconv.FormatInt(i, 10)
}

// u64toa formats u in base 10. (Previously returned "" on this platform,
// inconsistent with the cgo implementation.)
func u64toa(u uint64) string {
	return strconv.FormatUint(u, 10)
}

// itoa formats i in base 10.
func itoa(i int) string {
	return i64toa(int64(i))
}

// utoa formats u in base 10.
func utoa(u uint) string {
	return u64toa(uint64(u))
}
// parseInfoKey is a stub returning zero values. NOTE(review): the cgo
// implementation defaults kid to -1, not 0 — presumably harmless here since
// Info always errors on this platform, but confirm nothing else calls this.
func parseInfoKey(ik string) (kname, kkey string, kid int) {
	return
}
// graph is a stub; graphing requires librrd, which is not linked on this
// platform.
func (g *Grapher) graph(filename string, start, end time.Time) (GraphInfo, []byte, error) {
	return GraphInfo{}, nil, errors.New("not implemented")
}

// Info returns information about RRD file.
// Not implemented on this platform; always returns an error.
func Info(filename string) (map[string]interface{}, error) {
	return nil, errors.New("not implemented")
}

// Fetch retrieves data from RRD file.
// Not implemented on this platform; always returns an error.
func Fetch(filename, cf string, start, end time.Time, step time.Duration) (FetchResult, error) {
	return FetchResult{}, errors.New("not implemented")
}

// FreeValues free values memory allocated by C.
// No-op here: nothing is ever allocated on this platform.
func (r *FetchResult) FreeValues() {
}

// Values returns copy of internal array of values.
// Always nil here, since Fetch never succeeds on this platform.
func (r *FetchResult) Values() []float64 {
	return nil
}

// Export data from RRD file(s)
// Not implemented on this platform; always returns an error.
func (e *Exporter) xport(start, end time.Time, step time.Duration) (XportResult, error) {
	return XportResult{}, errors.New("not implemented")
}

// FreeValues free values memory allocated by C.
// No-op here: nothing is ever allocated on this platform.
func (r *XportResult) FreeValues() {
}

553
pkg/rrd/rrd_linux.go Normal file
View File

@@ -0,0 +1,553 @@
package rrd
/*
#include <stdlib.h>
#include <rrd.h>
#include "rrdfunc.h"
#cgo LDFLAGS: -lpthread -lrrd -lpng -lcairo -lpixman-1 -lpango-1.0 -lfontconfig -lssl -lexpat -lfreetype -lgobject-2.0 -lglib-2.0 -lpcre -lthai -ldatrie -lz -lffi -ldbi -ldl -lc -lm -Wl,--unresolved-symbols=ignore-all
#cgo CFLAGS: -std=c99 -Wno-implicit-function-declaration -Wno-int-conversion
*/
import "C"
import (
"math"
"reflect"
"strconv"
"strings"
"sync"
"time"
"unsafe"
)
// cstring wraps a NUL-terminated C string allocated with C.malloc.
type cstring C.char

// newCstring copies s into freshly malloc'ed C memory and appends the
// terminating NUL. The caller owns the memory and must call Free.
func newCstring(s string) *cstring {
	cs := C.malloc(C.size_t(len(s) + 1))
	buf := (*[1<<31 - 1]byte)(cs)[:len(s)+1]
	copy(buf, s)
	buf[len(s)] = 0
	return (*cstring)(cs)
}

// Free releases the C memory; safe to call on a nil receiver.
func (cs *cstring) Free() {
	if cs != nil {
		C.free(unsafe.Pointer(cs))
	}
}

// String copies the C string back into a Go string. It panics if no NUL
// terminator is found within the addressable window.
func (cs *cstring) String() string {
	buf := (*[1<<31 - 1]byte)(unsafe.Pointer(cs))
	for n, b := range buf {
		if b == 0 {
			return string(buf[:n])
		}
	}
	panic("rrd: bad C string")
}
// mutex serializes calls into librrd functions that are not thread safe
// (see graph and xport below).
var mutex sync.Mutex

// makeArgs converts Go strings to C strings; release them with freeArgs.
func makeArgs(args []string) []*C.char {
	ret := make([]*C.char, len(args))
	for i, s := range args {
		ret[i] = C.CString(s)
	}
	return ret
}

// freeCString releases a single C string.
func freeCString(s *C.char) {
	C.free(unsafe.Pointer(s))
}

// freeArgs releases every C string produced by makeArgs.
func freeArgs(cArgs []*C.char) {
	for _, s := range cArgs {
		freeCString(s)
	}
}
// makeError converts an error string returned by the rrdfunc.c wrappers into
// a Go error; a nil pointer means success. The C string was strdup'ed on the
// C side, so it is freed here after conversion.
func makeError(e *C.char) error {
	if e == nil {
		return nil
	}
	defer freeCString(e)
	return Error(C.GoString(e))
}
// create invokes rrd_create_r with the accumulated DS/RRA arguments.
func (c *Creator) create() error {
	filename := C.CString(c.filename)
	defer freeCString(filename)
	args := makeArgs(c.args)
	defer freeArgs(args)
	// Guard against an empty argument list: &args[0] on a zero-length slice
	// would panic; rrd_create_r accepts argc == 0 with a NULL argv.
	var argv **C.char
	if len(args) > 0 {
		argv = &args[0]
	}
	e := C.rrdCreate(
		filename,
		C.ulong(c.step),
		C.time_t(c.start.Unix()),
		C.int(len(args)),
		argv,
	)
	return makeError(e)
}
// update invokes rrd_update_r with pre-converted C argument strings.
// NOTE(review): &args[0] assumes args is non-empty and would panic otherwise;
// confirm callers always pass at least one update string.
func (u *Updater) update(args []*cstring) error {
	e := C.rrdUpdate(
		(*C.char)(u.filename),
		(*C.char)(u.template),
		C.int(len(args)),
		(**C.char)(unsafe.Pointer(&args[0])),
	)
	return makeError(e)
}
// Pre-allocated C strings for the rrdtool command names and option flags
// used by the makeArgs methods; allocated once and deliberately never freed.
var (
	graphv           = C.CString("graphv")
	xport            = C.CString("xport")
	oStart           = C.CString("-s")
	oEnd             = C.CString("-e")
	oTitle           = C.CString("-t")
	oVlabel          = C.CString("-v")
	oWidth           = C.CString("-w")
	oHeight          = C.CString("-h")
	oUpperLimit      = C.CString("-u")
	oLowerLimit      = C.CString("-l")
	oRigid           = C.CString("-r")
	oAltAutoscale    = C.CString("-A")
	oAltAutoscaleMin = C.CString("-J")
	oAltAutoscaleMax = C.CString("-M")
	oNoGridFit       = C.CString("-N")
	oLogarithmic     = C.CString("-o")
	oUnitsExponent   = C.CString("-X")
	oUnitsLength     = C.CString("-L")
	oRightAxis       = C.CString("--right-axis")
	oRightAxisLabel  = C.CString("--right-axis-label")
	oDaemon          = C.CString("--daemon")
	oBorder          = C.CString("--border")
	oNoLegend        = C.CString("-g")
	oLazy            = C.CString("-z")
	oColor           = C.CString("-c")
	oSlopeMode       = C.CString("-E")
	oImageFormat     = C.CString("-a")
	oInterlaced      = C.CString("-i")
	oBase            = C.CString("-b")
	oWatermark       = C.CString("-W")
	oStep            = C.CString("--step")
	oMaxRows         = C.CString("-m")
)
// ftoa formats f in scientific notation with 10 digits of precision.
func ftoa(f float64) string {
	return strconv.FormatFloat(f, 'e', 10, 64)
}

// ftoc returns f as a newly allocated C string.
func ftoc(f float64) *C.char {
	return C.CString(ftoa(f))
}

// i64toa formats i in base 10.
func i64toa(i int64) string {
	return strconv.FormatInt(i, 10)
}

// i64toc returns i as a newly allocated C string.
func i64toc(i int64) *C.char {
	return C.CString(i64toa(i))
}

// u64toa formats u in base 10.
func u64toa(u uint64) string {
	return strconv.FormatUint(u, 10)
}

// u64toc returns u as a newly allocated C string.
func u64toc(u uint64) *C.char {
	return C.CString(u64toa(u))
}

// itoa formats i in base 10.
func itoa(i int) string {
	return i64toa(int64(i))
}

// itoc returns i as a newly allocated C string.
func itoc(i int) *C.char {
	return i64toc(int64(i))
}

// utoa formats u in base 10.
func utoa(u uint) string {
	return u64toa(uint64(u))
}

// utoc returns u as a newly allocated C string.
func utoc(u uint) *C.char {
	return u64toc(uint64(u))
}
// makeArgs assembles the rrd_graph_v argv from the Grapher configuration
// followed by the accumulated graph directives. Zero/empty fields mean
// "unset" and their flags are omitted. The C strings allocated here are not
// freed by the caller.
func (g *Grapher) makeArgs(filename string, start, end time.Time) []*C.char {
	args := []*C.char{
		graphv, C.CString(filename),
		oStart, i64toc(start.Unix()),
		oEnd, i64toc(end.Unix()),
		oTitle, C.CString(g.title),
		oVlabel, C.CString(g.vlabel),
	}
	if g.width != 0 {
		args = append(args, oWidth, utoc(g.width))
	}
	if g.height != 0 {
		args = append(args, oHeight, utoc(g.height))
	}
	// NOTE(review): the limit sentinels are -MaxFloat64 for upper and
	// +MaxFloat64 for lower — presumably the "unset" defaults assigned by the
	// Grapher constructor (not visible here); confirm there.
	if g.upperLimit != -math.MaxFloat64 {
		args = append(args, oUpperLimit, ftoc(g.upperLimit))
	}
	if g.lowerLimit != math.MaxFloat64 {
		args = append(args, oLowerLimit, ftoc(g.lowerLimit))
	}
	if g.rigid {
		args = append(args, oRigid)
	}
	if g.altAutoscale {
		args = append(args, oAltAutoscale)
	}
	if g.altAutoscaleMax {
		args = append(args, oAltAutoscaleMax)
	}
	if g.altAutoscaleMin {
		args = append(args, oAltAutoscaleMin)
	}
	if g.noGridFit {
		args = append(args, oNoGridFit)
	}
	if g.logarithmic {
		args = append(args, oLogarithmic)
	}
	// minInt is the "unset" sentinel for the units exponent (defined
	// elsewhere in the package).
	if g.unitsExponent != minInt {
		args = append(
			args,
			oUnitsExponent, itoc(g.unitsExponent),
		)
	}
	if g.unitsLength != 0 {
		args = append(
			args,
			oUnitsLength, utoc(g.unitsLength),
		)
	}
	if g.rightAxisScale != 0 {
		args = append(
			args,
			oRightAxis,
			C.CString(ftoa(g.rightAxisScale)+":"+ftoa(g.rightAxisShift)),
		)
	}
	if g.rightAxisLabel != "" {
		args = append(
			args,
			oRightAxisLabel, C.CString(g.rightAxisLabel),
		)
	}
	if g.noLegend {
		args = append(args, oNoLegend)
	}
	if g.lazy {
		args = append(args, oLazy)
	}
	for tag, color := range g.colors {
		args = append(args, oColor, C.CString(tag+"#"+color))
	}
	if g.slopeMode {
		args = append(args, oSlopeMode)
	}
	if g.imageFormat != "" {
		args = append(args, oImageFormat, C.CString(g.imageFormat))
	}
	if g.interlaced {
		args = append(args, oInterlaced)
	}
	if g.base != 0 {
		args = append(args, oBase, utoc(g.base))
	}
	if g.watermark != "" {
		args = append(args, oWatermark, C.CString(g.watermark))
	}
	if g.daemon != "" {
		args = append(args, oDaemon, C.CString(g.daemon))
	}
	// defWidth is the default border width (defined elsewhere in the package).
	if g.borderWidth != defWidth {
		args = append(args, oBorder, utoc(g.borderWidth))
	}
	return append(args, makeArgs(g.args)...)
}
// makeArgs assembles the rrd_xport argv: the time window and step, optional
// -m and --daemon flags, then the accumulated DEF/CDEF/XPORT directives.
func (e *Exporter) makeArgs(start, end time.Time, step time.Duration) []*C.char {
	args := []*C.char{
		xport,
		oStart, i64toc(start.Unix()),
		oEnd, i64toc(end.Unix()),
		oStep, i64toc(int64(step.Seconds())),
	}
	if e.maxRows != 0 {
		args = append(args, oMaxRows, utoc(e.maxRows))
	}
	if e.daemon != "" {
		args = append(args, oDaemon, C.CString(e.daemon))
	}
	return append(args, makeArgs(e.args)...)
}
// parseInfoKey splits an rrd_info key such as "rra[2].cf" into its base name
// ("rra.cf"), its bracketed key ("2") and, when that key is a non-negative
// integer, its numeric index (2). kid is -1 when there is no usable index.
// Data-source keys ("ds[name]...") keep kid == -1 because their bracketed
// part is a name, not an index.
func parseInfoKey(ik string) (kname, kkey string, kid int) {
	kid = -1
	o := strings.IndexRune(ik, '[')
	if o == -1 {
		kname = ik
		return
	}
	c := strings.IndexRune(ik[o+1:], ']')
	if c == -1 {
		// Unbalanced bracket: treat the whole key as a plain name.
		kname = ik
		return
	}
	c += o + 1
	kname = ik[:o] + ik[c+1:]
	kkey = ik[o+1 : c]
	if strings.HasPrefix(kname, "ds.") {
		return
	}
	if id, err := strconv.Atoi(kkey); err == nil && id >= 0 {
		kid = id
	}
	return
}
// updateInfoValue decodes the value stored in one rrd_info_t node according
// to its type tag. For blob nodes the bytes are appended to the previously
// accumulated value v (a []byte), so multi-node blobs concatenate; other
// kinds ignore v. Unknown type tags yield nil.
func updateInfoValue(i *C.struct_rrd_info_t, v interface{}) interface{} {
	switch i._type {
	case C.RD_I_VAL:
		return float64(*(*C.rrd_value_t)(unsafe.Pointer(&i.value[0])))
	case C.RD_I_CNT:
		return uint(*(*C.ulong)(unsafe.Pointer(&i.value[0])))
	case C.RD_I_STR:
		return C.GoString(*(**C.char)(unsafe.Pointer(&i.value[0])))
	case C.RD_I_INT:
		return int(*(*C.int)(unsafe.Pointer(&i.value[0])))
	case C.RD_I_BLO:
		blob := *(*C.rrd_blob_t)(unsafe.Pointer(&i.value[0]))
		b := C.GoBytes(unsafe.Pointer(blob.ptr), C.int(blob.size))
		if v == nil {
			return b
		}
		return append(v.([]byte), b...)
	}
	return nil
}
// parseRRDInfo flattens the rrd_info_t linked list into a map. Keys with a
// numeric index ("rra[2].cf") become a slice under the base name ("rra.cf");
// keys with a named index ("ds[speed].type") become a nested map; plain keys
// map directly to their decoded value. The C list is freed before returning.
func parseRRDInfo(i *C.rrd_info_t) map[string]interface{} {
	defer C.rrd_info_free(i)

	r := make(map[string]interface{})
	for w := (*C.struct_rrd_info_t)(i); w != nil; w = w.next {
		kname, kkey, kid := parseInfoKey(C.GoString(w.key))
		v, ok := r[kname]
		switch {
		case kid != -1:
			// Indexed key: grow the slice as needed to hold position kid.
			var a []interface{}
			if ok {
				a = v.([]interface{})
			}
			if len(a) < kid+1 {
				oldA := a
				a = make([]interface{}, kid+1)
				copy(a, oldA)
			}
			a[kid] = updateInfoValue(w, a[kid])
			v = a
		case kkey != "":
			// Named key: store under a nested map keyed by kkey.
			var m map[string]interface{}
			if ok {
				m = v.(map[string]interface{})
			} else {
				m = make(map[string]interface{})
			}
			old := m[kkey]
			m[kkey] = updateInfoValue(w, old)
			v = m
		default:
			v = updateInfoValue(w, v)
		}
		r[kname] = v
	}
	return r
}
// parseGraphInfo converts the rrd_info_t list returned by rrd_graph_v into a
// GraphInfo plus the rendered image bytes (present when graphing to "-").
func parseGraphInfo(i *C.rrd_info_t) (gi GraphInfo, img []byte) {
	inf := parseRRDInfo(i)
	if v, ok := inf["image_info"]; ok {
		gi.Print = append(gi.Print, v.(string))
	}
	for k, v := range inf {
		if k == "print" {
			// "print" entries arrive as an indexed slice (see parseRRDInfo).
			for _, line := range v.([]interface{}) {
				gi.Print = append(gi.Print, line.(string))
			}
		}
	}
	if v, ok := inf["image_width"]; ok {
		gi.Width = v.(uint)
	}
	if v, ok := inf["image_height"]; ok {
		gi.Height = v.(uint)
	}
	if v, ok := inf["value_min"]; ok {
		gi.Ymin = v.(float64)
	}
	if v, ok := inf["value_max"]; ok {
		gi.Ymax = v.(float64)
	}
	if v, ok := inf["image"]; ok {
		img = v.([]byte)
	}
	return
}
// graph runs rrd_graph_v with the accumulated arguments, writing the image
// to filename ("-" renders to memory), and returns the parsed graph info
// plus the image bytes.
func (g *Grapher) graph(filename string, start, end time.Time) (GraphInfo, []byte, error) {
	var i *C.rrd_info_t
	args := g.makeArgs(filename, start, end)

	mutex.Lock() // rrd_graph_v isn't thread safe
	defer mutex.Unlock()

	err := makeError(C.rrdGraph(
		&i,
		C.int(len(args)),
		&args[0],
	))
	if err != nil {
		return GraphInfo{}, nil, err
	}
	gi, img := parseGraphInfo(i)
	return gi, img, nil
}
// Info returns information about RRD file.
// It wraps rrd_info_r and flattens the resulting linked list into a map via
// parseRRDInfo.
func Info(filename string) (map[string]interface{}, error) {
	fn := C.CString(filename)
	defer freeCString(fn)
	var i *C.rrd_info_t
	err := makeError(C.rrdInfo(&i, fn))
	if err != nil {
		return nil, err
	}
	return parseRRDInfo(i), nil
}
// Fetch retrieves data from RRD file.
// It wraps rrd_fetch_r; start, end and step are replaced by the values the
// library actually used. The returned values alias C-allocated memory —
// call FreeValues when done, or copy out with Values first.
func Fetch(filename, cf string, start, end time.Time, step time.Duration) (FetchResult, error) {
	fn := C.CString(filename)
	defer freeCString(fn)
	cCf := C.CString(cf)
	defer freeCString(cCf)
	cStart := C.time_t(start.Unix())
	cEnd := C.time_t(end.Unix())
	cStep := C.ulong(step.Seconds())
	var (
		ret      C.int
		cDsCnt   C.ulong
		cDsNames **C.char
		cData    *C.double
	)
	err := makeError(C.rrdFetch(&ret, fn, cCf, &cStart, &cEnd, &cStep, &cDsCnt, &cDsNames, &cData))
	if err != nil {
		return FetchResult{filename, cf, start, end, step, nil, 0, nil}, err
	}
	start = time.Unix(int64(cStart), 0)
	end = time.Unix(int64(cEnd), 0)
	step = time.Duration(cStep) * time.Second
	dsCnt := int(cDsCnt)
	dsNames := make([]string, dsCnt)
	// Copy the data-source names into Go strings and free each C string,
	// then the array itself (both allocated by librrd).
	for i := 0; i < dsCnt; i++ {
		dsName := C.arrayGetCString(cDsNames, C.int(i))
		dsNames[i] = C.GoString(dsName)
		C.free(unsafe.Pointer(dsName))
	}
	C.free(unsafe.Pointer(cDsNames))
	// NOTE(review): the +1 assumes rrd_fetch returns rows inclusive of both
	// window endpoints — confirm against the rrd_fetch_r documentation.
	rowCnt := (int(cEnd)-int(cStart))/int(cStep) + 1
	valuesLen := dsCnt * rowCnt
	// Point a Go slice header at the C-allocated data without copying; the
	// memory is released later by FreeValues.
	var values []float64
	sliceHeader := (*reflect.SliceHeader)((unsafe.Pointer(&values)))
	sliceHeader.Cap = valuesLen
	sliceHeader.Len = valuesLen
	sliceHeader.Data = uintptr(unsafe.Pointer(cData))
	return FetchResult{filename, cf, start, end, step, dsNames, rowCnt, values}, nil
}
// FreeValues free values memory allocated by C.
// The FetchResult's values must not be used afterwards.
func (r *FetchResult) FreeValues() {
	sliceHeader := (*reflect.SliceHeader)((unsafe.Pointer(&r.values)))
	C.free(unsafe.Pointer(sliceHeader.Data))
}

// Values returns copy of internal array of values.
// Use this to keep data alive past a FreeValues call.
func (r *FetchResult) Values() []float64 {
	return append([]float64{}, r.values...)
}
// Export data from RRD file(s)
// xport wraps rrd_xport over the accumulated DEF/CDEF/XPORT arguments;
// start, end and step are replaced by the values the library actually used.
// The returned values alias C-allocated memory — call FreeValues when done.
func (e *Exporter) xport(start, end time.Time, step time.Duration) (XportResult, error) {
	cStart := C.time_t(start.Unix())
	cEnd := C.time_t(end.Unix())
	cStep := C.ulong(step.Seconds())
	args := e.makeArgs(start, end, step)

	mutex.Lock() // serialize librrd access, as in graph
	defer mutex.Unlock()

	var (
		ret      C.int
		cXSize   C.int
		cColCnt  C.ulong
		cLegends **C.char
		cData    *C.double
	)
	err := makeError(C.rrdXport(
		&ret,
		C.int(len(args)),
		&args[0],
		&cXSize, &cStart, &cEnd, &cStep, &cColCnt, &cLegends, &cData,
	))
	if err != nil {
		return XportResult{start, end, step, nil, 0, nil}, err
	}
	start = time.Unix(int64(cStart), 0)
	end = time.Unix(int64(cEnd), 0)
	step = time.Duration(cStep) * time.Second
	colCnt := int(cColCnt)
	legends := make([]string, colCnt)
	// Copy the legends into Go strings and free each C string, then the
	// array itself (both allocated by librrd).
	for i := 0; i < colCnt; i++ {
		legend := C.arrayGetCString(cLegends, C.int(i))
		legends[i] = C.GoString(legend)
		C.free(unsafe.Pointer(legend))
	}
	C.free(unsafe.Pointer(cLegends))
	rowCnt := (int(cEnd) - int(cStart)) / int(cStep) //+ 1 // FIXED: + 1 added extra uninitialized value
	valuesLen := colCnt * rowCnt
	// Point a Go slice header at the C-allocated data without copying.
	// A `var` declaration is used instead of make: the previous make()
	// allocated a Go backing array that was leaked the moment the header
	// was repointed at cData.
	var values []float64
	sliceHeader := (*reflect.SliceHeader)((unsafe.Pointer(&values)))
	sliceHeader.Cap = valuesLen
	sliceHeader.Len = valuesLen
	sliceHeader.Data = uintptr(unsafe.Pointer(cData))
	return XportResult{start, end, step, legends, rowCnt, values}, nil
}
// FreeValues free values memory allocated by C.
// The XportResult's values must not be used afterwards.
func (r *XportResult) FreeValues() {
	sliceHeader := (*reflect.SliceHeader)((unsafe.Pointer(&r.values)))
	C.free(unsafe.Pointer(sliceHeader.Data))
}

85
pkg/rrd/rrd_windows.go Normal file
View File

@@ -0,0 +1,85 @@
package rrd
import (
"errors"
"strconv"
"time"
)
// cstring is a placeholder for the cgo-backed C string type; no C memory is
// managed on this platform.
type cstring byte

// newCstring is a stub; it always returns nil on this platform.
func newCstring(s string) *cstring {
	return nil
}

// Free is a no-op on this platform.
func (cs *cstring) Free() {
	return
}

// String always returns the empty string on this platform.
func (cs *cstring) String() string {
	return ""
}
// create is a stub; creating RRD files requires librrd, which is not linked
// on this platform.
func (c *Creator) create() error {
	return errors.New("not implemented")
}

// update is a stub; updating RRD files requires librrd, which is not linked
// on this platform.
func (u *Updater) update(args []*cstring) error {
	return errors.New("not implemented")
}
// ftoa formats f in scientific notation with 10 digits of precision.
func ftoa(f float64) string {
	return strconv.FormatFloat(f, 'e', 10, 64)
}

// i64toa formats i in base 10.
func i64toa(i int64) string {
	return strconv.FormatInt(i, 10)
}

// u64toa formats u in base 10. (Previously returned "" on this platform,
// inconsistent with the cgo implementation.)
func u64toa(u uint64) string {
	return strconv.FormatUint(u, 10)
}

// itoa formats i in base 10.
func itoa(i int) string {
	return i64toa(int64(i))
}

// utoa formats u in base 10.
func utoa(u uint) string {
	return u64toa(uint64(u))
}
// parseInfoKey is a stub returning zero values. NOTE(review): the cgo
// implementation defaults kid to -1, not 0 — presumably harmless here since
// Info always errors on this platform, but confirm nothing else calls this.
func parseInfoKey(ik string) (kname, kkey string, kid int) {
	return
}
// graph is a stub; graphing requires librrd, which is not linked on this
// platform.
func (g *Grapher) graph(filename string, start, end time.Time) (GraphInfo, []byte, error) {
	return GraphInfo{}, nil, errors.New("not implemented")
}

// Info returns information about RRD file.
// Not implemented on this platform; always returns an error.
func Info(filename string) (map[string]interface{}, error) {
	return nil, errors.New("not implemented")
}

// Fetch retrieves data from RRD file.
// Not implemented on this platform; always returns an error.
func Fetch(filename, cf string, start, end time.Time, step time.Duration) (FetchResult, error) {
	return FetchResult{}, errors.New("not implemented")
}

// FreeValues free values memory allocated by C.
// No-op here: nothing is ever allocated on this platform.
func (r *FetchResult) FreeValues() {
}

// Values returns copy of internal array of values.
// Always nil here, since Fetch never succeeds on this platform.
func (r *FetchResult) Values() []float64 {
	return nil
}

// Export data from RRD file(s)
// Not implemented on this platform; always returns an error.
func (e *Exporter) xport(start, end time.Time, step time.Duration) (XportResult, error) {
	return XportResult{}, errors.New("not implemented")
}

// FreeValues free values memory allocated by C.
// No-op here: nothing is ever allocated on this platform.
func (r *XportResult) FreeValues() {
}

7
pkg/rrd/rrdfunc.h Normal file
View File

@@ -0,0 +1,7 @@
/*
 * Thin C wrappers around librrd (implemented in rrdfunc_linux.c). Each
 * wrapper returns NULL on success, or a strdup'ed copy of the librrd error
 * message that the (Go) caller must free. This header must be included
 * after <rrd.h>, which provides rrd_info_t, and a header defining time_t.
 */
extern char *rrdCreate(const char *filename, unsigned long step, time_t start, int argc, const char **argv);
extern char *rrdUpdate(const char *filename, const char *template, int argc, const char **argv);
extern char *rrdGraph(rrd_info_t **ret, int argc, char **argv);
extern char *rrdInfo(rrd_info_t **ret, char *filename);
extern char *rrdFetch(int *ret, char *filename, const char *cf, time_t *start, time_t *end, unsigned long *step, unsigned long *ds_cnt, char ***ds_namv, double **data);
extern char *rrdXport(int *ret, int argc, char **argv, int *xsize, time_t *start, time_t *end, unsigned long *step, unsigned long *col_cnt, char ***legend_v, double **data);
/* arrayGetCString returns element i of a C string array; Go cannot index a
 * char** directly. Ownership of the string stays with the array. */
extern char *arrayGetCString(char **values, int i);

57
pkg/rrd/rrdfunc_linux.c Normal file
View File

@@ -0,0 +1,57 @@
#include <stdlib.h>
#include <string.h> /* strdup */
#include <rrd.h>
/* rrdError returns a strdup'ed copy of the pending librrd error message, or
 * NULL if no error is set. */
char *rrdError() {
	char *err = NULL;
	if (rrd_test_error()) {
		// The RRD error buffer is local to the thread, so another goroutine
		// could call an RRD function on the same thread before the Go side
		// runs C.GoString. Copy the current error before returning to Go;
		// the copy must be freed after C.GoString in the Go code.
		err = strdup(rrd_get_error());
		if (err == NULL) {
			abort();
		}
	}
	return err;
}
/* rrdCreate wraps rrd_create_r; returns NULL or a copy of the error. */
char *rrdCreate(const char *filename, unsigned long step, time_t start, int argc, const char **argv) {
	rrd_clear_error();
	rrd_create_r(filename, step, start, argc, argv);
	return rrdError();
}

/* rrdUpdate wraps rrd_update_r; returns NULL or a copy of the error. */
char *rrdUpdate(const char *filename, const char *template, int argc, const char **argv) {
	rrd_clear_error();
	rrd_update_r(filename, template, argc, argv);
	return rrdError();
}

/* rrdGraph wraps rrd_graph_v, storing the resulting info list in *ret. */
char *rrdGraph(rrd_info_t **ret, int argc, char **argv) {
	rrd_clear_error();
	*ret = rrd_graph_v(argc, argv);
	return rrdError();
}

/* rrdInfo wraps rrd_info_r, storing the resulting info list in *ret. */
char *rrdInfo(rrd_info_t **ret, char *filename) {
	rrd_clear_error();
	*ret = rrd_info_r(filename);
	return rrdError();
}

/* rrdFetch wraps rrd_fetch_r; librrd allocates ds_namv and data, which the
 * caller must free. */
char *rrdFetch(int *ret, char *filename, const char *cf, time_t *start, time_t *end, unsigned long *step, unsigned long *ds_cnt, char ***ds_namv, double **data) {
	rrd_clear_error();
	*ret = rrd_fetch_r(filename, cf, start, end, step, ds_cnt, ds_namv, data);
	return rrdError();
}

/* rrdXport wraps rrd_xport; librrd allocates legend_v and data, which the
 * caller must free. */
char *rrdXport(int *ret, int argc, char **argv, int *xsize, time_t *start, time_t *end, unsigned long *step, unsigned long *col_cnt, char ***legend_v, double **data) {
	rrd_clear_error();
	*ret = rrd_xport(argc, argv, xsize, start, end, step, col_cnt, legend_v, data);
	return rrdError();
}

/* arrayGetCString returns element i of a C string array; Go cannot index a
 * char** directly. Ownership of the string stays with the array. */
char *arrayGetCString(char **values, int i) {
	return values[i];
}