add a collector that checks s3 access (#2007)

* add a collector that checks s3 access

* testing and analyzer

* analyzer test

* fmt
This commit is contained in:
Andrew Lavery
2026-04-09 14:12:34 -04:00
committed by GitHub
parent 670a510a2d
commit ad7d52f7e5
10 changed files with 580 additions and 3 deletions

6
go.mod
View File

@@ -6,6 +6,9 @@ require (
github.com/Masterminds/sprig/v3 v3.3.0
github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412
github.com/apparentlymart/go-cidr v1.1.0
github.com/aws/aws-sdk-go-v2 v1.41.1
github.com/aws/aws-sdk-go-v2/credentials v1.19.9
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
github.com/blang/semver/v4 v4.0.0
github.com/casbin/govaluate v1.10.0
github.com/cilium/ebpf v0.21.0
@@ -77,10 +80,8 @@ require (
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/config v1.32.9 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.19.9 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
@@ -90,7 +91,6 @@ require (
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.10 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 // indirect

View File

@@ -262,6 +262,8 @@ func GetAnalyzer(analyzer *troubleshootv1beta2.Analyze) Analyzer {
return &AnalyzeNodeMetrics{analyzer: analyzer.NodeMetrics}
case analyzer.HTTP != nil:
return &AnalyzeHTTPAnalyze{analyzer: analyzer.HTTP}
case analyzer.S3Status != nil:
return &AnalyzeS3Status{analyzer: analyzer.S3Status}
default:
return nil
}

139
pkg/analyze/s3_status.go Normal file
View File

@@ -0,0 +1,139 @@
package analyzer
import (
"encoding/json"
"fmt"
"path"
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/collect"
)
// AnalyzeS3Status analyzes the output of the s3Status collector. It reuses
// the DatabaseAnalyze spec because the collector writes its result in the
// same connected/error shape the database collectors use (see
// collect.DatabaseConnection in analyzeS3Status below).
type AnalyzeS3Status struct {
	analyzer *troubleshootv1beta2.DatabaseAnalyze
}
// Title returns the display name for this analyzer: the spec's checkName
// when provided, otherwise the collector name it reads results from.
func (a *AnalyzeS3Status) Title() string {
	if name := a.analyzer.CheckName; name != "" {
		return name
	}
	return a.collectorName()
}
// IsExcluded reports whether the spec's exclude flag disables this analyzer.
func (a *AnalyzeS3Status) IsExcluded() (bool, error) {
	exclude := a.analyzer.Exclude
	return isExcluded(exclude)
}
// Analyze evaluates the collected s3Status result against the spec's
// outcomes and returns a single AnalyzeResult. findFiles is unused.
func (a *AnalyzeS3Status) Analyze(getFile getCollectedFileContents, findFiles getChildCollectedFileContents) ([]*AnalyzeResult, error) {
	res, err := a.analyzeS3Status(a.analyzer, getFile)
	if err != nil {
		return nil, err
	}
	// Propagate the strict flag (defaulting to false) onto the result.
	res.Strict = a.analyzer.Strict.BoolOrDefaultFalse()
	results := []*AnalyzeResult{res}
	return results, nil
}
// collectorName returns the collector name this analyzer is bound to,
// defaulting to "s3Status" when the spec leaves it empty. This must agree
// with the naming the collector uses when saving its result file.
func (a *AnalyzeS3Status) collectorName() string {
	if name := a.analyzer.CollectorName; name != "" {
		return name
	}
	return "s3Status"
}
// analyzeS3Status loads the collector's JSON result from the bundle and
// evaluates the spec's outcomes against it. The collector stores its output
// in the same shape as collect.DatabaseConnection, so the same conditional
// language ("connected == true", etc.) applies.
//
// Outcomes are evaluated in spec order and the first matching one wins; an
// outcome with an empty "when" clause matches unconditionally. When a Fail
// outcome matches and the collector recorded an error, that error text is
// appended to the outcome message so the user can see why access failed.
// (Previously an unconditional Fail dropped the error text while a
// conditional Fail appended it; both now append for consistency.)
func (a *AnalyzeS3Status) analyzeS3Status(analyzer *troubleshootv1beta2.DatabaseAnalyze, getCollectedFileContents func(string) ([]byte, error)) (*AnalyzeResult, error) {
	// The collector writes its output to s3Status/<collectorName>.json.
	fullPath := path.Join("s3Status", fmt.Sprintf("%s.json", a.collectorName()))
	collected, err := getCollectedFileContents(fullPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to read collected file name: %s", fullPath)
	}

	databaseConnection := collect.DatabaseConnection{}
	if err := json.Unmarshal(collected, &databaseConnection); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal s3 status result")
	}

	result := &AnalyzeResult{
		Title:   a.Title(),
		IconKey: "kubernetes_s3_analyze",
		IconURI: "https://troubleshoot.sh/images/analyzer-icons/s3-analyze.svg",
	}

	for _, outcome := range analyzer.Outcomes {
		switch {
		case outcome.Fail != nil:
			isMatch, err := a.outcomeMatches(outcome.Fail, &databaseConnection)
			if err != nil {
				return result, errors.Wrap(err, "failed to compare s3 status conditional")
			}
			if isMatch {
				result.IsFail = true
				result.Message = outcome.Fail.Message
				// Surface the collector's error (e.g. the AWS SDK HeadBucket
				// error) so the user can see why the check failed.
				if databaseConnection.Error != "" {
					result.Message = outcome.Fail.Message + " " + databaseConnection.Error
				}
				result.URI = outcome.Fail.URI
				return result, nil
			}
		case outcome.Warn != nil:
			isMatch, err := a.outcomeMatches(outcome.Warn, &databaseConnection)
			if err != nil {
				return result, errors.Wrap(err, "failed to compare s3 status conditional")
			}
			if isMatch {
				result.IsWarn = true
				result.Message = outcome.Warn.Message
				result.URI = outcome.Warn.URI
				return result, nil
			}
		case outcome.Pass != nil:
			isMatch, err := a.outcomeMatches(outcome.Pass, &databaseConnection)
			if err != nil {
				return result, errors.Wrap(err, "failed to compare s3 status conditional")
			}
			if isMatch {
				result.IsPass = true
				result.Message = outcome.Pass.Message
				result.URI = outcome.Pass.URI
				return result, nil
			}
		}
	}

	// No outcome matched: return the result with no pass/warn/fail set,
	// matching the behavior of the other database-style analyzers.
	return result, nil
}

// outcomeMatches reports whether a single outcome applies to the collected
// connection result. An empty "when" clause matches unconditionally;
// otherwise the conditional is evaluated against the collected values.
func (a *AnalyzeS3Status) outcomeMatches(o *troubleshootv1beta2.SingleOutcome, conn *collect.DatabaseConnection) (bool, error) {
	if o.When == "" {
		return true, nil
	}
	return compareDatabaseConditionalToActual(o.When, conn)
}

View File

@@ -0,0 +1,175 @@
package analyzer
import (
"encoding/json"
"testing"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/collect"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAnalyzeS3Status exercises the s3Status analyzer against pre-marshalled
// collector output, covering pass/fail/warn outcomes, the error text being
// appended to a matching fail message, unconditional outcomes, and a custom
// checkName/collectorName.
func TestAnalyzeS3Status(t *testing.T) {
	tests := []struct {
		name string
		analyzer *troubleshootv1beta2.DatabaseAnalyze
		// collected is marshalled to JSON and served as the bundle file.
		collected *collect.DatabaseConnection
		wantPass bool
		wantFail bool
		wantWarn bool
		wantMessage string
	}{
		{
			name: "connected, pass",
			analyzer: &troubleshootv1beta2.DatabaseAnalyze{
				Outcomes: []*troubleshootv1beta2.Outcome{
					{
						Fail: &troubleshootv1beta2.SingleOutcome{
							When: "connected == false",
							Message: "Cannot access the S3 bucket.",
						},
					},
					{
						Pass: &troubleshootv1beta2.SingleOutcome{
							When: "connected == true",
							Message: "S3 bucket is accessible.",
						},
					},
				},
			},
			collected: &collect.DatabaseConnection{
				IsConnected: true,
			},
			wantPass: true,
			wantMessage: "S3 bucket is accessible.",
		},
		{
			// The collector's Error string should be appended to the fail message.
			name: "not connected, fail with error appended",
			analyzer: &troubleshootv1beta2.DatabaseAnalyze{
				Outcomes: []*troubleshootv1beta2.Outcome{
					{
						Fail: &troubleshootv1beta2.SingleOutcome{
							When: "connected == false",
							Message: "Cannot access the S3 bucket.",
						},
					},
					{
						Pass: &troubleshootv1beta2.SingleOutcome{
							When: "connected == true",
							Message: "S3 bucket is accessible.",
						},
					},
				},
			},
			collected: &collect.DatabaseConnection{
				IsConnected: false,
				Error: "operation error S3: HeadBucket, StatusCode: 403",
			},
			wantFail: true,
			wantMessage: "Cannot access the S3 bucket. operation error S3: HeadBucket, StatusCode: 403",
		},
		{
			// No Error recorded by the collector: message stays as written.
			name: "not connected, fail without error",
			analyzer: &troubleshootv1beta2.DatabaseAnalyze{
				Outcomes: []*troubleshootv1beta2.Outcome{
					{
						Fail: &troubleshootv1beta2.SingleOutcome{
							When: "connected == false",
							Message: "Cannot access the S3 bucket.",
						},
					},
				},
			},
			collected: &collect.DatabaseConnection{
				IsConnected: false,
			},
			wantFail: true,
			wantMessage: "Cannot access the S3 bucket.",
		},
		{
			name: "warn outcome",
			analyzer: &troubleshootv1beta2.DatabaseAnalyze{
				Outcomes: []*troubleshootv1beta2.Outcome{
					{
						Warn: &troubleshootv1beta2.SingleOutcome{
							When: "connected == false",
							Message: "S3 bucket may be inaccessible.",
						},
					},
				},
			},
			collected: &collect.DatabaseConnection{
				IsConnected: false,
			},
			wantWarn: true,
			wantMessage: "S3 bucket may be inaccessible.",
		},
		{
			// An outcome with no "when" clause matches unconditionally.
			name: "unconditional fail",
			analyzer: &troubleshootv1beta2.DatabaseAnalyze{
				Outcomes: []*troubleshootv1beta2.Outcome{
					{
						Fail: &troubleshootv1beta2.SingleOutcome{
							Message: "Always fails.",
						},
					},
				},
			},
			collected: &collect.DatabaseConnection{
				IsConnected: true,
			},
			wantFail: true,
			wantMessage: "Always fails.",
		},
		{
			name: "custom collector name",
			analyzer: &troubleshootv1beta2.DatabaseAnalyze{
				AnalyzeMeta: troubleshootv1beta2.AnalyzeMeta{
					CheckName: "My S3 Check",
				},
				CollectorName: "my-bucket",
				Outcomes: []*troubleshootv1beta2.Outcome{
					{
						Pass: &troubleshootv1beta2.SingleOutcome{
							When: "connected == true",
							Message: "Bucket OK.",
						},
					},
				},
			},
			collected: &collect.DatabaseConnection{
				IsConnected: true,
			},
			wantPass: true,
			wantMessage: "Bucket OK.",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			collectedData, err := json.Marshal(tt.collected)
			require.NoError(t, err)
			a := &AnalyzeS3Status{analyzer: tt.analyzer}
			// Stub file reader: serve the marshalled collector output regardless
			// of the path the analyzer asks for.
			getFile := func(path string) ([]byte, error) {
				return collectedData, nil
			}
			results, err := a.Analyze(getFile, nil)
			require.NoError(t, err)
			require.Len(t, results, 1)
			result := results[0]
			assert.Equal(t, tt.wantPass, result.IsPass)
			assert.Equal(t, tt.wantFail, result.IsFail)
			assert.Equal(t, tt.wantWarn, result.IsWarn)
			assert.Equal(t, tt.wantMessage, result.Message)
			// When checkName is set in the spec, it must be used as the title.
			if tt.analyzer.CheckName != "" {
				assert.Equal(t, tt.analyzer.CheckName, result.Title)
			}
		})
	}
}

View File

@@ -316,4 +316,5 @@ type Analyze struct {
Event *EventAnalyze `json:"event,omitempty" yaml:"event,omitempty"`
NodeMetrics *NodeMetricsAnalyze `json:"nodeMetrics,omitempty" yaml:"nodeMetrics,omitempty"`
HTTP *HTTPAnalyze `json:"http,omitempty" yaml:"http,omitempty"`
S3Status *DatabaseAnalyze `json:"s3Status,omitempty" yaml:"s3Status,omitempty"`
}

View File

@@ -323,6 +323,17 @@ type SupportBundleMetadata struct {
Namespace string `json:"namespace" yaml:"namespace"`
}
// S3Status is a collector spec that checks whether a single S3 (or
// S3-compatible) bucket is reachable with the supplied credentials.
type S3Status struct {
	CollectorMeta `json:",inline" yaml:",inline"`
	// BucketName is the bucket whose accessibility is checked.
	BucketName string `json:"bucketName" yaml:"bucketName"`
	// Region is the AWS region; the collector falls back to us-east-1 when empty.
	Region string `json:"region,omitempty" yaml:"region,omitempty"`
	// Endpoint optionally overrides the S3 endpoint, e.g. for MinIO or other
	// S3-compatible object stores.
	Endpoint string `json:"endpoint,omitempty" yaml:"endpoint,omitempty"`
	// AccessKeyID and SecretAccessKey are static credentials for the check.
	// NOTE(review): the secret is carried in the spec as plain text — confirm
	// that is acceptable, or consider supporting a Secret reference.
	AccessKeyID string `json:"accessKeyID,omitempty" yaml:"accessKeyID,omitempty"`
	SecretAccessKey string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty"`
	// UsePathStyle forces path-style bucket addressing (required by many
	// non-AWS S3 implementations).
	UsePathStyle bool `json:"usePathStyle,omitempty" yaml:"usePathStyle,omitempty"`
	// Insecure disables TLS certificate verification for the endpoint.
	Insecure bool `json:"insecure,omitempty" yaml:"insecure,omitempty"`
}
type Collect struct {
ClusterInfo *ClusterInfo `json:"clusterInfo,omitempty" yaml:"clusterInfo,omitempty"`
ClusterResources *ClusterResources `json:"clusterResources,omitempty" yaml:"clusterResources,omitempty"`
@@ -355,6 +366,7 @@ type Collect struct {
DNS *DNS `json:"dns,omitempty" yaml:"dns,omitempty"`
Etcd *Etcd `json:"etcd,omitempty" yaml:"etcd,omitempty"`
SupportBundleMetadata *SupportBundleMetadata `json:"supportBundleMetadata,omitempty" yaml:"supportBundleMetadata,omitempty"`
S3Status *S3Status `json:"s3Status,omitempty" yaml:"s3Status,omitempty"`
}
func (c *Collect) AccessReviewSpecs(overrideNS string) []authorizationv1.SelfSubjectAccessReviewSpec {
@@ -587,6 +599,8 @@ func (c *Collect) AccessReviewSpecs(overrideNS string) []authorizationv1.SelfSub
},
NonResourceAttributes: nil,
})
} else if c.S3Status != nil {
// NOOP
}
return result
@@ -694,6 +708,10 @@ func (c *Collect) GetName() string {
collector = "support-bundle-metadata"
name = c.SupportBundleMetadata.CollectorName
}
if c.S3Status != nil {
collector = "s3Status"
name = c.S3Status.CollectorName
}
if collector == "" {
return "<none>"

View File

@@ -235,6 +235,11 @@ func (in *Analyze) DeepCopyInto(out *Analyze) {
*out = new(HTTPAnalyze)
(*in).DeepCopyInto(*out)
}
if in.S3Status != nil {
in, out := &in.S3Status, &out.S3Status
*out = new(DatabaseAnalyze)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Analyze.
@@ -995,6 +1000,11 @@ func (in *Collect) DeepCopyInto(out *Collect) {
*out = new(SupportBundleMetadata)
(*in).DeepCopyInto(*out)
}
if in.S3Status != nil {
in, out := &in.S3Status, &out.S3Status
*out = new(S3Status)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Collect.
@@ -4713,6 +4723,22 @@ func (in *RunPod) DeepCopy() *RunPod {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *S3Status) DeepCopyInto(out *S3Status) {
	*out = *in
	// CollectorMeta needs its own deep copy after the shallow struct copy,
	// since it may carry non-value fields.
	in.CollectorMeta.DeepCopyInto(&out.CollectorMeta)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Status.
func (in *S3Status) DeepCopy() *S3Status {
	// Preserve nil-ness so copies of unset collector specs stay unset.
	if in == nil {
		return nil
	}
	out := new(S3Status)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Secret) DeepCopyInto(out *Secret) {
*out = *in

View File

@@ -130,6 +130,8 @@ func GetCollector(collector *troubleshootv1beta2.Collect, bundlePath string, nam
return &CollectEtcd{collector.Etcd, bundlePath, clientConfig, client, ctx, RBACErrors}, true
case collector.SupportBundleMetadata != nil:
return &CollectSupportBundleMetadata{collector.SupportBundleMetadata, bundlePath, namespace, clientConfig, client, ctx, RBACErrors}, true
case collector.S3Status != nil:
return &CollectS3Status{collector.S3Status, bundlePath, RBACErrors}, true
default:
return nil, false
}
@@ -228,6 +230,9 @@ func getCollectorName(c interface{}) string {
case *CollectSupportBundleMetadata:
collector = "support-bundle-metadata"
name = v.Collector.CollectorName
case *CollectS3Status:
collector = "s3Status"
name = v.Collector.CollectorName
default:
collector = "<none>"
}

103
pkg/collect/s3_status.go Normal file
View File

@@ -0,0 +1,103 @@
package collect
import (
"bytes"
"context"
"encoding/json"
"fmt"
"time"
"crypto/tls"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"net/http"
)
// S3StatusResult is the JSON document the s3Status collector writes into the
// bundle at s3Status/<collectorName>.json. The "isConnected"/"error" keys
// line up with collect.DatabaseConnection so the database-style analyzer can
// unmarshal the same file.
type S3StatusResult struct {
	// BucketName, Endpoint, and Region echo the spec for context in the bundle.
	BucketName string `json:"bucketName"`
	Endpoint string `json:"endpoint,omitempty"`
	Region string `json:"region,omitempty"`
	// IsConnected is true when the HeadBucket call succeeded.
	IsConnected bool `json:"isConnected"`
	// Error holds the HeadBucket error text when the check failed.
	Error string `json:"error,omitempty"`
}
// CollectS3Status collects the reachability of an S3 bucket as described by
// the S3Status spec, writing an S3StatusResult into the bundle.
type CollectS3Status struct {
	Collector *troubleshootv1beta2.S3Status
	BundlePath string
	RBACErrors
}
// Title returns the display name for this collector instance, derived via
// the package's shared getCollectorName helper.
func (c *CollectS3Status) Title() string {
	name := getCollectorName(c)
	return name
}
// IsExcluded reports whether the spec's exclude flag disables this collector.
func (c *CollectS3Status) IsExcluded() (bool, error) {
	exclude := c.Collector.Exclude
	return isExcluded(exclude)
}
// Collect checks whether the configured S3 bucket is reachable with the
// provided credentials by issuing a HeadBucket request, and stores the
// outcome as JSON at s3Status/<collectorName>.json in the bundle.
//
// S3 errors are not returned as collector failures: they are recorded in the
// result's Error field so the s3Status analyzer can report on them.
// progressChan is unused.
func (c *CollectS3Status) Collect(progressChan chan<- interface{}) (CollectorResult, error) {
	result := S3StatusResult{
		BucketName: c.Collector.BucketName,
		Endpoint:   c.Collector.Endpoint,
		Region:     c.Collector.Region,
	}

	// The SDK requires a region even for custom S3-compatible endpoints;
	// fall back to us-east-1 when the spec does not set one.
	region := c.Collector.Region
	if region == "" {
		region = "us-east-1"
	}

	opts := s3.Options{
		Region: region,
		Credentials: credentials.NewStaticCredentialsProvider(
			c.Collector.AccessKeyID,
			c.Collector.SecretAccessKey,
			"",
		),
		// Path-style addressing is needed by most non-AWS S3 implementations
		// where virtual-hosted bucket DNS is unavailable.
		UsePathStyle: c.Collector.UsePathStyle,
	}
	if c.Collector.Endpoint != "" {
		opts.BaseEndpoint = aws.String(c.Collector.Endpoint)
	}
	if c.Collector.Insecure {
		// Skip TLS verification only when the spec explicitly asks for it.
		opts.HTTPClient = &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			},
		}
	}
	client := s3.New(opts)

	// Bound the check so an unreachable endpoint cannot hang the bundle.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// HeadBucket verifies both reachability and permission to access the
	// bucket without transferring any object data.
	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: aws.String(c.Collector.BucketName),
	})
	if err != nil {
		result.Error = err.Error()
	} else {
		result.IsConnected = true
	}

	b, err := json.Marshal(result)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal s3 status result")
	}

	// Default the output file name the same way the analyzer defaults its
	// collector name, so the two always agree on the path.
	collectorName := c.Collector.CollectorName
	if collectorName == "" {
		collectorName = "s3Status"
	}

	output := NewResult()
	// Fix: SaveResult can fail (e.g. unwritable bundle path); don't ignore it.
	if err := output.SaveResult(c.BundlePath, fmt.Sprintf("s3Status/%s.json", collectorName), bytes.NewBuffer(b)); err != nil {
		return nil, errors.Wrap(err, "failed to save s3 status result")
	}
	return output, nil
}

View File

@@ -0,0 +1,108 @@
package collect
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestCollectS3Status_Collect runs the collector against an httptest server
// standing in for the S3 endpoint, verifying the connected flag, the
// surfaced error text, and the result key naming (including the default
// collector name).
func TestCollectS3Status_Collect(t *testing.T) {
	tests := []struct {
		name string
		handler http.HandlerFunc
		collectorName string
		wantConnected bool
		wantErrContains string
	}{
		{
			name: "bucket exists",
			// With path-style addressing, HeadBucket arrives as HEAD /test-bucket.
			handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				if r.Method == http.MethodHead && r.URL.Path == "/test-bucket" {
					w.WriteHeader(http.StatusOK)
					return
				}
				w.WriteHeader(http.StatusNotFound)
			}),
			collectorName: "mybucket",
			wantConnected: true,
		},
		{
			name: "bucket not found",
			handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusNotFound)
			}),
			collectorName: "mybucket",
			wantConnected: false,
			wantErrContains: "StatusCode: 404",
		},
		{
			name: "access denied",
			handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusForbidden)
			}),
			collectorName: "mybucket",
			wantConnected: false,
			wantErrContains: "StatusCode: 403",
		},
		{
			// An empty collector name should fall back to "s3Status" in the key.
			name: "default collector name",
			handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			}),
			collectorName: "",
			wantConnected: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ts := httptest.NewServer(tt.handler)
			defer ts.Close()
			collector := &CollectS3Status{
				Collector: &troubleshootv1beta2.S3Status{
					CollectorMeta: troubleshootv1beta2.CollectorMeta{
						CollectorName: tt.collectorName,
					},
					BucketName: "test-bucket",
					// Point the SDK at the local test server instead of AWS.
					Endpoint: ts.URL,
					Region: "us-east-1",
					AccessKeyID: "test-key",
					SecretAccessKey: "test-secret",
					// Path style keeps the bucket in the URL path, not a subdomain.
					UsePathStyle: true,
				},
				// Empty bundle path: results are held in the in-memory map,
				// read back via result[key] below.
				BundlePath: "",
			}
			// progressChan is unused by this collector, so nil is safe here.
			result, err := collector.Collect(nil)
			require.NoError(t, err)
			expectedName := tt.collectorName
			if expectedName == "" {
				expectedName = "s3Status"
			}
			key := "s3Status/" + expectedName + ".json"
			raw, ok := result[key]
			require.True(t, ok, "expected key %s in result, got keys: %v", key, result)
			var s3Result S3StatusResult
			err = json.Unmarshal(raw, &s3Result)
			require.NoError(t, err)
			assert.Equal(t, tt.wantConnected, s3Result.IsConnected)
			assert.Equal(t, "test-bucket", s3Result.BucketName)
			if tt.wantErrContains != "" {
				assert.Contains(t, s3Result.Error, tt.wantErrContains)
			} else {
				assert.Empty(t, s3Result.Error)
			}
		})
	}
}