Merge pull request #126 from RafayLabs/fix-tests

Fix tests
Authored by Nirav Parikh on 2022-05-03 13:04:02 +05:30, committed by GitHub.
9 changed files with 168 additions and 447 deletions

View File

@@ -25,12 +25,11 @@ jobs:
- name: Format
run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi
# Tests need to be fixed. https://github.com/RafayLabs/rcloud-base/issues/46
# - name: Test all
# run: go test -v ./...
- name: Create k8s Kind Cluster
uses: helm/kind-action@v1.2.0
- name: Test 'service' package
run: go test -v ./pkg/service
- name: Test all
run: go test -v ./...
# golangci:
# strategy:
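The re-enabled steps run go test against a kind cluster created by helm/kind-action. A useful companion pattern is to let cluster-dependent tests skip gracefully when no cluster is reachable locally; a minimal sketch, assuming a KUBECONFIG/KIND_CLUSTER environment-variable convention that this workflow does not itself establish:

package service_test

import (
	"os"
	"testing"
)

// requireCluster skips the calling test when no cluster appears reachable.
// The environment-variable names here are illustrative assumptions only.
func requireCluster(t *testing.T) {
	t.Helper()
	if os.Getenv("KUBECONFIG") == "" && os.Getenv("KIND_CLUSTER") == "" {
		t.Skip("no cluster available; set KUBECONFIG to run cluster-dependent tests")
	}
}

func TestAgainstCluster(t *testing.T) {
	requireCluster(t)
	// ... exercise code paths that need the kind cluster ...
}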

View File

@@ -48,21 +48,6 @@ func getObject(name string) ctrlclient.Object {
return o
}
func TestCRDNSApply(t *testing.T) {
applier, err := NewDynamicApplier()
if err != nil {
t.Error(err)
return
}
o3 := getObject("rafay-system-ns.yaml")
err = applier.Apply(context.TODO(), o3)
if err != nil {
t.Error(err)
return
}
}
func TestApplier(t *testing.T) {
applier, err := NewDynamicApplier()

View File

@@ -1,46 +0,0 @@
package apply
import (
"context"
"fmt"
"testing"
)
func BenchmarkLargeApply(b *testing.B) {
for n := 0; n < b.N; n++ {
largeApply()
}
}
func largeApply() {
obj := getObject("prometheus.yaml")
applier, err := NewDynamicApplier()
if err != nil {
fmt.Println(err)
return
}
err = applier.Apply(context.TODO(), obj)
if err != nil {
fmt.Println(err)
return
}
}
func TestLargeApply(t *testing.T) {
obj := getObject("prometheus.yaml")
applier, err := NewDynamicApplier()
if err != nil {
t.Error(err)
return
}
err = applier.Apply(context.TODO(), obj)
if err != nil {
t.Error(err)
return
}
}
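Note that the deleted largeApply helper swallowed errors with fmt.Println, so a failed apply would silently skew the benchmark numbers. A sketch of how the benchmark could report failures and keep setup out of the timed loop, reusing the deleted file's own getObject and NewDynamicApplier helpers:

func BenchmarkLargeApply(b *testing.B) {
	obj := getObject("prometheus.yaml")
	applier, err := NewDynamicApplier()
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer() // exclude object loading and applier setup from the measurement
	for n := 0; n < b.N; n++ {
		if err := applier.Apply(context.TODO(), obj); err != nil {
			b.Fatal(err)
		}
	}
}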

View File

@@ -43,8 +43,8 @@ func New(cs service.ClusterService) Notifier {
}
}
-func KeyFromMeta(meta commonv3.Metadata) string {
-return fmt.Sprintf("%d/%d/%d/%s", meta.Partner, meta.Organization, meta.Project, meta.Name)
+func KeyFromMeta(meta *commonv3.Metadata) string {
+return fmt.Sprintf("%s/%s/%s/%s", meta.Partner, meta.Organization, meta.Project, meta.Name)
}
func MetaFromKey(key string) (meta commonv3.Metadata) {
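The switch from %d to %s (and to a pointer argument) implies that Partner, Organization, and Project are string fields; with %d, fmt would have rendered them as %!d(string=...) noise in every key. A self-contained sketch of the intended round-trip, using a hypothetical stand-in for commonv3.Metadata:

package main

import (
	"fmt"
	"strings"
)

// Metadata is a stand-in for commonv3.Metadata, reduced to the four
// fields the key uses; the real proto type has more.
type Metadata struct {
	Partner, Organization, Project, Name string
}

func KeyFromMeta(meta *Metadata) string {
	return fmt.Sprintf("%s/%s/%s/%s", meta.Partner, meta.Organization, meta.Project, meta.Name)
}

func MetaFromKey(key string) (meta Metadata) {
	parts := strings.SplitN(key, "/", 4)
	if len(parts) == 4 {
		meta.Partner, meta.Organization, meta.Project, meta.Name = parts[0], parts[1], parts[2], parts[3]
	}
	return
}

func main() {
	m := Metadata{Partner: "p", Organization: "o", Project: "proj", Name: "cluster-1"}
	fmt.Println(MetaFromKey(KeyFromMeta(&m)) == m) // true: the key round-trips
}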

View File

@@ -1,107 +0,0 @@
package controller
import (
"testing"
)
func TestCheckNamespaceConditions(t *testing.T) {
namespaceConditionChecker := checkNamespaceConditions(
withNamespaceConditionStatus(Complete),
withNamespaceConditionType(NamespaceInit),
withNamespaceConditionType(NamespaceCreate),
)
n := new(Namespace)
n.Status.Conditions = append(n.Status.Conditions, NewNamespaceCreate(Complete, "test"), NewNamespaceInit(Failed, "test"))
if namespaceConditionChecker(n) {
t.Error("expeted false")
}
namespaceConditionChecker = checkNamespaceConditions(
withNamespaceConditionStatus(Complete),
withNamespaceConditionType(NamespaceInit),
withNamespaceConditionType(NamespaceCreate),
withNamespaceConditionShortCircuit(),
)
if !namespaceConditionChecker(n) {
t.Error("expeted true")
}
n = new(Namespace)
n.Status.Conditions = append(n.Status.Conditions, NewNamespaceCreate(Complete, "test"), NewNamespaceInit(Complete, "test"))
if !namespaceConditionChecker(n) {
t.Error("expeted true")
}
}
func TestCheckTaskConditions(t *testing.T) {
taskConditionChecker := checkTaskConditions(
withTaskConditionStatus(Complete),
withTaskConditionType(TaskInit),
withTaskConditionType(TaskletCreate),
)
task := new(Task)
task.Status.Conditions = append(task.Status.Conditions, NewTaskInit(Complete, "test"), NewTaskletCreate(Failed, "test"))
if taskConditionChecker(task) {
t.Error("expeted false")
}
taskConditionChecker = checkTaskConditions(
withTaskConditionStatus(Complete),
withTaskConditionType(TaskInit),
withTaskConditionType(TaskletCreate),
withTaskConditionShortCircuit(),
)
if !taskConditionChecker(task) {
t.Error("expeted true")
}
task = new(Task)
task.Status.Conditions = append(task.Status.Conditions, NewTaskInit(Complete, "test"), NewTaskletCreate(Complete, "test"))
if !taskConditionChecker(task) {
t.Error("expeted true")
}
}
func TestCheckTaskletConditions(t *testing.T) {
taskletConditionChecker := checkTaskletConditions(
withTaskletConditionStatus(Complete),
withTaskletConditionType(TaskletInit),
withTaskletConditionType(TaskletInstall),
)
tasklet := new(Tasklet)
tasklet.Status.Conditions = append(tasklet.Status.Conditions, NewTaskletInit(Complete, "test"), NewTaskletInstall(Failed, "test"))
if taskletConditionChecker(tasklet) {
t.Error("expeted false")
}
taskletConditionChecker = checkTaskletConditions(
withTaskletConditionStatus(Complete),
withTaskletConditionType(TaskletInit),
withTaskletConditionType(TaskletInstall),
withTaskletConditionShortCircuit(),
)
if !taskletConditionChecker(tasklet) {
t.Error("expeted true")
}
tasklet = new(Tasklet)
tasklet.Status.Conditions = append(tasklet.Status.Conditions, NewTaskletInit(Complete, "test"), NewTaskletInstall(Complete, "test"))
if !taskletConditionChecker(tasklet) {
t.Error("expeted true")
}
}
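These deleted tests all exercise the same functional-options checker shape: every tracked condition type must carry the expected status, unless short-circuit is set, in which case a single matching condition suffices. A self-contained sketch of that pattern under simplified names (not the actual controller types), matching the behavior the tests encode:

package main

import "fmt"

type Status int

const (
	Failed Status = iota
	Complete
)

type Condition struct {
	Type   string
	Status Status
}

type options struct {
	status       Status
	types        []string
	shortCircuit bool
}

type option func(*options)

func withStatus(s Status) option { return func(o *options) { o.status = s } }
func withType(t string) option   { return func(o *options) { o.types = append(o.types, t) } }
func withShortCircuit() option   { return func(o *options) { o.shortCircuit = true } }

// checkConditions builds a predicate over a condition list: every tracked
// type must carry the wanted status, unless short-circuit is set, in which
// case one matching condition is enough.
func checkConditions(opts ...option) func([]Condition) bool {
	var o options
	for _, apply := range opts {
		apply(&o)
	}
	return func(conds []Condition) bool {
		matched := 0
		for _, c := range conds {
			for _, t := range o.types {
				if c.Type == t && c.Status == o.status {
					if o.shortCircuit {
						return true
					}
					matched++
				}
			}
		}
		return matched >= len(o.types)
	}
}

func main() {
	strict := checkConditions(withStatus(Complete), withType("Init"), withType("Create"))
	lax := checkConditions(withStatus(Complete), withType("Init"), withType("Create"), withShortCircuit())
	conds := []Condition{{"Create", Complete}, {"Init", Failed}}
	fmt.Println(strict(conds), lax(conds)) // false true, as in the deleted tests
}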

proto/types/controller/testdata/pod.yaml (vendored, new file, 148 lines)
View File

@@ -0,0 +1,148 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2019-09-24T18:32:31Z"
generateName: coredns-b7464766c-
labels:
k8s-app: kube-dns
pod-template-hash: b7464766c
name: coredns-b7464766c-hlbwt
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: coredns-b7464766c
uid: ab7ac3fe-def9-11e9-a453-0242ac120002
resourceVersion: "2286717"
selfLink: /api/v1/namespaces/kube-system/pods/coredns-b7464766c-hlbwt
uid: ab7edb72-def9-11e9-a453-0242ac120002
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: coredns/coredns:1.3.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: coredns-token-8bvxc
readOnly: true
dnsPolicy: Default
enableServiceLinks: true
nodeName: k3d-sarat-dev-server
nodeSelector:
beta.kubernetes.io/os: linux
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: coredns
serviceAccountName: coredns
terminationGracePeriodSeconds: 30
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- configMap:
defaultMode: 420
items:
- key: Corefile
path: Corefile
- key: NodeHosts
path: NodeHosts
name: coredns
name: config-volume
- name: coredns-token-8bvxc
secret:
defaultMode: 420
secretName: coredns-token-8bvxc
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2019-09-24T18:32:33Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2020-01-06T03:07:57Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2020-01-06T03:07:57Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2019-09-24T18:32:33Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://a4d08c037aeb7e57736d25ce5117f62012baa7ae32d1f81b66b46b1c12fe4935
image: docker.io/coredns/coredns:1.3.0
imageID: docker.io/coredns/coredns@sha256:e030773c7fee285435ed7fc7623532ee54c4c1c4911fb24d95cd0170a8a768bc
lastState:
terminated:
containerID: containerd://b5339ca5c7771da96bd27d490ed8871cf6d839b8d7a1c09cccebe565f31917ff
exitCode: 255
finishedAt: "2020-01-06T03:07:53Z"
reason: Unknown
startedAt: "2019-12-26T23:40:05Z"
name: coredns
ready: true
restartCount: 6
state:
running:
startedAt: "2020-01-06T03:07:56Z"
hostIP: 172.18.0.2
phase: Running
podIP: 10.42.0.161
qosClass: Burstable
startTime: "2019-09-24T18:32:33Z"

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Secret
metadata:
name: test-secret
annotations: null
type: Opaque

View File

@@ -0,0 +1,8 @@
name: test-step
object:
apiVersion: v1
kind: Secret
metadata:
name: test-secret
annotations: null
type: Opaque

View File

@@ -1,272 +0,0 @@
package server
import (
"context"
"fmt"
"io/ioutil"
"net"
"os"
"os/signal"
"syscall"
"testing"
"time"
"github.com/RafayLabs/rcloud-base/pkg/grpc"
"github.com/RafayLabs/rcloud-base/pkg/sentry/peering"
relayrpc "github.com/RafayLabs/rcloud-base/proto/rpc/sentry"
"github.com/google/uuid"
"google.golang.org/grpc/credentials"
)
var relayUUID1 string
var relayUUID2 string
// steps to test peering protocols
// * start a server
// * run two client instances, sending hello rpcs to build the active relay list
// * client1 sends a probe for a dummy cluster id
// * peer service broadcasts a survey request to the clients
// * client2 sends a survey reply back
// * peer service sends a probe reply
// * client1 gets the probe reply back
func exit(cancel context.CancelFunc) {
cancel()
os.Exit(0)
}
// genUUID generates a UUID using google/uuid
func genUUID() string {
id := uuid.New()
return id.String()
}
// getRelayIP1 returns a dummy relay IP address
func getRelayIP1() string {
return "1.1.1.1"
}
// getRelayIP2 returns a dummy relay IP address
func getRelayIP2() string {
return "2.2.2.2"
}
func dummyDialinLookup(clustersni string) int {
return 1
}
func readPEM(path string) []byte {
f, err := os.Open(path)
if err != nil {
panic(err)
}
b, err := ioutil.ReadAll(f)
if err != nil {
panic(err)
}
return b
}
// signalHandler handles os signals, exiting on termination signals
func signalHandler(sig os.Signal, cancel context.CancelFunc) {
if sig == syscall.SIGINT || sig == syscall.SIGKILL || sig == syscall.SIGTERM || sig == syscall.SIGQUIT {
fmt.Println("Received", "signal", sig)
exit(cancel)
return
}
fmt.Println("Received", "signal", sig)
}
func startClient(ctx context.Context, t *testing.T, name, relayUUID string, exitChan chan<- bool, getRelayIP func() string, peerProbeChanel chan string) {
tlsConfig, err := ClientTLSConfig("./testdata/peersvc.crt", "./testdata/peersvc.key", "./testdata/ca.crt", "star.probe.relay.rafay.dev:7001")
if err != nil {
t.Error("Error loading peer TLC config", err)
exitChan <- true
return
}
//enforce TLS mutual authN
transportCreds := credentials.NewTLS(tlsConfig)
conn, err := grpc.NewSecureClientConn(ctx, "127.0.0.1:7001", transportCreds)
if err != nil {
t.Error("Error connecting to peer service", err)
exitChan <- true
return
}
fmt.Println("connected to grpc server")
client := relayrpc.NewRelayPeerServiceClient(conn)
// create RPC streams
helloStream, err := client.RelayPeerHelloRPC(context.Background())
if err != nil {
t.Error("failed to create HelloRPC stream with peer server", name, err)
conn.Close()
exitChan <- true
return
}
probeStream, err := client.RelayPeerProbeRPC(context.Background())
if err != nil {
t.Error("failed to create ProbeRPC stream with peer server", name, err)
conn.Close()
exitChan <- true
return
}
surveyStream, err := client.RelayPeerSurveyRPC(context.Background())
if err != nil {
t.Error("failed to create SurveyRPC stream with peer server", name, err)
conn.Close()
exitChan <- true
return
}
fmt.Println("created RPC streams with peer server")
rpcctx, rpccancel := context.WithCancel(context.Background())
go ClientHelloRPC(rpcctx, helloStream, 60*time.Second, relayUUID, getRelayIP)
// Add an initial wait for the hello stream to finish
time.Sleep(2 * time.Second)
pcache, err := InitPeerCache(nil)
if err != nil {
t.Error("failed to init peer client cache", name, err)
conn.Close()
exitChan <- true
return
}
go peering.ClientProbeRPC(rpcctx, probeStream, pcache, relayUUID, 600*time.Second, peerProbeChanel, getRelayIP)
go peering.ClientSurveyRPC(rpcctx, surveyStream, relayUUID, getRelayIP, dummyDialinLookup)
for {
// Watch for errors in the rpc streams.
// On error, cancel the streams and reconnect.
// HelloRPC sends heartbeats every 60s; if there are
// underlying connectivity issues, it will detect
// them within 60s.
select {
case <-helloStream.Context().Done():
rpccancel()
conn.Close()
t.Error("stopping client", name, ctx.Err())
exitChan <- true
return
case <-probeStream.Context().Done():
rpccancel()
conn.Close()
t.Error("stopping client", name, ctx.Err())
exitChan <- true
return
case <-surveyStream.Context().Done():
rpccancel()
conn.Close()
t.Error("stopping client", name, ctx.Err())
exitChan <- true
return
case <-ctx.Done():
rpccancel()
conn.Close()
t.Error("stopping client", name, ctx.Err())
exitChan <- true
return
}
}
}
func TestRelayPeerRPC(t *testing.T) {
var ExitChan = make(chan bool)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
signalChan := make(chan os.Signal, 2)
signal.Notify(signalChan,
os.Interrupt,
syscall.SIGINT,
syscall.SIGHUP,
syscall.SIGKILL,
syscall.SIGTERM,
syscall.SIGQUIT,
)
go func() {
relayPeerService, err := NewRelayPeerService()
if err != nil {
_log.Fatalw("unable to get create relay peer service")
panic(err)
}
grpcServer, err := grpc.NewSecureServerWithPEM(readPEM("testdata/peersvc.crt"), readPEM("testdata/peersvc.key"), readPEM("testdata/ca.crt"))
if err != nil {
_log.Fatalw("cannot grpc secure server failed", "error", err)
}
go func() {
defer grpcServer.GracefulStop()
<-ctx.Done()
_log.Infow("peer service stoped due to context done")
}()
relayrpc.RegisterRelayPeerServiceServer(grpcServer, relayPeerService)
l, err := net.Listen("tcp", fmt.Sprintf(":%d", 7001))
if err != nil {
_log.Fatalw("failed to listen relay peer service port", "port", 7001, "error", err)
return
}
_log.Infow("started relay rpc service ", "port", 7001)
if err = grpcServer.Serve(l); err != nil {
_log.Fatalw("failed to server relay peer service", "error", err)
}
}()
t.Log("started RunRelayPeerRPC")
relayUUID1 = genUUID()
relayUUID2 = genUUID()
peerProbeChanel1 := make(chan string, 256)
peerProbeChanel2 := make(chan string, 256)
go startClient(ctx, t, "client1", relayUUID1, ExitChan, getRelayIP1, peerProbeChanel1)
go startClient(ctx, t, "client2", relayUUID2, ExitChan, getRelayIP2, peerProbeChanel2)
time.Sleep(5 * time.Second)
//send a dummy probe from client1
peerProbeChanel1 <- "dummycluster.relay.rafay.dev"
fmt.Println("send probe from client 1")
tick := time.NewTicker(10 * time.Second)
defer tick.Stop()
for {
select {
case <-ExitChan:
t.Errorf("got exit chanl")
exit(cancel)
case sig := <-signalChan:
signalHandler(sig, cancel)
case <-tick.C:
fmt.Println("success: test time reached. no errors from peerservice or clients. see logs showing \"cache probeRPC response\"")
exit(cancel)
}
}
}
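The deleted test wires up real TLS-authenticated gRPC streams, but the peering flow its step comments describe (probe from client1, survey broadcast, reply from client2) can be modeled with plain channels. A sketch of the message flow only, with hypothetical types in place of the relayrpc streams:

package main

import "fmt"

// probe is a hypothetical stand-in for a ProbeRPC request.
type probe struct {
	cluster string
	reply   chan string
}

// peerServer broadcasts every incoming probe as a survey to all
// connected clients, mirroring the peer service's survey fan-out.
func peerServer(probes <-chan probe, surveys []chan probe) {
	for p := range probes {
		for _, s := range surveys {
			s <- p
		}
	}
}

func main() {
	probes := make(chan probe)
	survey1 := make(chan probe, 1)
	survey2 := make(chan probe, 1)
	go peerServer(probes, []chan probe{survey1, survey2})

	// client2 answers surveys for clusters it has a dialin for.
	go func() {
		for p := range survey2 {
			if p.cluster == "dummycluster.relay.rafay.dev" {
				p.reply <- "relay-2 can reach " + p.cluster
			}
		}
	}()
	// client1 drains its own copy of the survey without answering.
	go func() {
		for range survey1 {
		}
	}()

	// client1 probes for a cluster it cannot reach directly and
	// receives the reply relayed from client2.
	reply := make(chan string, 1)
	probes <- probe{cluster: "dummycluster.relay.rafay.dev", reply: reply}
	fmt.Println(<-reply)
}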