Mirror of https://github.com/paralus/paralus.git
refactor: remove unused func/var/methods (#315)
Signed-off-by: Dhruv Jain <92215138+jaydee029@users.noreply.github.com>
@@ -7,6 +7,6 @@ linters:
# - gosimple
# - govet
# - staticcheck
# - unused
- unused
# - ineffassign
- misspell
@@ -1,8 +0,0 @@
package dao

const (
    deletingExpr          = "not (((conditions -> ?) @> ?::jsonb)) as deleting"
    conditionStatusQ      = "(conditions -> ?) @> ?::jsonb"
    notConditionStatusQ   = "not ((conditions -> ?) @> ?::jsonb)"
    conditionLastUpdatedQ = "(conditions #>> '{?, lastUpdated}')::timestamp with time zone < ?::timestamp with time zone"
)
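The deleted constants look like PostgreSQL JSONB filter fragments meant to be bound into bun queries against a jsonb conditions column. A minimal sketch of how such a fragment is typically used with bun is shown below; the Cluster model, table name, condition key, and DSN are illustrative assumptions for this sketch, not the actual Paralus schema.

package main

import (
    "context"
    "database/sql"
    "fmt"

    "github.com/uptrace/bun"
    "github.com/uptrace/bun/dialect/pgdialect"
    "github.com/uptrace/bun/driver/pgdriver"
)

// Cluster is a hypothetical model with a JSONB "conditions" column keyed by
// condition type, e.g. {"ClusterReady": {"status": "Success", ...}}.
type Cluster struct {
    bun.BaseModel `bun:"table:cluster_clusters"`

    ID         string                 `bun:"id,pk"`
    Conditions map[string]interface{} `bun:"conditions,type:jsonb"`
}

// readyClusters applies the same shape of filter as the removed conditionStatusQ:
// "(conditions -> ?) @> ?::jsonb" checks that the JSON object stored under the
// given condition type contains the given sub-document.
func readyClusters(ctx context.Context, db *bun.DB) ([]Cluster, error) {
    var clusters []Cluster
    err := db.NewSelect().
        Model(&clusters).
        Where("(conditions -> ?) @> ?::jsonb", "ClusterReady", `{"status":"Success"}`).
        Scan(ctx)
    return clusters, err
}

func main() {
    dsn := "postgres://postgres:postgres@localhost:5432/paralus?sslmode=disable"
    sqldb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(dsn)))
    db := bun.NewDB(sqldb, pgdialect.New())
    defer db.Close()

    clusters, err := readyClusters(context.Background(), db)
    if err != nil {
        fmt.Println("query failed:", err)
        return
    }
    fmt.Println("ready clusters:", len(clusters))
}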
@@ -79,12 +79,6 @@ func GetUserRoles(ctx context.Context, db bun.IDB, id uuid.UUID) ([]*userv3.Proj
    return append(append(r, pr...), pnr...), err
}

type userProjectnamesaceRole struct {
    AccountId uuid.UUID `bun:"account_id,type:uuid"`
    Role      string    `bun:"role,type:string"`
    Project   *string   `bun:"project,type:string"`
}

func GetQueryFilteredUsers(ctx context.Context, db bun.IDB, partner, org, group, role uuid.UUID, projects []uuid.UUID) ([]uuid.UUID, error) {
    p := []models.AccountPermission{}
    q := db.NewSelect().Model(&p).ColumnExpr("DISTINCT account_id")
main.go
@@ -108,7 +108,6 @@ var (
    dbUser     string
    dbPassword string
    db         *bun.DB
    gormDb     *gorm.DB

    // relay
    sentryPeeringHost string
@@ -129,7 +128,6 @@ var (
    // cd relay
    coreCDRelayUserHost      string
    coreCDRelayConnectorHost string
    schedulerNamespace       string
    sentryBootstrapAddr      string

    // kratos
@@ -268,7 +266,6 @@ func setup() {
    coreCDRelayConnectorHost = viper.GetString(coreCDRelayConnectorHostEnv)
    coreCDRelayUserHost = viper.GetString(coreCDRelayUserHostEnv)
    relayImage = viper.GetString(relayImageEnv)
    schedulerNamespace = viper.GetString(schedulerNamespaceEnv)
    sentryBootstrapAddr = viper.GetString(sentryBootstrapEnv)

    auditLogStorage = viper.GetString(auditLogStorageEnv)
@@ -7,16 +7,11 @@ import (
    "strings"
    "time"

    logv2 "github.com/paralus/paralus/pkg/log"
    commonv3 "github.com/paralus/paralus/proto/types/commonpb/v3"
    "go.uber.org/zap"
    "google.golang.org/grpc/metadata"
)

var (
    _log = logv2.GetLogger()
)

type (
    // EventVersion is the version of event
    EventVersion string
@@ -3,8 +3,6 @@ package apply
import (
    "context"
    "fmt"
    "net/url"
    "strings"
    "time"

    "github.com/paralus/paralus/pkg/controller/client"
@@ -13,7 +11,6 @@ import (
    clusterv2 "github.com/paralus/paralus/proto/types/controller"
    apixv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
@@ -270,54 +267,6 @@ func (a *applier) pollCRDUntilEstablished(ctx context.Context, timeout time.Dura
    })
}

func getGVKIfNotFound(obj runtime.Object) (schema.GroupVersionKind, error) {
    currentGVK := obj.GetObjectKind().GroupVersionKind()
    formedGVK := schema.GroupVersionKind{}

    kind := currentGVK.Kind
    if len(kind) == 0 {
        gvks, _, err := scheme.Scheme.ObjectKinds(obj)
        if err != nil {
            return formedGVK, err
        }
        kind = gvks[0].Kind
    }

    var listMeta metav1.Common
    objectMeta, err := meta.Accessor(obj)
    if err != nil {
        listMeta, err = meta.CommonAccessor(obj)
        if err != nil {
            return formedGVK, err
        }
    } else {
        listMeta = objectMeta
    }

    version := currentGVK.GroupVersion().String()
    if len(version) == 0 {
        selfLink := listMeta.GetSelfLink()
        if len(selfLink) == 0 {
            return formedGVK, ErrNoSelfLink
        }
        selfLinkURL, err := url.Parse(selfLink)
        if err != nil {
            return formedGVK, err
        }
        // example paths: /<prefix>/<version>/*
        parts := strings.Split(selfLinkURL.Path, "/")
        if len(parts) < 3 {
            return formedGVK, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version)
        }
        version = parts[2]
    }

    formedGVK.Kind = kind
    formedGVK.Version = version

    return formedGVK, nil
}

func (a *applier) ApplyStatus(ctx context.Context, obj ctrlclient.Object, statusObj interface{}) error {
    var objectKey ctrlclient.ObjectKey
    var original ctrlclient.Object
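The removed getGVKIfNotFound helper recovered a GroupVersionKind for objects with an empty TypeMeta, first from the registered scheme and then, as a fallback, from the deprecated selfLink field (which newer Kubernetes releases no longer populate). A minimal sketch of the scheme-based lookup alone is below; it is an illustration of that technique, not the Paralus replacement, and the gvkFromScheme name is made up for this sketch.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/kubernetes/scheme"
)

// gvkFromScheme fills in the GroupVersionKind for an object whose TypeMeta
// is empty by asking the client-go scheme which kinds it is registered as.
func gvkFromScheme(obj runtime.Object) (schema.GroupVersionKind, error) {
    gvk := obj.GetObjectKind().GroupVersionKind()
    if !gvk.Empty() {
        return gvk, nil
    }
    gvks, _, err := scheme.Scheme.ObjectKinds(obj)
    if err != nil {
        return schema.GroupVersionKind{}, err
    }
    if len(gvks) == 0 {
        return schema.GroupVersionKind{}, fmt.Errorf("no kinds registered for %T", obj)
    }
    return gvks[0], nil
}

func main() {
    // A ConfigMap literal has an empty TypeMeta; the scheme still knows its kind.
    gvk, err := gvkFromScheme(&corev1.ConfigMap{})
    if err != nil {
        panic(err)
    }
    fmt.Println(gvk) // /v1, Kind=ConfigMap
}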
@@ -10,15 +10,10 @@ import (

    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
)

//var json = jsoniter.ConfigCompatibleWithStandardLibrary

var (
    patchLog = logf.Log.WithName("cluster-v2-patch")
)

type patch struct {
    current client.Object
}
@@ -1,18 +1,12 @@
package client

import (
    "time"

    "github.com/paralus/paralus/pkg/controller/scheme"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

var (
    defaultResyncInterval = time.Second * 30
)

// New returns new kubernetes client
func New() (client.Client, error) {
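The body of New is cut off by this hunk. For context on what the imports above are typically used for, here is a minimal, assumed sketch of building a controller-runtime client from the ambient kubeconfig; it is not the Paralus implementation, which, judging from the imports, presumably wires in its own scheme and an apiutil REST mapper. The newClient name and the node-listing usage are illustrative only.

package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes/scheme"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

// newClient builds a controller-runtime client from the ambient kubeconfig
// (in-cluster config, $KUBECONFIG, or ~/.kube/config).
func newClient() (client.Client, error) {
    cfg, err := config.GetConfig()
    if err != nil {
        return nil, err
    }
    return client.New(cfg, client.Options{Scheme: scheme.Scheme})
}

func main() {
    c, err := newClient()
    if err != nil {
        fmt.Println("no kubeconfig available:", err)
        return
    }
    var nodes corev1.NodeList
    if err := c.List(context.Background(), &nodes); err != nil {
        fmt.Println("list failed:", err)
        return
    }
    fmt.Println("nodes:", len(nodes.Items))
}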
@@ -165,18 +165,6 @@ func getJobState(j *batchv1.Job) (state clusterv2.StepObjectState, reason string
    return
}

func getPersistentVolumeClaimState(pvc *corev1.PersistentVolumeClaim) (state clusterv2.StepObjectState, reason string) {
    if pvc.Status.Phase == corev1.ClaimBound {
        state = clusterv2.StepObjectComplete
        reason = "claim bound"
        return
    }

    state = clusterv2.StepObjectCreated
    reason = "in progress"
    return
}

// ObjectState returns the object state of runtime object
func ObjectState(o runtime.Object) (state clusterv2.StepObjectState, reason string) {
    switch o.(type) {
@@ -11,7 +11,6 @@ import (
    clusterv2 "github.com/paralus/paralus/proto/types/controller"
    corev1 "k8s.io/api/core/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
@@ -23,7 +22,6 @@ import (

var (
    stepLog = logf.Log.WithName("cluster-v2-step")
    ma      = meta.NewAccessor()
)

// Handler is the interface for working with steps
@@ -1,12 +0,0 @@
package step

import (
    apierrs "k8s.io/apimachinery/pkg/api/errors"
)

func isHardFailure(err error) {
    switch {
    case apierrs.IsBadRequest(err), apierrs.IsInvalid(err):

    }
}
@@ -1590,25 +1590,6 @@ func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{
    return nil, 0, false, nil
}

// This function takes a JSON map and sorts all the lists that should be merged
// by key. This is needed by tests because in JSON, list order is significant,
// but in Strategic Merge Patch, merge lists do not have significant order.
// Sorting the lists allows for order-insensitive comparison of patched maps.
func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) {
    var m map[string]interface{}
    err := json.Unmarshal(mapJSON, &m)
    if err != nil {
        return nil, mergepatch.ErrBadJSONDoc
    }

    newM, err := sortMergeListsByNameMap(m, schema)
    if err != nil {
        return nil, err
    }

    return json.Marshal(newM)
}

// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map.
func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) {
    newS := map[string]interface{}{}
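The deleted helper appears to come from Kubernetes' strategic-merge-patch utilities, and its comment states the key point: plain JSON comparison is order-sensitive for lists, while strategic merge patch treats merge lists as unordered, so test comparisons sort the lists by merge key first. A small standalone illustration of that underlying problem follows, using made-up container data rather than anything from this repository.

package main

import (
    "encoding/json"
    "fmt"
    "reflect"
    "sort"
)

type container struct {
    Name  string `json:"name"`
    Image string `json:"image"`
}

func main() {
    // The same set of containers serialized in two different orders.
    a := []byte(`[{"name":"app","image":"app:v1"},{"name":"sidecar","image":"envoy:v1"}]`)
    b := []byte(`[{"name":"sidecar","image":"envoy:v1"},{"name":"app","image":"app:v1"}]`)

    var la, lb []container
    _ = json.Unmarshal(a, &la)
    _ = json.Unmarshal(b, &lb)

    // Order-sensitive comparison reports a difference even though the sets match.
    fmt.Println("naive equal:", reflect.DeepEqual(la, lb)) // false

    // Sorting both lists by the merge key ("name") makes the comparison
    // order-insensitive, which is what sortMergeListsByName did for whole documents.
    byName := func(l []container) { sort.Slice(l, func(i, j int) bool { return l[i].Name < l[j].Name }) }
    byName(la)
    byName(lb)
    fmt.Println("sorted equal:", reflect.DeepEqual(la, lb)) // true
}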
@@ -9,153 +9,19 @@ import (

    "github.com/paralus/paralus/pkg/log"
    controllerv2 "github.com/paralus/paralus/proto/types/controller"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    kjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
    k8sapijson "sigs.k8s.io/kustomize/pseudo/k8s/apimachinery/pkg/runtime/serializer/json"
)

const (
    delimiter                         = "---"
    ingressAnnotationConfigSnippetKey = "nginx.ingress.kubernetes.io/configuration-snippet"
)

var (
    // ErrInvalidObject is returned for invalid object
    ErrInvalidObject = errors.New("object does not support object interface")
    json             = k8sapijson.CaseSensitiveJsonIterator()
    dmf              = kjson.DefaultMetaFactory
)

var _log = log.GetLogger()

func getIngressAnnotations(name string, orgID, partnerID string) map[string]string {
    return map[string]string{
        ingressAnnotationConfigSnippetKey: fmt.Sprintf("set $workload_name \"%s\";set $orgId \"%s\";set $partnerId \"%s\";", name, orgID, partnerID),
    }
}

func addIngressAnnotations(annotations map[string]string, name string, orgId, partnerId string) {
    if _, ok := annotations[ingressAnnotationConfigSnippetKey]; !ok {
        annotations[ingressAnnotationConfigSnippetKey] = fmt.Sprintf("set $workload_name \"%s\";set $orgId \"%s\";set $partnerId \"%s\";",
            name, orgId, partnerId)
    }
}

func addDebugLabels(stepTemplate *controllerv2.StepTemplate, debugLabels []byte) error {

    if stepTemplate.Object != nil {
        accessor, err := stepTemplate.Object.Accessor()
        if err != nil {
            return err
        }
        kind, err := accessor.Kind()
        if err != nil {
            return err
        }
        _log.Infow("addDebugLabels", "kind", kind)
        switch strings.ToLower(kind) {
        case "pod":
            accessor.SetRaw(debugLabels, "metadata", "labels")
        case "deployment", "replicationcontroller", "replicaset", "statefulset", "daemonset", "job":
            accessor.SetRaw(debugLabels, "spec", "template", "metadata", "labels")
        case "cronjob":
            accessor.SetRaw(debugLabels, "spec", "jobTemplate", "spec", "template", "metadata", "labels")
        default:
            _log.Warnw("Unknown Install component in TaskSet. Debug is not possible.", "Kind:", kind)
            return nil
        }

        stepTemplate.Object.Raw = accessor.Bytes()

    }

    return nil
}

func getDebugLabelsMap(orgID, partnerID, projectID string, name string, isSystemWorkload bool) (map[string]string, error) {
    labels := make(map[string]string)
    labels["rep-organization"] = orgID
    labels["rep-partner"] = partnerID
    labels["rep-project"] = projectID
    if isSystemWorkload {
        labels["rep-addon"] = name
    } else {
        labels["rep-workload"] = name
    }

    return labels, nil
}

func resourceToStepTemplate(resource []byte) (sts []controllerv2.StepTemplate, err error) {

    so := &controllerv2.StepObject{Raw: resource}
    var accessor controllerv2.Accessor
    var gvk schema.GroupVersionKind

    accessor, err = so.Accessor()
    if err != nil {
        return
    }

    gvk, err = accessor.GroupVersionKind()
    if err != nil {
        return
    }

    if IsListGVK(gvk) {
        var ro runtime.Object
        ro, _, err = runtimeutil.ToObject(so)
        if err != nil {
            return
        }
        if list, ok := ro.(*corev1.List); ok {
            for _, item := range list.Items {

                var lso = &controllerv2.StepObject{Raw: item.Raw}
                var la controllerv2.Accessor
                var ln string
                var lgvk schema.GroupVersionKind
                la, err = lso.Accessor()
                if err != nil {
                    return
                }
                ln, err = la.Name()
                if err != nil {
                    return
                }
                lgvk, err = la.GroupVersionKind()
                if err != nil {
                    return
                }
                var st controllerv2.StepTemplate
                st.Name = strings.ToLower(fmt.Sprintf("step-%s-%s", lgvk.Kind, ln))
                st.Object = lso
                sts = append(sts, st)
            }
        }
    } else {
        var name string
        var st controllerv2.StepTemplate

        name, err = accessor.Name()
        if err != nil {
            return
        }

        accessor.ResetAutoFields()

        so.Raw = accessor.Bytes()

        st.Name = strings.ToLower(fmt.Sprintf("step-%s-%s", gvk.Kind, name))
        st.Object = so
        sts = append(sts, st)
    }

    return
}

// toRuntimeObject converts JSON bytes into runtime object of
// latest version
func toRuntimeObject(gvk schema.GroupVersionKind, b []byte) (runtime.Object, error) {
@@ -206,21 +72,3 @@ func stepObjectToStepTemplate(so controllerv2.StepObject) (controllerv2.StepTemp

    return st, nil
}

// tryConvert attempts to convert the given object to the provided versions in order. This function assumes
// the object is in internal version.
func tryConvert(converter runtime.ObjectConvertor, object runtime.Object, versions ...schema.GroupVersion) (runtime.Object, error) {
    var last error
    for _, version := range versions {
        if version.Empty() {
            return object, nil
        }
        obj, err := converter.ConvertToVersion(object, version)
        if err != nil {
            last = err
            continue
        }
        return obj, nil
    }
    return nil, last
}
@@ -9,10 +9,6 @@ var (
    namespaceGVK = schema.GroupVersionKind{Version: "v1", Kind: "Namespace"}
    placementGVK = schema.GroupVersionKind{Group: "config.paralus.dev", Version: "v2", Kind: "Placement"}

    // namespace post install gvks
    limitRangeGVK    = schema.GroupVersionKind{Version: "v1", Kind: "LimitRange"}
    resourceQuotaGVK = schema.GroupVersionKind{Version: "v1", Kind: "ResourceQuota"}

    // task init gvks
    serviceAccountGVK = schema.GroupVersionKind{Version: "v1", Kind: "ServiceAccount"}
)
@@ -21,11 +21,10 @@ var (

// uniqueQueue is the containing type for set-style / unique queues
type uniqueQueue struct {
    in       <-chan Resource
    out      chan<- Resource
    inBuffer map[Resource]struct{}
    exists   sync.Map
    buffer   chan Resource
    in       <-chan Resource
    out      chan<- Resource
    exists   sync.Map
    buffer   chan Resource
}

// NewUniqueQueue returns a queue for events which ensures that events in the queue are unique
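With the unused inBuffer field gone, the queue appears to deduplicate purely via the sync.Map: an event is enqueued only if it is not already present. A minimal standalone sketch of that set-style pattern follows; the uniqueBuffer type and its push/pop methods are names invented for this sketch, not the Paralus implementation.

package main

import (
    "fmt"
    "sync"
)

// Resource is a stand-in for the queue's element type.
type Resource struct {
    Name string
}

type uniqueBuffer struct {
    exists sync.Map      // set of resources currently buffered
    buffer chan Resource // buffered, deduplicated events
}

// push enqueues r only if it is not already in the buffer.
func (q *uniqueBuffer) push(r Resource) {
    if _, loaded := q.exists.LoadOrStore(r, struct{}{}); loaded {
        return // already queued; drop the duplicate
    }
    q.buffer <- r
}

// pop dequeues r and removes it from the set so it can be queued again later.
func (q *uniqueBuffer) pop() Resource {
    r := <-q.buffer
    q.exists.Delete(r)
    return r
}

func main() {
    q := &uniqueBuffer{buffer: make(chan Resource, 8)}
    q.push(Resource{Name: "ns/web"})
    q.push(Resource{Name: "ns/web"}) // duplicate, ignored
    q.push(Resource{Name: "ns/db"})
    fmt.Println(len(q.buffer)) // 2
    fmt.Println(q.pop(), q.pop())
}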
@@ -1,29 +0,0 @@
package gateway

import (
    "context"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "github.com/segmentio/encoding/json"
    "google.golang.org/grpc"
)

type errorBody struct {
    Err string `json:"error,omitempty"`
}

// customErrorHandler implements custom grpc error handler
func customErrorHandler(ctx context.Context, _ *runtime.ServeMux, marshaler runtime.Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
    const fallback = `{"error": "failed to marshal error message"}`

    w.Header().Set("Content-type", marshaler.ContentType())
    w.WriteHeader(runtime.HTTPStatusFromCode(grpc.Code(err)))
    jErr := json.NewEncoder(w).Encode(errorBody{
        Err: grpc.ErrorDesc(err),
    })

    if jErr != nil {
        w.Write([]byte(fallback))
    }
}
@@ -41,16 +41,6 @@ const (
    idQ = "id = ?"
)

const (
    scopeOrganization = "organization"
    scopePartner      = "partner"
    scopeProject      = "project"
    scopeProjects     = "projects"
    scopeUser         = "user"
    scopeCluster      = "cluster"
    scopeSSOUser      = "ssouser"
)

var (
    // ErrNoName is returned when name is not set in query option
    // trying to build query for get/update/delete
@@ -10,20 +10,6 @@ import (

var _log = log.GetLogger()

const (
    _bpInprogress int = iota
    _bpFailed
)

type blueprintError struct {
    errorType int
    reason    string
}

func (e blueprintError) Error() string {
    return e.reason
}

type clusterConditionReconciler struct {
    cs service.ClusterService
    /*ps models.PlacementService*/
@@ -67,26 +53,6 @@ func (r *clusterConditionReconciler) Reconcile(ctx context.Context, cluster *inf
    return nil
}

func mergeClusterConditions(conditions []infrav3.ClusterCondition) []infrav3.ClusterCondition {
    condMap := map[infrav3.ClusterConditionType]infrav3.ClusterCondition{}
    var retConditions []infrav3.ClusterCondition

    for _, cond := range conditions {
        if ec, ok := condMap[cond.Type]; ok {
            ec.Reason = ec.Reason + ", " + cond.Reason
            condMap[cond.Type] = ec
        } else {
            condMap[cond.Type] = cond
        }
    }

    for _, cond := range condMap {
        retConditions = append(retConditions, cond)
    }

    return retConditions
}

func shouldUpdateClusterStatus(current, modified *infrav3.Cluster) bool {

    // check if any of the modified conditions are different from
@@ -21,10 +21,6 @@ const (
    readNamespaceRolePath  = "role_read_access.yaml"
    writeNamespaceRolePath = "role_write_access.yaml"
    nameSpacePath          = "namespace.yaml"

    fullAccessClusterRoleName  = "full-access-cluster-role"
    readAccessClusterRoleName  = "read-access-cluster-role"
    writeAccessClusterRoleName = "write-access-cluster-role"
)

// GetDefaultClusterRole returns default cluster role for relay user
@@ -7,7 +7,6 @@ import (
    "encoding/json"
    "fmt"
    "strings"
    "sync"
    "time"

    "github.com/google/uuid"
@@ -39,8 +38,6 @@ import (

var _log = log.GetLogger()

var clusterNodeSyncMutexMap = make(map[string]*sync.Mutex)

const (
    clusterNotifyChan = "cluster:notify"
)
@@ -14,7 +14,6 @@ import (
type relayAuditDatabaseService struct {
    db  *bun.DB
    tag string
    aps AccountPermissionService
}

func (ra *relayAuditDatabaseService) GetRelayAudit(ctx context.Context, req *v1.RelayAuditRequest) (res *v1.RelayAuditResponse, err error) {
@@ -26,28 +26,6 @@ func performRoleBasicChecks(t *testing.T, role *rolev3.Role, ruuid string) {
    }
}

func performRoleBasicAuthzChecks(t *testing.T, mazc mockAuthzClient, ruuid string) {
    if len(mazc.drpm) > 0 {
        if mazc.drpm[len(mazc.drpm)-1].Role != "role-"+ruuid {
            t.Errorf("incorrect role sent to authz; expected '%v' got '%v'", "role-"+ruuid, mazc.drpm[len(mazc.drpm)-1].Role)
        }
    }
    if len(mazc.crpm) > 0 {
        if len(mazc.crpm[len(mazc.crpm)-1].RolePermissionMappingList) != 1 {
            t.Errorf("invalid number of roles sent to authz; expected 1, got '%v'", len(mazc.crpm[len(mazc.crpm)-1].RolePermissionMappingList))
        }
        if mazc.crpm[len(mazc.crpm)-1].RolePermissionMappingList[0].Role != "role-"+ruuid {
            t.Errorf("incorrect role sent to authz; expected '%v' got '%v'", "role-"+ruuid, mazc.crpm[len(mazc.crpm)-1].RolePermissionMappingList[0].Role)
        }
        if len(mazc.crpm[len(mazc.crpm)-1].RolePermissionMappingList[0].Permission) != 1 {
            t.Errorf("incorrect number of permissions sent to authz; expected '1', got '%v'", len(mazc.crpm[len(mazc.crpm)-1].RolePermissionMappingList[0].Permission))
        }
        if mazc.crpm[len(mazc.crpm)-1].RolePermissionMappingList[0].Permission[0] != "ops_star.all" {
            t.Errorf("incorrect permissions sent to authz; expected 'ops_star.all', got '%v'", mazc.crpm[len(mazc.crpm)-1].RolePermissionMappingList[0].Permission[0])
        }
    }
}

func TestCreateRole(t *testing.T) {
    db, mock := getDB(t)
    defer db.Close()
@@ -12,20 +12,6 @@ import (
    rolev3 "github.com/paralus/paralus/proto/types/rolepb/v3"
)

func performRolePermissionBasicChecks(t *testing.T, role *rolev3.RolePermission, ruuid string) {
    _, err := uuid.Parse(role.GetMetadata().GetOrganization())
    if err == nil {
        t.Error("org in metadata should be name not id")
    }
    _, err = uuid.Parse(role.GetMetadata().GetPartner())
    if err == nil {
        t.Error("partner in metadata should be name not id")
    }
    if role.GetMetadata().GetId() != ruuid {
        t.Error("invalid uuid returned")
    }
}

func TestRolePermissionList(t *testing.T) {
    db, mock := getDB(t)
    defer db.Close()
@@ -153,13 +153,6 @@ func addUserGroupMappingsUpdateExpectation(mock sqlmock.Sqlmock, account string)
        WithArgs().WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(uuid.New().String()))
}

func addUserIdFetchExpectation(mock sqlmock.Sqlmock) string {
    uid := uuid.New().String()
    mock.ExpectQuery(`SELECT "identities"."id" FROM "identities" WHERE .*traits ->> 'email' = 'user-` + uid + `'`).
        WithArgs().WillReturnRows(sqlmock.NewRows([]string{"id", "traits"}).AddRow(uid, []byte(`{"email":"user-`+uid+`", "first_name": "John", "last_name": "Doe", "description": "The OG user."}`)))
    return uid
}

func addUserFetchExpectation(mock sqlmock.Sqlmock) string {
    uid := uuid.New().String()
    mock.ExpectQuery(`SELECT "identities"."id".* FROM "identities" WHERE .*traits ->> 'email' = 'user-` + uid + `'`).
@@ -28,37 +28,6 @@ func performUserBasicChecks(t *testing.T, user *userv3.User, uuuid string) {
    }
}

func performUserBasicAuthzChecks(t *testing.T, mazc mockAuthzClient, uuuid string, roles []*userv3.ProjectNamespaceRole) {
    if len(mazc.cp) > 0 {
        for i, u := range mazc.cp[len(mazc.cp)-1].Policies {
            if u.Sub != "u:user-"+uuuid {
                t.Errorf("invalid sub in policy sent to authz; expected '%v', got '%v'", "u:user-"+uuuid, u.Sub)
            }
            if u.Obj != roles[i].Role {
                t.Errorf("invalid obj in policy sent to authz; expected '%v', got '%v'", roles[i].Role, u.Obj)
            }
            if roles[i].Namespace != nil {
                if u.Ns != fmt.Sprint(*roles[i].Namespace) {
                    t.Errorf("invalid ns in policy sent to authz; expected '%v', got '%v'", fmt.Sprint(roles[i].Namespace), u.Ns)
                }
            } else {
                if u.Ns != "*" {
                    t.Errorf("invalid ns in policy sent to authz; expected '%v', got '%v'", "*", u.Ns)
                }
            }
            if roles[i].Project != nil {
                if u.Proj != *roles[i].Project {
                    t.Errorf("invalid proj in policy sent to authz; expected '%v', got '%v'", roles[i].Project, u.Proj)
                }
            } else {
                if u.Proj != "*" {
                    t.Errorf("invalid proj in policy sent to authz; expected '%v', got '%v'", "*", u.Proj)
                }
            }
        }
    }
}

func TestCreateUser(t *testing.T) {
    db, mock := getDB(t)
    defer db.Close()
@@ -7,11 +7,6 @@ import (

    "github.com/valyala/fastjson"
    "k8s.io/apimachinery/pkg/runtime/schema"
    ctrl "sigs.k8s.io/controller-runtime"
)

var (
    accessorLog = ctrl.Log.WithName("accessor")
)

const (
@@ -3,7 +3,6 @@ package server
import (
    "context"
    "fmt"
    "sort"
    "time"

    "github.com/paralus/paralus/internal/constants"
@@ -211,18 +210,6 @@ func NewKubeConfigServer(bs service.BootstrapService, aps service.AccountPermiss
    return &kubeConfigServer{bs, aps, gps, kss, krs, pf, ksvc, os, ps, al}
}

func checkOrgAdmin(groups []string) bool {
    orgGrp := "Organization Admins"
    sort.Strings(groups)
    indx := sort.SearchStrings(groups, orgGrp)
    if indx < len(groups) {
        if groups[indx] == orgGrp {
            return true
        }
    }
    return false
}

func (s *kubeConfigServer) RevokeKubeconfigSSO(ctx context.Context, req *sentryrpc.RevokeKubeconfigRequest) (*sentryrpc.RevokeKubeconfigResponse, error) {
    opts := req.Opts
    accountID, err := query.GetAccountID(opts)
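The deleted checkOrgAdmin sorted the group list and binary-searched it just to test membership. It was removed as unused, but where a check like this is still needed, the standard library's slices package (Go 1.21+) expresses it in one call. The sketch below is illustrative only and is not code that Paralus now uses; the isOrgAdmin name is an assumption.

package main

import (
    "fmt"
    "slices"
)

// isOrgAdmin reports whether the "Organization Admins" group is present.
func isOrgAdmin(groups []string) bool {
    return slices.Contains(groups, "Organization Admins")
}

func main() {
    fmt.Println(isOrgAdmin([]string{"Developers", "Organization Admins"})) // true
    fmt.Println(isOrgAdmin([]string{"Developers"}))                        // false
}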
@@ -44,10 +44,6 @@ type relayObject struct {

// relayPeerService relay peer service
type relayPeerService struct {
    cert   []byte // rpc server certificate
    key    []byte // rpc server key
    rootCA []byte // rpc rootCA to verify client certificates.
    port   int

    //ServiceUUID ...
    ServiceUUID string
@@ -1,20 +0,0 @@
package server

import (
    v3 "github.com/paralus/paralus/proto/types/commonpb/v3"

    "google.golang.org/protobuf/types/known/timestamppb"
)

func getStatus(err error) *v3.Status {
    if err != nil {
        return &v3.Status{
            ConditionStatus: v3.ConditionStatus_StatusFailed,
            LastUpdated:     timestamppb.Now(),
            Reason:          err.Error(),
        }
    }
    return &v3.Status{
        ConditionStatus: v3.ConditionStatus_StatusOK,
    }
}