mirror of
https://github.com/open-cluster-management-io/ocm.git
synced 2026-02-14 18:09:57 +00:00
Some checks failed
Scorecard supply-chain security / Scorecard analysis (push) Failing after 1m25s
Post / coverage (push) Failing after 36m59s
Post / images (amd64, addon-manager) (push) Failing after 7m34s
Post / images (amd64, placement) (push) Failing after 7m4s
Post / images (amd64, registration) (push) Failing after 7m8s
Post / images (amd64, registration-operator) (push) Failing after 7m3s
Post / images (amd64, work) (push) Failing after 6m59s
Post / images (arm64, addon-manager) (push) Failing after 7m0s
Post / images (arm64, placement) (push) Failing after 6m54s
Post / images (arm64, registration) (push) Failing after 6m55s
Post / images (arm64, registration-operator) (push) Failing after 6m55s
Post / images (arm64, work) (push) Failing after 7m16s
Post / image manifest (addon-manager) (push) Has been skipped
Post / image manifest (placement) (push) Has been skipped
Post / image manifest (registration) (push) Has been skipped
Post / image manifest (registration-operator) (push) Has been skipped
Post / image manifest (work) (push) Has been skipped
Post / trigger clusteradm e2e (push) Has been skipped
Signed-off-by: Jian Qiu <jqiu@redhat.com>
271 lines
7.4 KiB
Go
271 lines
7.4 KiB
Go
package helpers
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"os"
|
|
"path/filepath"
|
|
"time"
|
|
|
|
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
|
|
authv1 "k8s.io/api/authentication/v1"
|
|
corev1 "k8s.io/api/core/v1"
|
|
"k8s.io/apimachinery/pkg/api/errors"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"k8s.io/client-go/kubernetes"
|
|
coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
|
"k8s.io/client-go/rest"
|
|
"k8s.io/client-go/tools/clientcmd"
|
|
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
|
"k8s.io/klog/v2"
|
|
"k8s.io/utils/pointer"
|
|
|
|
"open-cluster-management.io/sdk-go/pkg/basecontroller/events"
|
|
|
|
commonrecorder "open-cluster-management.io/ocm/pkg/common/recorder"
|
|
)
|
|
|
|
// TokenGetterFunc returns a service account token, the token's expiration
// timestamp as RFC 3339 text (nil when the token has no recorded expiration),
// and additional metadata to persist alongside the token.
type TokenGetterFunc func() (token []byte, expiration []byte, additionalData map[string][]byte, err error)
|
|
|
|
// SATokenGetter get the saToken of target sa. If there is not secrets in the sa, use the tokenrequest to get a token.
|
|
func SATokenGetter(ctx context.Context, saName, saNamespace string, saClient kubernetes.Interface) TokenGetterFunc {
|
|
return func() ([]byte, []byte, map[string][]byte, error) {
|
|
// get the service account
|
|
sa, err := saClient.CoreV1().ServiceAccounts(saNamespace).Get(ctx, saName, metav1.GetOptions{})
|
|
if err != nil {
|
|
return nil, nil, nil, err
|
|
}
|
|
|
|
additionalData := map[string][]byte{
|
|
"serviceaccount_namespace": []byte(saNamespace),
|
|
"serviceaccount_name": []byte(saName),
|
|
"serviceaccount_uid": []byte(sa.UID),
|
|
}
|
|
|
|
for _, secret := range sa.Secrets {
|
|
// get the token secret
|
|
tokenSecretName := secret.Name
|
|
|
|
// get the token secret
|
|
tokenSecret, err := saClient.CoreV1().Secrets(saNamespace).Get(ctx, tokenSecretName, metav1.GetOptions{})
|
|
if err != nil {
|
|
return nil, nil, nil, err
|
|
}
|
|
|
|
if tokenSecret.Type != corev1.SecretTypeServiceAccountToken {
|
|
continue
|
|
}
|
|
|
|
saToken, ok := tokenSecret.Data["token"]
|
|
if !ok {
|
|
continue
|
|
}
|
|
|
|
return saToken, nil, additionalData, nil
|
|
}
|
|
|
|
// 8640 hour
|
|
tr, err := saClient.CoreV1().ServiceAccounts(saNamespace).
|
|
CreateToken(ctx, saName, &authv1.TokenRequest{
|
|
Spec: authv1.TokenRequestSpec{
|
|
ExpirationSeconds: pointer.Int64(8640 * 3600),
|
|
},
|
|
}, metav1.CreateOptions{})
|
|
if err != nil {
|
|
return nil, nil, additionalData, err
|
|
}
|
|
expiration, err := tr.Status.ExpirationTimestamp.MarshalText()
|
|
if err != nil {
|
|
return nil, nil, additionalData, nil
|
|
}
|
|
return []byte(tr.Status.Token), expiration, additionalData, nil
|
|
}
|
|
}
|
|
|
|
func SyncKubeConfigSecret(ctx context.Context, secretName, secretNamespace, kubeconfigPath string,
|
|
templateKubeconfig *rest.Config, secretClient coreclientv1.SecretsGetter,
|
|
tokenGetter TokenGetterFunc, recorder events.Recorder, labels map[string]string) error {
|
|
secret, err := secretClient.Secrets(secretNamespace).Get(ctx, secretName, metav1.GetOptions{})
|
|
switch {
|
|
case errors.IsNotFound(err):
|
|
return applyKubeconfigSecret(ctx, templateKubeconfig, secretName, secretNamespace,
|
|
kubeconfigPath, secretClient, tokenGetter, recorder, labels)
|
|
case err != nil:
|
|
return err
|
|
}
|
|
|
|
if tokenValid(secret, tokenGetter) && clusterInfoNotChanged(ctx, secret, templateKubeconfig) {
|
|
return nil
|
|
}
|
|
|
|
return applyKubeconfigSecret(ctx, templateKubeconfig, secretName, secretNamespace, kubeconfigPath,
|
|
secretClient, tokenGetter, recorder, labels)
|
|
}
|
|
|
|
func tokenValid(secret *corev1.Secret, tokenGetter TokenGetterFunc) bool {
|
|
_, tokenFound := secret.Data["token"]
|
|
expiration, expirationFound := secret.Data["expiration"]
|
|
|
|
if !tokenFound {
|
|
return false
|
|
}
|
|
|
|
if expirationFound {
|
|
expirationTime, err := time.Parse(time.RFC3339, string(expiration))
|
|
if err != nil {
|
|
return false
|
|
}
|
|
|
|
now := metav1.Now()
|
|
refreshThreshold := 8640 * time.Hour / 5
|
|
lifetime := expirationTime.Sub(now.Time)
|
|
if lifetime < refreshThreshold {
|
|
return false
|
|
}
|
|
}
|
|
|
|
_, _, additionalData, err := tokenGetter()
|
|
if err != nil {
|
|
return false
|
|
}
|
|
|
|
for k, v := range additionalData {
|
|
if !bytes.Equal(secret.Data[k], v) {
|
|
return false
|
|
}
|
|
}
|
|
|
|
return true
|
|
}
|
|
|
|
func clusterInfoNotChanged(ctx context.Context, secret *corev1.Secret, templateKubeconfig *rest.Config) bool {
|
|
logger := klog.FromContext(ctx)
|
|
|
|
// check if the templateKubeconfig is changed
|
|
templateCluster, err := assembleClusterConfig(templateKubeconfig)
|
|
if err != nil {
|
|
logger.Error(err, "Assemble template cluster config error")
|
|
return false
|
|
}
|
|
|
|
saKubeconfig, kubeconfigFound := secret.Data["kubeconfig"]
|
|
if !kubeconfigFound {
|
|
return false
|
|
}
|
|
kubeconfig, err := clientcmd.Load(saKubeconfig)
|
|
if err != nil {
|
|
logger.Error(err, "Load kubeconfig error")
|
|
return false
|
|
}
|
|
cluster, ok := kubeconfig.Clusters["cluster"]
|
|
if !ok {
|
|
logger.Info("Cluster not found")
|
|
return false
|
|
}
|
|
|
|
if cluster.Server != templateCluster.Server {
|
|
logger.Info("Cluster host changed from",
|
|
"before", cluster.Server, "after", templateCluster.Server)
|
|
return false
|
|
}
|
|
if !bytes.Equal(cluster.CertificateAuthorityData, templateCluster.CertificateAuthorityData) {
|
|
logger.Info("Cluster certificate authority data changed")
|
|
return false
|
|
}
|
|
if cluster.InsecureSkipTLSVerify != templateCluster.InsecureSkipTLSVerify {
|
|
logger.Info("Cluster insecureSkipTLSVerify changed",
|
|
"before", cluster.InsecureSkipTLSVerify, "after", templateCluster.InsecureSkipTLSVerify)
|
|
return false
|
|
}
|
|
|
|
return true
|
|
}
|
|
|
|
// applyKubeconfigSecret would render saToken to a secret.
|
|
func applyKubeconfigSecret(ctx context.Context, templateKubeconfig *rest.Config, secretName, secretNamespace,
|
|
kubeconfigPath string, secretClient coreclientv1.SecretsGetter, tokenGetter TokenGetterFunc,
|
|
recorder events.Recorder, labels map[string]string) error {
|
|
|
|
token, expiration, additionalData, err := tokenGetter()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
c, err := assembleClusterConfig(templateKubeconfig)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
kubeconfigContent, err := clientcmd.Write(clientcmdapi.Config{
|
|
Kind: "Config",
|
|
APIVersion: "v1",
|
|
Clusters: map[string]*clientcmdapi.Cluster{
|
|
"cluster": c,
|
|
},
|
|
Contexts: map[string]*clientcmdapi.Context{
|
|
"context": {
|
|
Cluster: "cluster",
|
|
AuthInfo: "user",
|
|
},
|
|
},
|
|
AuthInfos: map[string]*clientcmdapi.AuthInfo{
|
|
"user": {
|
|
TokenFile: filepath.Join(filepath.Dir(kubeconfigPath), "token"),
|
|
},
|
|
},
|
|
CurrentContext: "context",
|
|
})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
secret := &corev1.Secret{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Namespace: secretNamespace,
|
|
Name: secretName,
|
|
Labels: labels,
|
|
},
|
|
Data: map[string][]byte{
|
|
"kubeconfig": kubeconfigContent,
|
|
"token": token,
|
|
},
|
|
}
|
|
|
|
if expiration != nil {
|
|
secret.Data["expiration"] = expiration
|
|
}
|
|
|
|
for k, v := range additionalData {
|
|
secret.Data[k] = v
|
|
}
|
|
|
|
recorderWrapper := commonrecorder.NewEventsRecorderWrapper(ctx, recorder)
|
|
_, _, err = resourceapply.ApplySecret(ctx, secretClient, recorderWrapper, secret)
|
|
return err
|
|
}
|
|
|
|
func assembleClusterConfig(templateKubeconfig *rest.Config) (*clientcmdapi.Cluster, error) {
|
|
var c *clientcmdapi.Cluster
|
|
if len(templateKubeconfig.CAData) != 0 { //nolint:gocritic
|
|
c = &clientcmdapi.Cluster{
|
|
Server: templateKubeconfig.Host,
|
|
CertificateAuthorityData: templateKubeconfig.CAData,
|
|
}
|
|
} else if len(templateKubeconfig.CAFile) != 0 {
|
|
caData, err := os.ReadFile(templateKubeconfig.CAFile)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
c = &clientcmdapi.Cluster{
|
|
Server: templateKubeconfig.Host,
|
|
CertificateAuthorityData: caData,
|
|
}
|
|
} else {
|
|
c = &clientcmdapi.Cluster{
|
|
Server: templateKubeconfig.Host,
|
|
InsecureSkipTLSVerify: true,
|
|
}
|
|
}
|
|
return c, nil
|
|
}
|