Init controller

Stefan Prodan
2018-09-21 19:22:44 +03:00
parent c32cc95235
commit 39329bbb00
5 changed files with 396 additions and 0 deletions

README.md (1 line added)
@@ -1,2 +1,3 @@
# steerer
service mesh steerer

cmd/controller/main.go (new file, 108 lines added)
@@ -0,0 +1,108 @@
package main
import (
"flag"
"log"
"time"
sharedclientset "github.com/knative/pkg/client/clientset/versioned"
sharedscheme "github.com/knative/pkg/client/clientset/versioned/scheme"
sharedinformers "github.com/knative/pkg/client/informers/externalversions"
"github.com/knative/pkg/signals"
"github.com/stefanprodan/steerer/pkg/controller"
"github.com/stefanprodan/steerer/pkg/logging"
"k8s.io/apimachinery/pkg/apis/meta/v1"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)
var (
masterURL string
kubeconfig string
)
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
}
func main() {
flag.Parse()
logger, err := logging.NewLogger("debug")
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
defer logger.Sync()
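// stopCh is closed on SIGINT/SIGTERM so the informers and workers can shut down gracefully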
stopCh := signals.SetupSignalHandler()
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
logger.Fatalf("Error building kubeconfig: %v", err)
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building kubernetes clientset: %v", err)
}
sharedClient, err := sharedclientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building shared clientset: %v", err)
}
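// register the shared (Istio) API types with the client-go scheme so the event recorder can handle them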
sharedscheme.AddToScheme(scheme.Scheme)
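// the informer factories re-sync their caches every 30 seconds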
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
sharedInformerFactory := sharedinformers.NewSharedInformerFactory(sharedClient, time.Second*30)
coreServiceInformer := kubeInformerFactory.Core().V1().Services()
virtualServiceInformer := sharedInformerFactory.Networking().V1alpha3().VirtualServices()
ver, err := kubeClient.Discovery().ServerVersion()
if err != nil {
logger.Fatalf("Error calling Kubernetes API: %v", err)
}
logger.Infof("Kubernetes version %v", ver)
opts := v1.ListOptions{}
list, err := sharedClient.NetworkingV1alpha3().VirtualServices("demo").List(opts)
if err != nil {
logger.Fatalf("Error building shared clientset: %v", err)
}
logger.Infof("VirtualServices %v", len(list.Items))
c := controller.NewController(
kubeClient,
sharedClient,
logger,
coreServiceInformer,
virtualServiceInformer,
)
kubeInformerFactory.Start(stopCh)
sharedInformerFactory.Start(stopCh)
logger.Info("Waiting for informer caches to sync")
for i, synced := range []cache.InformerSynced{
coreServiceInformer.Informer().HasSynced,
virtualServiceInformer.Informer().HasSynced,
} {
if ok := cache.WaitForCacheSync(stopCh, synced); !ok {
logger.Fatalf("failed to wait for cache at index %v to sync", i)
}
}
go func(ctrl *controller.Controller) {
if runErr := ctrl.Run(2, stopCh); runErr != nil {
logger.Fatalf("Error running controller: %v", runErr)
}
}(c)
<-stopCh
}

pkg/controller/controller.go (new file, 226 lines added)
@@ -0,0 +1,226 @@
package controller
import (
"fmt"
"time"
"github.com/knative/pkg/apis/istio/v1alpha3"
sharedclientset "github.com/knative/pkg/client/clientset/versioned"
istioinformers "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3"
istiolisters "github.com/knative/pkg/client/listers/istio/v1alpha3"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
const controllerAgentName = "steerer"
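// Controller watches Kubernetes services and Istio virtual services and processes them through a rate-limited workqueue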
type Controller struct {
kubeclientset kubernetes.Interface
sharedclientset sharedclientset.Interface
logger *zap.SugaredLogger
serviceLister corev1listers.ServiceLister
serviceSynced cache.InformerSynced
virtualServiceLister istiolisters.VirtualServiceLister
virtualServiceSynced cache.InformerSynced
workqueue workqueue.RateLimitingInterface
recorder record.EventRecorder
}
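// NewController wires the clients, listers and event recorder together and registers the VirtualService add/update handlers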
func NewController(
kubeclientset kubernetes.Interface,
sharedclientset sharedclientset.Interface,
logger *zap.SugaredLogger,
serviceInformer corev1informers.ServiceInformer,
virtualServiceInformer istioinformers.VirtualServiceInformer,
) *Controller {
logger.Debug("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
Interface: kubeclientset.CoreV1().Events(""),
})
recorder := eventBroadcaster.NewRecorder(
scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
ctrl := &Controller{
kubeclientset: kubeclientset,
sharedclientset: sharedclientset,
logger: logger,
serviceLister: serviceInformer.Lister(),
serviceSynced: serviceInformer.Informer().HasSynced,
virtualServiceLister: virtualServiceInformer.Lister(),
virtualServiceSynced: virtualServiceInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
recorder: recorder,
}
virtualServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.enqueueVirtualService,
UpdateFunc: func(old, new interface{}) {
ctrl.enqueueVirtualService(new)
},
})
return ctrl
}
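// Run starts the requested number of workers and blocks until stopCh is closed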
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
c.logger.Info("Starting controller")
for i := 0; i < threadiness; i++ {
go wait.Until(func() {
for c.processNextWorkItem() {
}
}, time.Second, stopCh)
}
c.logger.Info("Started workers")
<-stopCh
c.logger.Info("Shutting down workers")
return nil
}
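// processNextWorkItem pops a key off the workqueue and hands it to syncHandler; it returns false once the queue is shut down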
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
err := func(obj interface{}) error {
defer c.workqueue.Done(obj)
var key string
var ok bool
if key, ok = obj.(string); !ok {
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name key of the
// VirtualService to be synced.
if err := c.syncHandler(key); err != nil {
return fmt.Errorf("error syncing '%s': %s", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
c.logger.Infof("Successfully synced '%s'", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
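// syncHandler looks up the VirtualService referenced by the namespace/name key; for now it only logs the object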
func (c *Controller) syncHandler(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
vs, err := c.virtualServiceLister.VirtualServices(namespace).Get(name)
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("VirtualService '%s' in work queue no longer exists", key))
return nil
}
if err != nil {
return err
}
c.logger.Infof("VirtualService %s.%s", vs.Name, namespace)
return nil
}
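// enqueueVirtualService adds the namespace/name key of the given object to the workqueue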
func (c *Controller) enqueueVirtualService(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
c.workqueue.AddRateLimited(key)
}
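// handleObject recovers deleted objects from their tombstones and, when the object is owned by a VirtualService, looks up and enqueues the corresponding service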
func (c *Controller) handleObject(obj interface{}) {
var object metav1.Object
var ok bool
if object, ok = obj.(metav1.Object); !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
return
}
object, ok = tombstone.Obj.(metav1.Object)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
return
}
c.logger.Debugf("Recovered deleted object '%s' from tombstone", object.GetName())
}
c.logger.Debugf("Processing object: %s", object.GetName())
if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
if ownerRef.Kind != "VirtualService" {
return
}
vs, err := c.serviceLister.Services(object.GetNamespace()).Get(ownerRef.Name)
if err != nil {
c.logger.Debugf("ignoring orphaned object '%s' of '%s'", object.GetSelfLink(), ownerRef.Name)
return
}
c.enqueueVirtualService(vs)
return
}
}
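// CreateVirtualService creates an Istio virtual service that sends all HTTP traffic (weight 100) to the given host, optionally bound to a gateway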
func (c *Controller) CreateVirtualService(namespace string, name string, host string, port uint32, gateway string) error {
vs := &v1alpha3.VirtualService{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Spec: v1alpha3.VirtualServiceSpec{
Hosts: []string{host},
Http: []v1alpha3.HTTPRoute{
{
Route: []v1alpha3.DestinationWeight{
{
Destination: v1alpha3.Destination{
Host: host,
},
Weight: 100,
},
},
},
},
},
}
if gateway != "" {
vs.Spec.Gateways = []string{gateway}
}
_, err := c.sharedclientset.NetworkingV1alpha3().VirtualServices(vs.Namespace).Create(vs)
return err
}

pkg/logging/logger.go (new file, 57 lines added)
@@ -0,0 +1,57 @@
package logging
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
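// NewLogger returns a JSON-encoded zap sugared logger at the requested level; unknown levels fall back to info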
func NewLogger(logLevel string) (*zap.SugaredLogger, error) {
level := zap.NewAtomicLevelAt(zapcore.InfoLevel)
switch logLevel {
case "debug":
level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
case "info":
level = zap.NewAtomicLevelAt(zapcore.InfoLevel)
case "warn":
level = zap.NewAtomicLevelAt(zapcore.WarnLevel)
case "error":
level = zap.NewAtomicLevelAt(zapcore.ErrorLevel)
case "fatal":
level = zap.NewAtomicLevelAt(zapcore.FatalLevel)
case "panic":
level = zap.NewAtomicLevelAt(zapcore.PanicLevel)
}
zapEncoderConfig := zapcore.EncoderConfig{
TimeKey: "ts",
LevelKey: "level",
NameKey: "logger",
CallerKey: "caller",
MessageKey: "msg",
StacktraceKey: "stacktrace",
LineEnding: zapcore.DefaultLineEnding,
EncodeLevel: zapcore.LowercaseLevelEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
EncodeDuration: zapcore.SecondsDurationEncoder,
EncodeCaller: zapcore.ShortCallerEncoder,
}
zapConfig := zap.Config{
Level: level,
Development: false,
Sampling: &zap.SamplingConfig{
Initial: 100,
Thereafter: 100,
},
Encoding: "json",
EncoderConfig: zapEncoderConfig,
OutputPaths: []string{"stderr"},
ErrorOutputPaths: []string{"stderr"},
}
logger, err := zapConfig.Build()
if err != nil {
return nil, err
}
return logger.Sugar(), nil
}

pkg/version/version.go (new file, 4 lines added)
@@ -0,0 +1,4 @@
package version
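// VERSION and REVISION identify the controller build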
var VERSION = "0.0.1-alpha.1"
var REVISION = "unknown"