Mirror of https://github.com/weaveworks/scope.git
Merge branch '3810-cordon-control'
@@ -15,7 +15,6 @@ rules:
- pods/log
- replicationcontrollers
- services
- nodes
- namespaces
- persistentvolumes
- persistentvolumeclaims
@@ -95,3 +94,13 @@ rules:
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
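Note (not part of the diff): the second hunk grants a dedicated rule on the core "nodes" resource with get/list/watch/update/patch, which the probe needs in order to read node objects and flip spec.unschedulable. A minimal sketch of the client-go calls those verbs permit, assuming in-cluster config and the pre-context-argument client-go signatures used elsewhere in this diff; all names below are illustrative only:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// cordonByPatch exercises the "get" and "patch" verbs granted above.
func cordonByPatch(name string) error {
	config, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	// "get": read the node first.
	if _, err := cs.CoreV1().Nodes().Get(name, metav1.GetOptions{}); err != nil {
		return err
	}
	// "patch": set spec.unschedulable directly with a strategic merge patch.
	patch := []byte(`{"spec":{"unschedulable":true}}`)
	_, err = cs.CoreV1().Nodes().Patch(name, types.StrategicMergePatchType, patch)
	return err
}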
go.sum (7 changes)
@@ -1,9 +1,11 @@
|
||||
camlistore.org v0.0.0-20171230002226-a5a65f0d8b22 h1:VP9VuyosMHmS9zdzd5Co9TJKWPbMTfmtKc/XWctszyQ=
|
||||
camlistore.org v0.0.0-20171230002226-a5a65f0d8b22/go.mod h1:mzAP6ICVzPdfO0f3N9hAVWhO7qplHF7mbFhGsGdErTI=
|
||||
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
|
||||
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
@@ -39,7 +41,9 @@ github.com/certifi/gocertifi v0.0.0-20150906030631-84c0a38a18fc h1:zSPFItDTOJZPd
|
||||
github.com/certifi/gocertifi v0.0.0-20150906030631-84c0a38a18fc/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
|
||||
@@ -97,6 +101,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
|
||||
github.com/golang/groupcache v0.0.0-20171101203131-84a468cf14b4 h1:6o8aP0LGMKzo3NzwhhX6EJsiJ3ejmj+9yA/3p8Fjjlw=
|
||||
github.com/golang/groupcache v0.0.0-20171101203131-84a468cf14b4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -134,6 +139,7 @@ github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxB
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
|
||||
github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
@@ -274,6 +280,7 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
|
||||
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/tylerb/graceful v1.2.13 h1:yKdTh6eHcWdD8Jm3wxgJ6pNf8Lb3wwbV4Ip8fHbeMLE=
|
||||
github.com/tylerb/graceful v1.2.13/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1WXJOI9II=
|
||||
|
||||
@@ -65,6 +65,10 @@ type Client interface {
	DeleteVolumeSnapshot(namespaceID, volumeSnapshotID string) error
	ScaleUp(namespaceID, id string) error
	ScaleDown(namespaceID, id string) error
	// Cordon or Uncordon a node based on whether `desired` is true or false respectively.
	CordonNode(name string, desired bool) error
	// Returns a list of kubernetes nodes.
	GetNodes() ([]apiv1.Node, error)
}

// ResourceMap is the mapping of resource and their GroupKind
@@ -624,3 +628,33 @@ func (c *client) modifyScale(namespaceID, id string, f func(*autoscalingv1.Scale
func (c *client) Stop() {
	close(c.quit)
}

func (c *client) CordonNode(name string, desired bool) error {
	node, err := c.client.CoreV1().Nodes().Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	helper := newCordonHelper(node)
	if updateRequired := helper.updateIfRequired(desired); !updateRequired {
		return nil
	}

	err, patchErr := helper.patchOrReplace(c.client, false)
	if patchErr != nil {
		return patchErr
	}
	if err != nil {
		return err
	}
	return nil
}

func (c *client) GetNodes() ([]apiv1.Node, error) {
	l, err := c.client.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	return l.Items, nil
}
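Note (not part of the diff): a sketch of how a caller could drive the two new Client methods together; Client is the interface extended above, and the loop fields come from the apiv1.Node objects GetNodes returns.

// cordonAll cordons every node that is still schedulable (illustrative only).
func cordonAll(cli Client) error {
	nodes, err := cli.GetNodes()
	if err != nil {
		return err
	}
	for _, n := range nodes {
		if n.Spec.Unschedulable {
			continue // already cordoned
		}
		if err := cli.CordonNode(n.Name, true); err != nil {
			return err
		}
	}
	return nil
}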
@@ -21,6 +21,8 @@ const (
	DeleteVolumeSnapshot = report.KubernetesDeleteVolumeSnapshot
	ScaleUp = report.KubernetesScaleUp
	ScaleDown = report.KubernetesScaleDown
	CordonNode = report.KubernetesCordonNode
	UncordonNode = report.KubernetesUncordonNode
)

// GroupName and version used by CRDs
@@ -474,6 +476,17 @@ func (r *Reporter) CaptureJob(f func(xfer.Request, string, string) xfer.Response
	}
}

// CaptureNode is exported for testing
func (r *Reporter) CaptureNode(f func(xfer.Request, string) xfer.Response) func(xfer.Request) xfer.Response {
	return func(req xfer.Request) xfer.Response {
		nodeID, ok := report.ParseHostNodeID(req.NodeID)
		if !ok {
			return xfer.ResponseErrorf("Invalid ID: %s", req.NodeID)
		}
		return f(req, nodeID)
	}
}

// ScaleUp is the control to scale up a deployment
func (r *Reporter) ScaleUp(req xfer.Request, namespace, id string) xfer.Response {
	return xfer.ResponseError(r.client.ScaleUp(namespace, id))
@@ -484,6 +497,16 @@ func (r *Reporter) ScaleDown(req xfer.Request, namespace, id string) xfer.Respon
	return xfer.ResponseError(r.client.ScaleDown(namespace, id))
}

// CordonNode is the control to cordon a node.
func (r *Reporter) CordonNode(req xfer.Request, name string) xfer.Response {
	return xfer.ResponseError(r.client.CordonNode(name, true))
}

// UncordonNode is the control to un-cordon a node.
func (r *Reporter) UncordonNode(req xfer.Request, name string) xfer.Response {
	return xfer.ResponseError(r.client.CordonNode(name, false))
}

func (r *Reporter) registerControls() {
	controls := map[string]xfer.ControlHandlerFunc{
		CloneVolumeSnapshot: r.CaptureVolumeSnapshot(r.cloneVolumeSnapshot),
@@ -494,6 +517,8 @@ func (r *Reporter) registerControls() {
		DeleteVolumeSnapshot: r.CaptureVolumeSnapshot(r.deleteVolumeSnapshot),
		ScaleUp: r.CaptureDeployment(r.ScaleUp),
		ScaleDown: r.CaptureDeployment(r.ScaleDown),
		CordonNode: r.CaptureNode(r.CordonNode),
		UncordonNode: r.CaptureNode(r.UncordonNode),
	}
	r.handlerRegistry.Batch(nil, controls)
}
@@ -508,6 +533,8 @@ func (r *Reporter) deregisterControls() {
		DeleteVolumeSnapshot,
		ScaleUp,
		ScaleDown,
		CordonNode,
		UncordonNode,
	}
	r.handlerRegistry.Batch(controls, nil)
}
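Note (not part of the diff): CordonNode/UncordonNode are invoked against host node IDs, so CaptureNode first maps the report node ID back to a plain Kubernetes node name before calling the client. A sketch of that round trip, assuming r is the *Reporter defined in this file and using xfer.Request's NodeID field exactly as above:

// cordonViaControl shows the same wiring registerControls sets up (sketch only).
func cordonViaControl(r *Reporter, nodeName string) xfer.Response {
	handler := r.CaptureNode(r.CordonNode) // same wiring as registerControls
	req := xfer.Request{NodeID: report.MakeHostNodeID(nodeName)}
	// ParseHostNodeID recovers nodeName, then the handler calls client.CordonNode(nodeName, true).
	return handler(req)
}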
probe/kubernetes/cordon.go (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
Copied from

https://github.com/kubernetes/kubectl/blob/master/pkg/drain/cordon.go

at commit f9460c53339c4bb60b20031e3c6125e8bac679e2 to add node cordon feature.
*/
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubernetes

import (
	"encoding/json"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/kubernetes"
)

// CordonHelper wraps functionality to cordon/uncordon nodes
type cordonHelper struct {
	node    *apiv1.Node
	desired bool
}

// NewCordonHelper returns a new CordonHelper
func newCordonHelper(node *apiv1.Node) *cordonHelper {
	return &cordonHelper{
		node: node,
	}
}

// UpdateIfRequired returns true if c.node.Spec.Unschedulable isn't already set,
// or false when no change is needed
func (c *cordonHelper) updateIfRequired(desired bool) bool {
	c.desired = desired

	return c.node.Spec.Unschedulable != c.desired
}

// PatchOrReplace uses given clientset to update the node status, either by patching or
// updating the given node object; it may return error if the object cannot be encoded as
// JSON, or if either patch or update calls fail; it will also return a second error
// whenever creating a patch has failed
func (c *cordonHelper) patchOrReplace(clientset kubernetes.Interface, serverDryRun bool) (error, error) {
	client := clientset.CoreV1().Nodes()

	oldData, err := json.Marshal(c.node)
	if err != nil {
		return err, nil
	}

	c.node.Spec.Unschedulable = c.desired

	newData, err := json.Marshal(c.node)
	if err != nil {
		return err, nil
	}

	patchBytes, patchErr := strategicpatch.CreateTwoWayMergePatch(oldData, newData, c.node)
	if patchErr == nil {
		_, err = client.Patch(c.node.Name, types.StrategicMergePatchType, patchBytes)
	} else {
		updateOptions := metav1.UpdateOptions{}
		if serverDryRun {
			updateOptions.DryRun = []string{metav1.DryRunAll}
		}
		_, err = client.Update(c.node)
	}
	return err, patchErr
}
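Note (not part of the diff): a test-style sketch of the helper against a fake clientset, to make the resulting patch concrete. It assumes "fmt" and k8s.io/client-go/kubernetes/fake (same client-go vintage as the vendored code below) are imported; everything else is defined in this file.

func ExampleCordonHelper() {
	node := &apiv1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	cs := fake.NewSimpleClientset(node)

	h := newCordonHelper(node)
	if h.updateIfRequired(true) { // node is schedulable, so a change is needed
		// Sends {"spec":{"unschedulable":true}} as a strategic merge patch;
		// a full Update is only attempted if building the patch fails.
		err, patchErr := h.patchOrReplace(cs, false)
		if patchErr != nil || err != nil {
			panic("cordon failed")
		}
	}

	got, _ := cs.CoreV1().Nodes().Get("node-1", metav1.GetOptions{})
	fmt.Println(got.Spec.Unschedulable)
	// Output: true
}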
@@ -181,6 +181,21 @@ var (
		Icon:  "fa fa-file-text",
		Rank:  2,
	}

	CordonControl = []report.Control{
		{
			ID:    CordonNode,
			Human: "Cordon",
			Icon:  "fa fa-toggle-off",
			Rank:  1,
		},
		{
			ID:    UncordonNode,
			Human: "Uncordon",
			Icon:  "fa fa-toggle-on",
			Rank:  0,
		},
	}
)

// Reporter generate Reports containing Container and ContainerImage topologies
@@ -345,6 +360,10 @@ func (r *Reporter) Report() (report.Report, error) {
	if err != nil {
		return result, err
	}
	hostTopology, err := r.hostTopology()
	if err != nil {
		return result, err
	}

	result.Pod = result.Pod.Merge(podTopology)
	result.Service = result.Service.Merge(serviceTopology)
@@ -359,6 +378,8 @@ func (r *Reporter) Report() (report.Report, error) {
	result.VolumeSnapshot = result.VolumeSnapshot.Merge(volumeSnapshotTopology)
	result.VolumeSnapshotData = result.VolumeSnapshotData.Merge(volumeSnapshotDataTopology)
	result.Job = result.Job.Merge(jobTopology)
	result.Host = result.Host.Merge(hostTopology)

	return result, nil
}

@@ -678,3 +699,32 @@ func (r *Reporter) namespaceTopology() (report.Topology, error) {
	})
	return result, err
}

func (r *Reporter) hostTopology() (report.Topology, error) {
	result := report.MakeTopology()
	// Add buttons for Host view, with the ID of the Kubernetes probe
	for _, control := range CordonControl {
		control.ProbeID = r.probeID
		result.Controls.AddControl(control)
	}

	nodes, err := r.client.GetNodes()
	if err != nil {
		return result, err
	}

	for _, n := range nodes {
		var activeControl string
		if n.Spec.Unschedulable {
			activeControl = UncordonNode
		} else {
			activeControl = CordonNode
		}
		result.AddNode(
			report.MakeNode(report.MakeHostNodeID(n.Name)).
				WithTopology(report.Host).
				WithLatestActiveControls(activeControl),
		)
	}
	return result, nil
}
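Note (not part of the diff): hostTopology attaches the Cordon/Uncordon buttons to nodes in the Host topology, which is otherwise populated by the host probe, and it stamps each control with the Kubernetes probe's ID. Together with the controlsFor change below, which prefers Control.ProbeID over the probe that reported the node, this makes the app deliver cordon/uncordon requests to the Kubernetes probe, the one that actually holds an API-server client. The routing rule, reduced to a sketch:

// routeProbeID mirrors the override added to controlsFor (sketch only).
func routeProbeID(nodeProbeID string, control report.Control) string {
	if control.ProbeID != "" {
		return control.ProbeID // e.g. the kubernetes probe for Cordon/Uncordon
	}
	return nodeProbeID // default: the probe that reported the node
}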
@@ -205,6 +205,14 @@ func (c *mockClient) Describe(namespaceID, resourceID string, groupKind schema.G
	return nil, nil
}

func (c *mockClient) CordonNode(name string, desired bool) error {
	return nil
}

func (c *mockClient) GetNodes() ([]apiv1.Node, error) {
	return nil, nil
}

type mockPipeClient map[string]xfer.Pipe

func (c mockPipeClient) PipeConnection(appID, id string, pipe xfer.Pipe) error {
@@ -113,6 +113,9 @@ func controlsFor(topology report.Topology, nodeID string) []ControlInstance {
	}
	for _, controlID := range node.ActiveControls() {
		if control, ok := topology.Controls[controlID]; ok {
			if control.ProbeID != "" { // does this Control have an override for the node probe?
				probeID = control.ProbeID
			}
			result = append(result, ControlInstance{
				ProbeID: probeID,
				NodeID:  nodeID,
@@ -10,6 +10,7 @@ type Control struct {
	Icon         string `json:"icon"` // from https://fortawesome.github.io/Font-Awesome/cheatsheet/ please
	Confirmation string `json:"confirmation,omitempty"`
	Rank         int    `json:"rank"`
	ProbeID      string `json:"probeId,omitempty"`
}

// Merge merges other with cs, returning a fresh Controls.
@@ -95,6 +95,8 @@ const (
	KubernetesCloneVolumeSnapshot = "kubernetes_clone_volume_snapshot"
	KubernetesDeleteVolumeSnapshot = "kubernetes_delete_volume_snapshot"
	KubernetesDescribe = "kubernetes_describe"
	KubernetesCordonNode = "kubernetes_cordon_node"
	KubernetesUncordonNode = "kubernetes_uncordon_node"
	// probe/awsecs
	ECSCluster = "ecs_cluster"
	ECSCreatedAt = "ecs_created_at"
@@ -143,6 +143,37 @@ func (n Node) ActiveControls() []string {
	return strings.Split(activeControls, ScopeDelim)
}

// MergeActiveControls merges the control lists from n and b
func (n Node) MergeActiveControls(b Node) Node {
	activeControlsB, tsB, foundB := b.Latest.LookupEntry(NodeActiveControls)
	if !foundB { // nothing to merge
		return n
	}
	activeControlsA, tsA, foundA := n.Latest.LookupEntry(NodeActiveControls)
	if !foundA {
		return n.WithLatest(NodeActiveControls, tsB, activeControlsB)
	}
	if activeControlsA == activeControlsB {
		return n
	}
	// If we get here we have active controls in both n and b that are different
	merged := make(map[string]struct{})
	for _, c := range strings.Split(activeControlsA, ScopeDelim) {
		merged[c] = struct{}{}
	}
	for _, c := range strings.Split(activeControlsB, ScopeDelim) {
		merged[c] = struct{}{}
	}
	cs := make([]string, 0, len(merged))
	for c := range merged {
		cs = append(cs, c)
	}
	if tsA.Before(tsB) {
		tsA = tsB
	}
	return n.WithLatest(NodeActiveControls, tsA, strings.Join(cs, ScopeDelim))
}

// WithParent returns a fresh copy of n, with one parent added
func (n Node) WithParent(key, parent string) Node {
	n.Parents = n.Parents.AddString(key, parent)
@@ -173,7 +204,7 @@ func (n Node) WithChild(child Node) Node {
	return n
}

// Merge mergses the individual components of a node and returns a
// Merge merges the individual components of a node and returns a
// fresh node.
func (n Node) Merge(other Node) Node {
	id := n.ID
@@ -186,7 +217,8 @@ func (n Node) Merge(other Node) Node {
	} else if other.Topology != "" && topology != other.Topology {
		panic("Cannot merge nodes with different topology types: " + topology + " != " + other.Topology)
	}
	return Node{
	newNode := Node{
		ID: id,
		Topology: topology,
		Sets: n.Sets.Merge(other.Sets),
@@ -196,6 +228,13 @@ func (n Node) Merge(other Node) Node {
		Parents: n.Parents.Merge(other.Parents),
		Children: n.Children.Merge(other.Children),
	}

	// Special case to merge controls from two different probes.
	if topology == Host {
		newNode = newNode.MergeActiveControls(other)
	}

	return newNode
}

// UnsafeUnMerge removes data from n that would be added by merging other,
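Note (not part of the diff): a Host node now arrives from two probes, each advertising different controls, and a plain latest-value merge would keep only the newer activeControls string; MergeActiveControls unions them instead. A small sketch, comparable to TestActiveControls in the next hunk (the control IDs here are illustrative):

// exampleMergeHostControls shows the union behaviour added for Host nodes.
func exampleMergeHostControls() []string {
	a := report.MakeNode("host1").WithTopology(report.Host).
		WithLatestActiveControls("host_exec") // as reported by the host probe
	b := report.MakeNode("host1").WithTopology(report.Host).
		WithLatestActiveControls("kubernetes_cordon_node") // as reported by the kubernetes probe
	// Because both nodes are in the Host topology, Merge unions the control
	// lists instead of letting the newer timestamp win.
	return a.Merge(b).ActiveControls() // both IDs, order not guaranteed
}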
@@ -1,6 +1,7 @@
package report_test

import (
	"sort"
	"testing"
	"time"

@@ -180,3 +181,28 @@ func TestCounters(t *testing.T) {
		t.Errorf("Counters: %s", test.Diff(want, have))
	}
}

func TestActiveControls(t *testing.T) {
	mtime.NowForce(time.Now())
	defer mtime.NowReset()

	controls1 := []string{"bar", "foo"}
	node1 := report.MakeNode("node1").WithLatestActiveControls(controls1...)
	assert.Equal(t, controls1, node1.ActiveControls())
	assert.Equal(t, controls1, sorted(node1.MergeActiveControls(node1).ActiveControls()))

	node2 := report.MakeNode("node2")
	assert.Equal(t, controls1, node1.MergeActiveControls(node2).ActiveControls())
	assert.Equal(t, controls1, node2.MergeActiveControls(node1).ActiveControls())

	controls2 := []string{"bar", "bor"}
	controls3 := []string{"bar", "bor", "foo"}
	node3 := report.MakeNode("node1").WithLatestActiveControls(controls2...)
	assert.Equal(t, controls3, sorted(node1.MergeActiveControls(node3).ActiveControls()))
	assert.Equal(t, controls3, sorted(node3.MergeActiveControls(node1).ActiveControls()))
}

func sorted(s []string) []string {
	sort.Strings(s)
	return s
}
vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS (generated, vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
approvers:
- pwittrock
reviewers:
- mengqiy
- apelisse
vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go (generated, vendored, new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mergepatch
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBadJSONDoc = errors.New("invalid JSON document")
|
||||
ErrNoListOfLists = errors.New("lists of lists are not supported")
|
||||
ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list")
|
||||
ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys")
|
||||
ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list")
|
||||
ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list")
|
||||
ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported")
|
||||
)
|
||||
|
||||
func ErrNoMergeKey(m map[string]interface{}, k string) error {
|
||||
return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k)
|
||||
}
|
||||
|
||||
func ErrBadArgType(expected, actual interface{}) error {
|
||||
return fmt.Errorf("expected a %s, but received a %s",
|
||||
reflect.TypeOf(expected),
|
||||
reflect.TypeOf(actual))
|
||||
}
|
||||
|
||||
func ErrBadArgKind(expected, actual interface{}) error {
|
||||
var expectedKindString, actualKindString string
|
||||
if expected == nil {
|
||||
expectedKindString = "nil"
|
||||
} else {
|
||||
expectedKindString = reflect.TypeOf(expected).Kind().String()
|
||||
}
|
||||
if actual == nil {
|
||||
actualKindString = "nil"
|
||||
} else {
|
||||
actualKindString = reflect.TypeOf(actual).Kind().String()
|
||||
}
|
||||
return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString)
|
||||
}
|
||||
|
||||
func ErrBadPatchType(t interface{}, m map[string]interface{}) error {
|
||||
return fmt.Errorf("unknown patch type: %s in map: %v", t, m)
|
||||
}
|
||||
|
||||
// IsPreconditionFailed returns true if the provided error indicates
|
||||
// a precondition failed.
|
||||
func IsPreconditionFailed(err error) bool {
|
||||
_, ok := err.(ErrPreconditionFailed)
|
||||
return ok
|
||||
}
|
||||
|
||||
type ErrPreconditionFailed struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed {
|
||||
s := fmt.Sprintf("precondition failed for: %v", target)
|
||||
return ErrPreconditionFailed{s}
|
||||
}
|
||||
|
||||
func (err ErrPreconditionFailed) Error() string {
|
||||
return err.message
|
||||
}
|
||||
|
||||
type ErrConflict struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func NewErrConflict(patch, current string) ErrConflict {
|
||||
s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current)
|
||||
return ErrConflict{s}
|
||||
}
|
||||
|
||||
func (err ErrConflict) Error() string {
|
||||
return err.message
|
||||
}
|
||||
|
||||
// IsConflict returns true if the provided error indicates
|
||||
// a conflict between the patch and the current configuration.
|
||||
func IsConflict(err error) bool {
|
||||
_, ok := err.(ErrConflict)
|
||||
return ok
|
||||
}
|
||||
vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go (generated, vendored, new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package mergepatch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
// PreconditionFunc asserts that an incompatible change is not present within a patch.
|
||||
type PreconditionFunc func(interface{}) bool
|
||||
|
||||
// RequireKeyUnchanged returns a precondition function that fails if the provided key
|
||||
// is present in the patch (indicating that its value has changed).
|
||||
func RequireKeyUnchanged(key string) PreconditionFunc {
|
||||
return func(patch interface{}) bool {
|
||||
patchMap, ok := patch.(map[string]interface{})
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
// The presence of key means that its value has been changed, so the test fails.
|
||||
_, ok = patchMap[key]
|
||||
return !ok
|
||||
}
|
||||
}
|
||||
|
||||
// RequireMetadataKeyUnchanged creates a precondition function that fails
|
||||
// if the metadata.key is present in the patch (indicating its value
|
||||
// has changed).
|
||||
func RequireMetadataKeyUnchanged(key string) PreconditionFunc {
|
||||
return func(patch interface{}) bool {
|
||||
patchMap, ok := patch.(map[string]interface{})
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
patchMap1, ok := patchMap["metadata"]
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
patchMap2, ok := patchMap1.(map[string]interface{})
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
_, ok = patchMap2[key]
|
||||
return !ok
|
||||
}
|
||||
}
|
||||
|
||||
func ToYAMLOrError(v interface{}) string {
|
||||
y, err := toYAML(v)
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
return y
|
||||
}
|
||||
|
||||
func toYAML(v interface{}) (string, error) {
|
||||
y, err := yaml.Marshal(v)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v))
|
||||
}
|
||||
|
||||
return string(y), nil
|
||||
}
|
||||
|
||||
// HasConflicts returns true if the left and right JSON interface objects overlap with
|
||||
// different values in any key. All keys are required to be strings. Since patches of the
|
||||
// same Type have congruent keys, this is valid for multiple patch types. This method
|
||||
// supports JSON merge patch semantics.
|
||||
//
|
||||
// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts.
|
||||
// Make sure the unmarshaling of left and right are consistent (e.g. use the same library).
|
||||
func HasConflicts(left, right interface{}) (bool, error) {
|
||||
switch typedLeft := left.(type) {
|
||||
case map[string]interface{}:
|
||||
switch typedRight := right.(type) {
|
||||
case map[string]interface{}:
|
||||
for key, leftValue := range typedLeft {
|
||||
rightValue, ok := typedRight[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict {
|
||||
return conflict, err
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
case []interface{}:
|
||||
switch typedRight := right.(type) {
|
||||
case []interface{}:
|
||||
if len(typedLeft) != len(typedRight) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
for i := range typedLeft {
|
||||
if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict {
|
||||
return conflict, err
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
case string, float64, bool, int64, nil:
|
||||
return !reflect.DeepEqual(left, right), nil
|
||||
default:
|
||||
return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left))
|
||||
}
|
||||
}
|
||||
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS (generated, vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
approvers:
- pwittrock
- mengqiy
reviewers:
- mengqiy
- apelisse
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go (generated, vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategicpatch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type LookupPatchMetaError struct {
|
||||
Path string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e LookupPatchMetaError) Error() string {
|
||||
return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err)
|
||||
}
|
||||
|
||||
type FieldNotFoundError struct {
|
||||
Path string
|
||||
Field string
|
||||
}
|
||||
|
||||
func (e FieldNotFoundError) Error() string {
|
||||
return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path)
|
||||
}
|
||||
|
||||
type InvalidTypeError struct {
|
||||
Path string
|
||||
Expected string
|
||||
Actual string
|
||||
}
|
||||
|
||||
func (e InvalidTypeError) Error() string {
|
||||
return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected)
|
||||
}
|
||||
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go (generated, vendored, new file, 194 lines)
@@ -0,0 +1,194 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategicpatch
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/mergepatch"
|
||||
forkedjson "k8s.io/apimachinery/third_party/forked/golang/json"
|
||||
openapi "k8s.io/kube-openapi/pkg/util/proto"
|
||||
)
|
||||
|
||||
type PatchMeta struct {
|
||||
patchStrategies []string
|
||||
patchMergeKey string
|
||||
}
|
||||
|
||||
func (pm PatchMeta) GetPatchStrategies() []string {
|
||||
if pm.patchStrategies == nil {
|
||||
return []string{}
|
||||
}
|
||||
return pm.patchStrategies
|
||||
}
|
||||
|
||||
func (pm PatchMeta) SetPatchStrategies(ps []string) {
|
||||
pm.patchStrategies = ps
|
||||
}
|
||||
|
||||
func (pm PatchMeta) GetPatchMergeKey() string {
|
||||
return pm.patchMergeKey
|
||||
}
|
||||
|
||||
func (pm PatchMeta) SetPatchMergeKey(pmk string) {
|
||||
pm.patchMergeKey = pmk
|
||||
}
|
||||
|
||||
type LookupPatchMeta interface {
|
||||
// LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map.
|
||||
LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error)
|
||||
// LookupPatchMetadataForSlice get subschema and the patch metadata for slice.
|
||||
LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error)
|
||||
// Get the type name of the field
|
||||
Name() string
|
||||
}
|
||||
|
||||
type PatchMetaFromStruct struct {
|
||||
T reflect.Type
|
||||
}
|
||||
|
||||
func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) {
|
||||
t, err := getTagStructType(dataStruct)
|
||||
return PatchMetaFromStruct{T: t}, err
|
||||
}
|
||||
|
||||
var _ LookupPatchMeta = PatchMetaFromStruct{}
|
||||
|
||||
func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) {
|
||||
fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key)
|
||||
if err != nil {
|
||||
return nil, PatchMeta{}, err
|
||||
}
|
||||
|
||||
return PatchMetaFromStruct{T: fieldType},
|
||||
PatchMeta{
|
||||
patchStrategies: fieldPatchStrategies,
|
||||
patchMergeKey: fieldPatchMergeKey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) {
|
||||
subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key)
|
||||
if err != nil {
|
||||
return nil, PatchMeta{}, err
|
||||
}
|
||||
elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct)
|
||||
t := elemPatchMetaFromStruct.T
|
||||
|
||||
var elemType reflect.Type
|
||||
switch t.Kind() {
|
||||
// If t is an array or a slice, get the element type.
|
||||
// If element is still an array or a slice, return an error.
|
||||
// Otherwise, return element type.
|
||||
case reflect.Array, reflect.Slice:
|
||||
elemType = t.Elem()
|
||||
if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice {
|
||||
return nil, PatchMeta{}, errors.New("unexpected slice of slice")
|
||||
}
|
||||
// If t is an pointer, get the underlying element.
|
||||
// If the underlying element is neither an array nor a slice, the pointer is pointing to a slice,
|
||||
// e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822
|
||||
// If the underlying element is either an array or a slice, return its element type.
|
||||
case reflect.Ptr:
|
||||
t = t.Elem()
|
||||
if t.Kind() == reflect.Array || t.Kind() == reflect.Slice {
|
||||
t = t.Elem()
|
||||
}
|
||||
elemType = t
|
||||
default:
|
||||
return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String())
|
||||
}
|
||||
|
||||
return PatchMetaFromStruct{T: elemType}, patchMeta, nil
|
||||
}
|
||||
|
||||
func (s PatchMetaFromStruct) Name() string {
|
||||
return s.T.Kind().String()
|
||||
}
|
||||
|
||||
func getTagStructType(dataStruct interface{}) (reflect.Type, error) {
|
||||
if dataStruct == nil {
|
||||
return nil, mergepatch.ErrBadArgKind(struct{}{}, nil)
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(dataStruct)
|
||||
// Get the underlying type for pointers
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Struct {
|
||||
return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct)
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type {
|
||||
t, err := getTagStructType(dataStruct)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
type PatchMetaFromOpenAPI struct {
|
||||
Schema openapi.Schema
|
||||
}
|
||||
|
||||
func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI {
|
||||
return PatchMetaFromOpenAPI{Schema: s}
|
||||
}
|
||||
|
||||
var _ LookupPatchMeta = PatchMetaFromOpenAPI{}
|
||||
|
||||
func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) {
|
||||
if s.Schema == nil {
|
||||
return nil, PatchMeta{}, nil
|
||||
}
|
||||
kindItem := NewKindItem(key, s.Schema.GetPath())
|
||||
s.Schema.Accept(kindItem)
|
||||
|
||||
err := kindItem.Error()
|
||||
if err != nil {
|
||||
return nil, PatchMeta{}, err
|
||||
}
|
||||
return PatchMetaFromOpenAPI{Schema: kindItem.subschema},
|
||||
kindItem.patchmeta, nil
|
||||
}
|
||||
|
||||
func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) {
|
||||
if s.Schema == nil {
|
||||
return nil, PatchMeta{}, nil
|
||||
}
|
||||
sliceItem := NewSliceItem(key, s.Schema.GetPath())
|
||||
s.Schema.Accept(sliceItem)
|
||||
|
||||
err := sliceItem.Error()
|
||||
if err != nil {
|
||||
return nil, PatchMeta{}, err
|
||||
}
|
||||
return PatchMetaFromOpenAPI{Schema: sliceItem.subschema},
|
||||
sliceItem.patchmeta, nil
|
||||
}
|
||||
|
||||
func (s PatchMetaFromOpenAPI) Name() string {
|
||||
schema := s.Schema
|
||||
return schema.GetName()
|
||||
}
|
||||
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go (generated, vendored, new file, 2174 lines)
File diff suppressed because it is too large
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go (generated, vendored, new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategicpatch
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/mergepatch"
|
||||
openapi "k8s.io/kube-openapi/pkg/util/proto"
|
||||
)
|
||||
|
||||
const (
|
||||
patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy"
|
||||
patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key"
|
||||
)
|
||||
|
||||
type LookupPatchItem interface {
|
||||
openapi.SchemaVisitor
|
||||
|
||||
Error() error
|
||||
Path() *openapi.Path
|
||||
}
|
||||
|
||||
type kindItem struct {
|
||||
key string
|
||||
path *openapi.Path
|
||||
err error
|
||||
patchmeta PatchMeta
|
||||
subschema openapi.Schema
|
||||
hasVisitKind bool
|
||||
}
|
||||
|
||||
func NewKindItem(key string, path *openapi.Path) *kindItem {
|
||||
return &kindItem{
|
||||
key: key,
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
var _ LookupPatchItem = &kindItem{}
|
||||
|
||||
func (item *kindItem) Error() error {
|
||||
return item.err
|
||||
}
|
||||
|
||||
func (item *kindItem) Path() *openapi.Path {
|
||||
return item.path
|
||||
}
|
||||
|
||||
func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) {
|
||||
item.err = errors.New("expected kind, but got primitive")
|
||||
}
|
||||
|
||||
func (item *kindItem) VisitArray(schema *openapi.Array) {
|
||||
item.err = errors.New("expected kind, but got slice")
|
||||
}
|
||||
|
||||
func (item *kindItem) VisitMap(schema *openapi.Map) {
|
||||
item.err = errors.New("expected kind, but got map")
|
||||
}
|
||||
|
||||
func (item *kindItem) VisitReference(schema openapi.Reference) {
|
||||
if !item.hasVisitKind {
|
||||
schema.SubSchema().Accept(item)
|
||||
}
|
||||
}
|
||||
|
||||
func (item *kindItem) VisitKind(schema *openapi.Kind) {
|
||||
subschema, ok := schema.Fields[item.key]
|
||||
if !ok {
|
||||
item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key}
|
||||
return
|
||||
}
|
||||
|
||||
mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions())
|
||||
if err != nil {
|
||||
item.err = err
|
||||
return
|
||||
}
|
||||
item.patchmeta = PatchMeta{
|
||||
patchStrategies: patchStrategies,
|
||||
patchMergeKey: mergeKey,
|
||||
}
|
||||
item.subschema = subschema
|
||||
}
|
||||
|
||||
type sliceItem struct {
|
||||
key string
|
||||
path *openapi.Path
|
||||
err error
|
||||
patchmeta PatchMeta
|
||||
subschema openapi.Schema
|
||||
hasVisitKind bool
|
||||
}
|
||||
|
||||
func NewSliceItem(key string, path *openapi.Path) *sliceItem {
|
||||
return &sliceItem{
|
||||
key: key,
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
var _ LookupPatchItem = &sliceItem{}
|
||||
|
||||
func (item *sliceItem) Error() error {
|
||||
return item.err
|
||||
}
|
||||
|
||||
func (item *sliceItem) Path() *openapi.Path {
|
||||
return item.path
|
||||
}
|
||||
|
||||
func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) {
|
||||
item.err = errors.New("expected slice, but got primitive")
|
||||
}
|
||||
|
||||
func (item *sliceItem) VisitArray(schema *openapi.Array) {
|
||||
if !item.hasVisitKind {
|
||||
item.err = errors.New("expected visit kind first, then visit array")
|
||||
}
|
||||
subschema := schema.SubType
|
||||
item.subschema = subschema
|
||||
}
|
||||
|
||||
func (item *sliceItem) VisitMap(schema *openapi.Map) {
|
||||
item.err = errors.New("expected slice, but got map")
|
||||
}
|
||||
|
||||
func (item *sliceItem) VisitReference(schema openapi.Reference) {
|
||||
if !item.hasVisitKind {
|
||||
schema.SubSchema().Accept(item)
|
||||
} else {
|
||||
item.subschema = schema.SubSchema()
|
||||
}
|
||||
}
|
||||
|
||||
func (item *sliceItem) VisitKind(schema *openapi.Kind) {
|
||||
subschema, ok := schema.Fields[item.key]
|
||||
if !ok {
|
||||
item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key}
|
||||
return
|
||||
}
|
||||
|
||||
mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions())
|
||||
if err != nil {
|
||||
item.err = err
|
||||
return
|
||||
}
|
||||
item.patchmeta = PatchMeta{
|
||||
patchStrategies: patchStrategies,
|
||||
patchMergeKey: mergeKey,
|
||||
}
|
||||
item.hasVisitKind = true
|
||||
subschema.Accept(item)
|
||||
}
|
||||
|
||||
func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) {
|
||||
ps, foundPS := extensions[patchStrategyOpenapiextensionKey]
|
||||
var patchStrategies []string
|
||||
var mergeKey, patchStrategy string
|
||||
var ok bool
|
||||
if foundPS {
|
||||
patchStrategy, ok = ps.(string)
|
||||
if ok {
|
||||
patchStrategies = strings.Split(patchStrategy, ",")
|
||||
} else {
|
||||
return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps)
|
||||
}
|
||||
}
|
||||
mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey]
|
||||
if foundMK {
|
||||
mergeKey, ok = mk.(string)
|
||||
if !ok {
|
||||
return "", nil, mergepatch.ErrBadArgType(mergeKey, mk)
|
||||
}
|
||||
}
|
||||
return mergeKey, patchStrategies, nil
|
||||
}
|
||||
vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS (generated, vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
approvers:
- pwittrock
reviewers:
- mengqiy
- apelisse
vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go (generated, vendored, new file, 513 lines)
@@ -0,0 +1,513 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package json is forked from the Go standard library to enable us to find the
|
||||
// field of a struct that a given JSON key maps to.
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
patchStrategyTagKey = "patchStrategy"
|
||||
patchMergeKeyTagKey = "patchMergeKey"
|
||||
)
|
||||
|
||||
// Finds the patchStrategy and patchMergeKey struct tag fields on a given
|
||||
// struct field given the struct type and the JSON name of the field.
|
||||
// It returns field type, a slice of patch strategies, merge key and error.
|
||||
// TODO: fix the returned errors to be introspectable.
|
||||
func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) (
|
||||
elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Struct {
|
||||
e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s",
|
||||
t.Kind().String())
|
||||
return
|
||||
}
|
||||
jf := []byte(jsonField)
|
||||
// Find the field that the JSON library would use.
|
||||
var f *field
|
||||
fields := cachedTypeFields(t)
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if bytes.Equal(ff.nameBytes, jf) {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
// Do case-insensitive comparison.
|
||||
if f == nil && ff.equalFold(ff.nameBytes, jf) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
// Find the reflect.Value of the most preferential struct field.
|
||||
tjf := t.Field(f.index[0])
|
||||
// we must navigate down all the anonymously included structs in the chain
|
||||
for i := 1; i < len(f.index); i++ {
|
||||
tjf = tjf.Type.Field(f.index[i])
|
||||
}
|
||||
patchStrategy := tjf.Tag.Get(patchStrategyTagKey)
|
||||
patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey)
|
||||
patchStrategies = strings.Split(patchStrategy, ",")
|
||||
elemType = tjf.Type
|
||||
return
|
||||
}
|
||||
e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField)
|
||||
return
|
||||
}
|
||||
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
name string
|
||||
nameBytes []byte // []byte(name)
|
||||
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
|
||||
|
||||
tag bool
|
||||
// index is the sequence of indexes from the containing type fields to this field.
|
||||
// it is a slice because anonymous structs will need multiple navigation steps to correctly
|
||||
// resolve the proper fields
|
||||
index []int
|
||||
typ reflect.Type
|
||||
omitEmpty bool
|
||||
quoted bool
|
||||
}
|
||||
|
||||
func (f field) String() string {
|
||||
return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted)
|
||||
}
|
||||
|
||||
func fillField(f field) field {
|
||||
f.nameBytes = []byte(f.name)
|
||||
f.equalFold = foldFunc(f.nameBytes)
|
||||
return f
|
||||
}
|
||||
|
||||
// byName sorts field by name, breaking ties with depth,
|
||||
// then breaking ties with "name came from json tag", then
|
||||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
}
|
||||
|
||||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that JSON should recognize for the given type.
|
||||
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
||||
// and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) []field {
|
||||
// Anonymous fields to explore at the current level and the next.
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
count := map[reflect.Type]int{}
|
||||
nextCount := map[reflect.Type]int{}
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.PkgPath != "" { // unexported
|
||||
continue
|
||||
}
|
||||
tag := sf.Tag.Get("json")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
name, opts := parseTag(tag)
|
||||
if !isValidTag(name) {
|
||||
name = ""
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := name != ""
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
fields = append(fields, fillField(field{
|
||||
name: name,
|
||||
tag: tagged,
|
||||
index: index,
|
||||
typ: ft,
|
||||
omitEmpty: opts.Contains("omitempty"),
|
||||
quoted: opts.Contains("string"),
|
||||
}))
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// It only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byName(fields))
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with JSON tags are promoted.
|
||||
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
|
||||
// have the same name, to find the single field that dominates the
|
||||
// others using Go's embedding rules, modified by the presence of
|
||||
// JSON tags. If there are multiple top-level fields, the boolean
|
||||
// will be false: This condition is an error in Go and we skip all
|
||||
// the fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order. The winner
|
||||
// must therefore be one with the shortest index length. Drop all
|
||||
// longer entries, which is easy: just truncate the slice.
|
||||
length := len(fields[0].index)
|
||||
tagged := -1 // Index of first tagged field.
|
||||
for i, f := range fields {
|
||||
if len(f.index) > length {
|
||||
fields = fields[:i]
|
||||
break
|
||||
}
|
||||
if f.tag {
|
||||
if tagged >= 0 {
|
||||
// Multiple tagged fields at the same level: conflict.
|
||||
// Return no field.
|
||||
return field{}, false
|
||||
}
|
||||
tagged = i
|
||||
}
|
||||
}
|
||||
if tagged >= 0 {
|
||||
return fields[tagged], true
|
||||
}
|
||||
// All remaining fields have the same length. If there's more than one,
|
||||
// we have a conflict (two fields named "X" at the same level) and we
|
||||
// return no field.
|
||||
if len(fields) > 1 {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
var fieldCache struct {
|
||||
sync.RWMutex
|
||||
m map[reflect.Type][]field
|
||||
}
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) []field {
|
||||
fieldCache.RLock()
|
||||
f := fieldCache.m[t]
|
||||
fieldCache.RUnlock()
|
||||
if f != nil {
|
||||
return f
|
||||
}
|
||||
|
||||
// Compute fields without lock.
|
||||
// Might duplicate effort but won't hold other computations back.
|
||||
f = typeFields(t)
|
||||
if f == nil {
|
||||
f = []field{}
|
||||
}
|
||||
|
||||
fieldCache.Lock()
|
||||
if fieldCache.m == nil {
|
||||
fieldCache.m = map[reflect.Type][]field{}
|
||||
}
|
||||
fieldCache.m[t] = f
|
||||
fieldCache.Unlock()
|
||||
return f
|
||||
}
|
||||
|
||||
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range s {
		switch {
		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
			// Backslash and quote chars are reserved, but
			// otherwise any punctuation chars are allowed
			// in a tag name.
		default:
			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
				return false
			}
		}
	}
	return true
}

const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'
	smallLongEss = '\u017f'
)

// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
	nonLetter := false
	special := false // special letter
	for _, b := range s {
		if b >= utf8.RuneSelf {
			return bytes.EqualFold
		}
		upper := b & caseMask
		if upper < 'A' || upper > 'Z' {
			nonLetter = true
		} else if upper == 'K' || upper == 'S' {
			// See above for why these letters are special.
			special = true
		}
	}
	if special {
		return equalFoldRight
	}
	if nonLetter {
		return asciiEqualFold
	}
	return simpleLetterEqualFold
}

// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
	for _, sb := range s {
		if len(t) == 0 {
			return false
		}
		tb := t[0]
		if tb < utf8.RuneSelf {
			if sb != tb {
				sbUpper := sb & caseMask
				if 'A' <= sbUpper && sbUpper <= 'Z' {
					if sbUpper != tb&caseMask {
						return false
					}
				} else {
					return false
				}
			}
			t = t[1:]
			continue
		}
		// sb is ASCII and t is not. t must be either kelvin
		// sign or long s; sb must be s, S, k, or K.
		tr, size := utf8.DecodeRune(t)
		switch sb {
		case 's', 'S':
			if tr != smallLongEss {
				return false
			}
		case 'k', 'K':
			if tr != kelvin {
				return false
			}
		default:
			return false
		}
		t = t[size:]
	}
	if len(t) > 0 {
		return false
	}
	return true
}

// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, sb := range s {
		tb := t[i]
		if sb == tb {
			continue
		}
		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
			if sb&caseMask != tb&caseMask {
				return false
			}
		} else {
			return false
		}
	}
	return true
}

// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
	if len(s) != len(t) {
		return false
	}
	for i, b := range s {
		if b&caseMask != t[i]&caseMask {
			return false
		}
	}
	return true
}
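The special-casing of 'k' and 's' exists because two non-ASCII runes fold to those ASCII letters under Unicode simple case folding, which the faster ASCII-only comparisons would miss. The standard library shows the same behaviour; this snippet is illustrative only and not part of the diff:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// U+212A KELVIN SIGN folds to 'k'; U+017F LATIN SMALL LETTER LONG S folds to 's'.
	fmt.Println(bytes.EqualFold([]byte("kelvin"), []byte("\u212Aelvin"))) // true
	fmt.Println(bytes.EqualFold([]byte("sea"), []byte("\u017Fea")))       // true
}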
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tagOptions(tag[idx+1:])
	}
	return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var next string
		i := strings.Index(s, ",")
		if i >= 0 {
			s, next = s[:i], s[i+1:]
		}
		if s == optionName {
			return true
		}
		s = next
	}
	return false
}
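parseTag and tagOptions.Contains together split a struct tag such as `json:"name,omitempty"` into its name and option flags. A throwaway sketch of the same split using only the standard library; the Pod type and field here are made up for illustration and are not part of the vendored code:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type Pod struct {
	Name string `json:"name,omitempty"`
}

func main() {
	tag := reflect.TypeOf(Pod{}).Field(0).Tag.Get("json") // "name,omitempty"

	// Equivalent of parseTag: everything before the first comma is the name,
	// the rest are comma-separated options checked by tagOptions.Contains.
	name, opts := tag, ""
	if i := strings.Index(tag, ","); i != -1 {
		name, opts = tag[:i], tag[i+1:]
	}
	fmt.Println(name) // name
	fmt.Println(opts) // omitempty
}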
202
vendor/k8s.io/kube-openapi/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
19
vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package proto is a collection of libraries for parsing and indexing the type definitions.
// The openapi spec contains the object model definitions and extensions metadata.
package proto
285
vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package proto

import (
	"fmt"
	"sort"
	"strings"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	yaml "gopkg.in/yaml.v2"
)
func newSchemaError(path *Path, format string, a ...interface{}) error {
	err := fmt.Sprintf(format, a...)
	if path.Len() == 0 {
		return fmt.Errorf("SchemaError: %v", err)
	}
	return fmt.Errorf("SchemaError(%v): %v", path, err)
}

// VendorExtensionToMap converts openapi VendorExtension to a map.
func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} {
	values := map[string]interface{}{}

	for _, na := range e {
		if na.GetName() == "" || na.GetValue() == nil {
			continue
		}
		if na.GetValue().GetYaml() == "" {
			continue
		}
		var value interface{}
		err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value)
		if err != nil {
			continue
		}

		values[na.GetName()] = value
	}

	return values
}

// Definitions is an implementation of `Models`. It looks for
// models in an openapi Schema.
type Definitions struct {
	models map[string]Schema
}

var _ Models = &Definitions{}

// NewOpenAPIData creates a new `Models` out of the openapi document.
func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) {
	definitions := Definitions{
		models: map[string]Schema{},
	}

	// Save the list of all models first. This will allow us to
	// validate that we don't have any dangling reference.
	for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() {
		definitions.models[namedSchema.GetName()] = nil
	}

	// Now, parse each model. We can validate that references exist.
	for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() {
		path := NewPath(namedSchema.GetName())
		schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path)
		if err != nil {
			return nil, err
		}
		definitions.models[namedSchema.GetName()] = schema
	}

	return &definitions, nil
}
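NewOpenAPIData is the entry point for the two-pass parse above: model names are registered first so that references can be validated, then every definition is parsed. Assuming an *openapi_v2.Document has already been obtained elsewhere (for example, the swagger document served by a Kubernetes API server), a hypothetical caller might look like this sketch, which is not part of the vendored file:

package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"k8s.io/kube-openapi/pkg/util/proto"
)

// describeModels lists every model in an already-parsed swagger document.
// Obtaining the *openapi_v2.Document itself is out of scope here.
func describeModels(doc *openapi_v2.Document) error {
	models, err := proto.NewOpenAPIData(doc)
	if err != nil {
		return err
	}
	for _, name := range models.ListModels() {
		fmt.Printf("%s: %s\n", name, models.LookupModel(name).GetName())
	}
	return nil
}

func main() {} // placeholder so the sketch compiles as a program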
// We believe the schema is a reference, verify that and return a new
// Schema
func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) {
	if len(s.GetProperties().GetAdditionalProperties()) > 0 {
		return nil, newSchemaError(path, "unallowed embedded type definition")
	}
	if len(s.GetType().GetValue()) > 0 {
		return nil, newSchemaError(path, "definition reference can't have a type")
	}

	if !strings.HasPrefix(s.GetXRef(), "#/definitions/") {
		return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef())
	}
	reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/")
	if _, ok := d.models[reference]; !ok {
		return nil, newSchemaError(path, "unknown model in reference: %q", reference)
	}
	return &Ref{
		BaseSchema:  d.parseBaseSchema(s, path),
		reference:   reference,
		definitions: d,
	}, nil
}

func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema {
	return BaseSchema{
		Description: s.GetDescription(),
		Extensions:  VendorExtensionToMap(s.GetVendorExtension()),
		Path:        *path,
	}
}

// We believe the schema is a map, verify and return a new schema
func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) {
	if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object {
		return nil, newSchemaError(path, "invalid object type")
	}
	if s.GetAdditionalProperties().GetSchema() == nil {
		return nil, newSchemaError(path, "invalid object doesn't have additional properties")
	}
	sub, err := d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path)
	if err != nil {
		return nil, err
	}
	return &Map{
		BaseSchema: d.parseBaseSchema(s, path),
		SubType:    sub,
	}, nil
}

func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) {
	var t string
	if len(s.GetType().GetValue()) > 1 {
		return nil, newSchemaError(path, "primitive can't have more than 1 type")
	}
	if len(s.GetType().GetValue()) == 1 {
		t = s.GetType().GetValue()[0]
	}
	switch t {
	case String:
	case Number:
	case Integer:
	case Boolean:
	case "": // Some models are completely empty, and can be safely ignored.
		// Do nothing
	default:
		return nil, newSchemaError(path, "Unknown primitive type: %q", t)
	}
	return &Primitive{
		BaseSchema: d.parseBaseSchema(s, path),
		Type:       t,
		Format:     s.GetFormat(),
	}, nil
}

func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) {
	if len(s.GetType().GetValue()) != 1 {
		return nil, newSchemaError(path, "array should have exactly one type")
	}
	if s.GetType().GetValue()[0] != array {
		return nil, newSchemaError(path, `array should have type "array"`)
	}
	if len(s.GetItems().GetSchema()) != 1 {
		return nil, newSchemaError(path, "array should have exactly one sub-item")
	}
	sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path)
	if err != nil {
		return nil, err
	}
	return &Array{
		BaseSchema: d.parseBaseSchema(s, path),
		SubType:    sub,
	}, nil
}

func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) {
	if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object {
		return nil, newSchemaError(path, "invalid object type")
	}
	if s.GetProperties() == nil {
		return nil, newSchemaError(path, "object doesn't have properties")
	}

	fields := map[string]Schema{}

	for _, namedSchema := range s.GetProperties().GetAdditionalProperties() {
		var err error
		path := path.FieldPath(namedSchema.GetName())
		fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path)
		if err != nil {
			return nil, err
		}
	}

	return &Kind{
		BaseSchema:     d.parseBaseSchema(s, path),
		RequiredFields: s.GetRequired(),
		Fields:         fields,
	}, nil
}

func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) {
	return &Arbitrary{
		BaseSchema: d.parseBaseSchema(s, path),
	}, nil
}

// ParseSchema creates a walkable Schema from an openapi schema. While
// this function is public, it doesn't leak through the interface.
func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) {
	objectTypes := s.GetType().GetValue()
	if len(objectTypes) == 1 {
		t := objectTypes[0]
		switch t {
		case object:
			return d.parseMap(s, path)
		case array:
			return d.parseArray(s, path)
		}
	}
	if s.GetXRef() != "" {
		return d.parseReference(s, path)
	}
	if s.GetProperties() != nil {
		return d.parseKind(s, path)
	}
	if len(objectTypes) == 0 || (len(objectTypes) == 1 && objectTypes[0] == "") {
		return d.parseArbitrary(s, path)
	}
	return d.parsePrimitive(s, path)
}

// LookupModel is public through the interface of Models. It
// returns a visitable schema from the given model name.
func (d *Definitions) LookupModel(model string) Schema {
	return d.models[model]
}

func (d *Definitions) ListModels() []string {
	models := []string{}

	for model := range d.models {
		models = append(models, model)
	}

	sort.Strings(models)
	return models
}

type Ref struct {
	BaseSchema

	reference   string
	definitions *Definitions
}

var _ Reference = &Ref{}

func (r *Ref) Reference() string {
	return r.reference
}

func (r *Ref) SubSchema() Schema {
	return r.definitions.models[r.reference]
}

func (r *Ref) Accept(v SchemaVisitor) {
	v.VisitReference(r)
}

func (r *Ref) GetName() string {
	return fmt.Sprintf("Reference to %q", r.reference)
}
276
vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go
generated
vendored
Normal file
@@ -0,0 +1,276 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package proto

import (
	"fmt"
	"sort"
	"strings"
)
// Defines openapi types.
const (
	Integer = "integer"
	Number  = "number"
	String  = "string"
	Boolean = "boolean"

	// These types are private as they should never leak, and are
	// represented by actual structs.
	array  = "array"
	object = "object"
)

// Models interface describes a model provider. It can give you the
// schema for a specific model.
type Models interface {
	LookupModel(string) Schema
	ListModels() []string
}

// SchemaVisitor is an interface that you need to implement if you want
// to "visit" an openapi schema. A dispatch on the Schema type will call
// the appropriate function based on its actual type:
// - Array is a list of one and only one given subtype
// - Map is a map of string to one and only one given subtype
// - Primitive can be string, integer, number and boolean.
// - Kind is an object with specific fields mapping to specific types.
// - Reference is a link to another definition.
type SchemaVisitor interface {
	VisitArray(*Array)
	VisitMap(*Map)
	VisitPrimitive(*Primitive)
	VisitKind(*Kind)
	VisitReference(Reference)
}

// SchemaVisitorArbitrary is an additional visitor interface which handles
// arbitrary types. For backwards compatibility, it's a separate interface
// which is checked for at runtime.
type SchemaVisitorArbitrary interface {
	SchemaVisitor
	VisitArbitrary(*Arbitrary)
}
// Schema is the base definition of an openapi type.
type Schema interface {
	// Giving a visitor here will let you visit the actual type.
	Accept(SchemaVisitor)

	// Pretty print the name of the type.
	GetName() string
	// Describes how to access this field.
	GetPath() *Path
	// Describes the field.
	GetDescription() string
	// Returns type extensions.
	GetExtensions() map[string]interface{}
}

// Path helps us keep track of type paths
type Path struct {
	parent *Path
	key    string
}

func NewPath(key string) Path {
	return Path{key: key}
}

func (p *Path) Get() []string {
	if p == nil {
		return []string{}
	}
	if p.key == "" {
		return p.parent.Get()
	}
	return append(p.parent.Get(), p.key)
}

func (p *Path) Len() int {
	return len(p.Get())
}

func (p *Path) String() string {
	return strings.Join(p.Get(), "")
}

// ArrayPath appends an array index and creates a new path
func (p *Path) ArrayPath(i int) Path {
	return Path{
		parent: p,
		key:    fmt.Sprintf("[%d]", i),
	}
}

// FieldPath appends a field name and creates a new path
func (p *Path) FieldPath(field string) Path {
	return Path{
		parent: p,
		key:    fmt.Sprintf(".%s", field),
	}
}
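Path builds field and array accessors that String() joins back into a dotted path. A small, illustrative use of the exported constructors, not part of the vendored file (the model name is just an example):

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/util/proto"
)

func main() {
	// Build the path "io.k8s.api.core.v1.Pod.spec.containers[0]" step by step.
	root := proto.NewPath("io.k8s.api.core.v1.Pod")
	spec := root.FieldPath("spec")
	containers := spec.FieldPath("containers")
	first := containers.ArrayPath(0)
	fmt.Println(first.String())
}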
// BaseSchema holds data used by each type of schema.
type BaseSchema struct {
	Description string
	Extensions  map[string]interface{}

	Path Path
}

func (b *BaseSchema) GetDescription() string {
	return b.Description
}

func (b *BaseSchema) GetExtensions() map[string]interface{} {
	return b.Extensions
}

func (b *BaseSchema) GetPath() *Path {
	return &b.Path
}

// Array must have all its elements of the same `SubType`.
type Array struct {
	BaseSchema

	SubType Schema
}

var _ Schema = &Array{}

func (a *Array) Accept(v SchemaVisitor) {
	v.VisitArray(a)
}

func (a *Array) GetName() string {
	return fmt.Sprintf("Array of %s", a.SubType.GetName())
}

// Kind is a complex object. It can have multiple different
// subtypes for each field, as defined in the `Fields` field. Mandatory
// fields are listed in `RequiredFields`. The key of the object is
// always of type `string`.
type Kind struct {
	BaseSchema

	// Lists names of required fields.
	RequiredFields []string
	// Maps field names to types.
	Fields map[string]Schema
}

var _ Schema = &Kind{}

func (k *Kind) Accept(v SchemaVisitor) {
	v.VisitKind(k)
}

func (k *Kind) GetName() string {
	properties := []string{}
	for key := range k.Fields {
		properties = append(properties, key)
	}
	return fmt.Sprintf("Kind(%v)", properties)
}

// IsRequired returns true if `field` is a required field for this type.
func (k *Kind) IsRequired(field string) bool {
	for _, f := range k.RequiredFields {
		if f == field {
			return true
		}
	}
	return false
}

// Keys returns an alphabetically sorted list of keys.
func (k *Kind) Keys() []string {
	keys := make([]string, 0)
	for key := range k.Fields {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}

// Map is an object whose values must all be of the same `SubType`.
// The key of the object is always of type `string`.
type Map struct {
	BaseSchema

	SubType Schema
}

var _ Schema = &Map{}

func (m *Map) Accept(v SchemaVisitor) {
	v.VisitMap(m)
}

func (m *Map) GetName() string {
	return fmt.Sprintf("Map of %s", m.SubType.GetName())
}

// Primitive is a literal. There can be multiple types of primitives,
// and this subtype can be visited through the `subType` field.
type Primitive struct {
	BaseSchema

	// Type of a primitive must be one of: integer, number, string, boolean.
	Type   string
	Format string
}

var _ Schema = &Primitive{}

func (p *Primitive) Accept(v SchemaVisitor) {
	v.VisitPrimitive(p)
}

func (p *Primitive) GetName() string {
	if p.Format == "" {
		return p.Type
	}
	return fmt.Sprintf("%s (%s)", p.Type, p.Format)
}

// Arbitrary is a value of any type (primitive, object or array)
type Arbitrary struct {
	BaseSchema
}

var _ Schema = &Arbitrary{}

func (a *Arbitrary) Accept(v SchemaVisitor) {
	if visitor, ok := v.(SchemaVisitorArbitrary); ok {
		visitor.VisitArbitrary(a)
	}
}

func (a *Arbitrary) GetName() string {
	return "Arbitrary value (primitive, object or array)"
}

// Reference implementation depends on the type of document.
type Reference interface {
	Schema

	Reference() string
	SubSchema() Schema
}
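The types above are consumed through SchemaVisitor: a Schema dispatches to exactly one Visit method according to its concrete type. A minimal, hypothetical visitor that prints the shape of a model might look like this sketch (it deliberately does not implement SchemaVisitorArbitrary, so Arbitrary nodes are silently skipped); it is illustrative only and not part of the diff:

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/util/proto"
)

// typePrinter prints one line per node it visits.
type typePrinter struct{}

func (p typePrinter) VisitArray(a *proto.Array)          { fmt.Println("array of", a.SubType.GetName()) }
func (p typePrinter) VisitMap(m *proto.Map)              { fmt.Println("map of", m.SubType.GetName()) }
func (p typePrinter) VisitPrimitive(pr *proto.Primitive) { fmt.Println("primitive", pr.GetName()) }
func (p typePrinter) VisitKind(k *proto.Kind) {
	for _, key := range k.Keys() {
		fmt.Printf("field %s (required=%v)\n", key, k.IsRequired(key))
	}
}
func (p typePrinter) VisitReference(r proto.Reference) {
	// Follow the reference one level down.
	if sub := r.SubSchema(); sub != nil {
		sub.Accept(p)
	}
}

// printModel assumes models was built with proto.NewOpenAPIData.
func printModel(models proto.Models, name string) {
	if schema := models.LookupModel(name); schema != nil {
		schema.Accept(typePrinter{})
	}
}

func main() {} // the sketch only defines the visitor; wiring it up needs a parsed document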
4
vendor/modules.txt
vendored
@@ -616,16 +616,19 @@ k8s.io/apimachinery/pkg/util/errors
k8s.io/apimachinery/pkg/util/framer
k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/mergepatch
k8s.io/apimachinery/pkg/util/naming
k8s.io/apimachinery/pkg/util/net
k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/util/sets
k8s.io/apimachinery/pkg/util/strategicpatch
k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/validation/field
k8s.io/apimachinery/pkg/util/wait
k8s.io/apimachinery/pkg/util/yaml
k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/cli-runtime v0.0.0-20181204004549-a04da5c88c07
## explicit
@@ -701,6 +704,7 @@ k8s.io/client-go/util/retry
## explicit
k8s.io/klog
# k8s.io/kube-openapi v0.0.0-20180108222231-a07b7bbb58e7
k8s.io/kube-openapi/pkg/util/proto
## explicit
# k8s.io/kubernetes v1.13.0
## explicit