diff --git a/k8s/portworx.yaml b/k8s/portworx.yaml index f29a54c3..39816339 100644 --- a/k8s/portworx.yaml +++ b/k8s/portworx.yaml @@ -1,4 +1,4 @@ -# SOURCE: https://install.portworx.com/?mc=false&kbver=1.17.1&b=true&s=%2Fdev%2Floop4&j=auto&c=px-workshop&stork=true&csi=true&lh=true&st=k8s +# SOURCE: https://install.portworx.com/?mc=false&kbver=1.18.2&b=true&s=%2Fdev%2Floop4&j=auto&c=px-workshop&stork=true&csi=true&lh=true&st=k8s --- kind: Service apiVersion: v1 @@ -233,6 +233,9 @@ rules: - apiGroups: [""] resources: ["persistentvolumeclaims", "persistentvolumes"] verbs: ["get", "list"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list"] - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "list", "update", "create"] @@ -246,6 +249,9 @@ rules: - apiGroups: ["stork.libopenstorage.org"] resources: ["backuplocations"] verbs: ["get", "list"] +- apiGroups: ["core.libopenstorage.org"] + resources: ["*"] + verbs: ["*"] - apiGroups: [""] resources: ["events"] verbs: ["create"] @@ -293,6 +299,7 @@ roleRef: name: px-role apiGroup: rbac.authorization.k8s.io --- + apiVersion: apps/v1 kind: DaemonSet metadata: @@ -300,8 +307,6 @@ metadata: namespace: kube-system labels: name: portworx - annotations: - portworx.com/install-source: "https://install.portworx.com/?mc=false&kbver=1.17.1&b=true&s=%2Fdev%2Floop4&j=auto&c=px-workshop&stork=true&csi=true&lh=true&st=k8s" spec: selector: matchLabels: @@ -331,7 +336,7 @@ spec: hostPID: false containers: - name: portworx - image: portworx/oci-monitor:2.3.2 + image: portworx/oci-monitor:2.5.1 imagePullPolicy: Always args: ["-c", "px-workshop", "-s", "/dev/loop4", "-secret_type", "k8s", "-j", "auto", "-b", @@ -341,7 +346,7 @@ spec: value: "1500" - name: "PX_TEMPLATE_VERSION" value: "v4" - - name: CSI_ENDPOINT + - name: CSI_ENDPOINT value: unix:///var/lib/kubelet/plugins/pxd.portworx.com/csi.sock livenessProbe: @@ -375,8 +380,6 @@ spec: mountPath: /etc/pwx - name: dev mountPath: /dev - - name: csi-driver-path - mountPath: /var/lib/kubelet/plugins/pxd.portworx.com - name: optpwx mountPath: /opt/pwx - name: procmount @@ -393,11 +396,11 @@ spec: mountPath: /var/run/dbus - name: csi-node-driver-registrar image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + imagePullPolicy: Always args: - "--v=5" - "--csi-address=$(ADDRESS)" - "--kubelet-registration-path=/var/lib/kubelet/plugins/pxd.portworx.com/csi.sock" - imagePullPolicy: Always env: - name: ADDRESS value: /csi/csi.sock @@ -507,7 +510,7 @@ rules: resources: ["secrets"] verbs: ["get", "list"] - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots", "volumesnapshotcontents", "volumesnapshotclasses", "volumesnapshots/status"] + resources: ["volumesnapshots", "volumesnapshotcontents", "volumesnapshotclasses", "volumesnapshots/status", "volumesnapshotcontents/status"] verbs: ["create", "get", "list", "watch", "update", "delete"] - apiGroups: [""] resources: ["nodes"] @@ -578,8 +581,8 @@ spec: serviceAccount: px-csi-account containers: - name: csi-external-provisioner + image: quay.io/openstorage/csi-provisioner:v1.6.0-1 imagePullPolicy: Always - image: quay.io/openstorage/csi-provisioner:v1.4.0-1 args: - "--v=5" - "--provisioner=pxd.portworx.com" @@ -595,7 +598,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-snapshotter - image: quay.io/k8scsi/csi-snapshotter:v2.0.0 + image: quay.io/k8scsi/csi-snapshotter:v2.1.0 imagePullPolicy: Always args: - "--v=3" @@ -609,9 +612,23 @@ spec: volumeMounts: - name: socket-dir mountPath: /csi - - name: csi-resizer + - 
name: csi-snapshot-controller + image: quay.io/k8scsi/snapshot-controller:v2.1.0 + imagePullPolicy: Always + args: + - "--v=3" + - "--leader-election=true" + env: + - name: ADDRESS + value: /csi/csi.sock + securityContext: + privileged: true + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-resizer + image: quay.io/k8scsi/csi-resizer:v0.5.0 imagePullPolicy: Always - image: quay.io/k8scsi/csi-resizer:v0.3.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" @@ -630,6 +647,510 @@ spec: path: /var/lib/kubelet/plugins/pxd.portworx.com type: DirectoryOrCreate --- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .driver + name: Driver + type: string + - JSONPath: .deletionPolicy + description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass + should be deleted when its bound VolumeSnapshot is deleted. + name: DeletionPolicy + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + preserveUnknownFields: false + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created + through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot + is deleted. Supported values are "Retain" and "Delete". "Retain" means + that the VolumeSnapshotContent and its physical snapshot on underlying + storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. 
+ type: object + required: + - deletionPolicy + - driver + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .spec.source.persistentVolumeClaimName + description: Name of the source PVC from where a dynamically taken snapshot will + be created. + name: SourcePVC + type: string + - JSONPath: .spec.source.volumeSnapshotContentName + description: Name of the VolumeSnapshotContent which represents a pre-provisioned + snapshot. + name: SourceSnapshotContent + type: string + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot. + name: RestoreSize + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + name: SnapshotClass + type: string + - JSONPath: .status.boundVolumeSnapshotContentName + description: The name of the VolumeSnapshotContent to which this VolumeSnapshot + is bound. + name: SnapshotContent + type: string + - JSONPath: .status.creationTime + description: Timestamp when the point-in-time snapshot is taken by the underlying + storage system. + name: CreationTime + type: date + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object in the same namespace as the VolumeSnapshot + object where the snapshot should be dynamically taken from. 
This + field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing + VolumeSnapshotContent object. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. If not specified, the default snapshot + class will be used if one exists. If not specified, and there is no + default snapshot class, dynamic snapshot creation will fail. Empty + string is not allowed for this field. TODO(xiangqian): a webhook validation + on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' + type: string + required: + - source + type: object + status: + description: 'status represents the current information of a snapshot. NOTE: + status can be modified by sources other than system controllers, and must + not be depended upon for accuracy. Controllers should only use information + from the VolumeSnapshotContent object after verifying that the binding + is accurate and complete.' + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName represents the name of + the VolumeSnapshotContent object to which the VolumeSnapshot object + is bound. If not specified, it indicates that the VolumeSnapshot object + has not been successfully bound to a VolumeSnapshotContent object + yet. NOTE: Specified boundVolumeSnapshotContentName alone does not + mean binding is valid. Controllers MUST always verify bidirectional + binding between VolumeSnapshot and VolumeSnapshotContent to + avoid possible security issues.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates that the creation time of the snapshot + is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. 
+ type: boolean + restoreSize: + anyOf: + - type: integer + - type: string + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot in bytes + name: RestoreSize + type: integer + - JSONPath: .spec.deletionPolicy + description: Determines whether this VolumeSnapshotContent and its physical snapshot + on the underlying storage system should be deleted when its bound VolumeSnapshot + is deleted. + name: DeletionPolicy + type: string + - JSONPath: .spec.driver + description: Name of the CSI driver used to create the physical snapshot on the + underlying storage system. + name: Driver + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: Name of the VolumeSnapshotClass to which this snapshot belongs. + name: VolumeSnapshotClass + type: string + - JSONPath: .spec.volumeSnapshotRef.name + description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + name: VolumeSnapshot + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. "Delete" + means that the VolumeSnapshotContent and its physical snapshot on + underlying storage system are deleted. In dynamic snapshot creation + case, this field will be filled in with the "DeletionPolicy" field + defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For + pre-existing snapshots, users MUST specify this field when creating + the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be the + same as the name returned by the CSI GetPluginName() call for that + driver. Required. + type: string + source: + description: source specifies from where a snapshot will be created. + This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a + pre-existing snapshot on the underlying storage system. This field + is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume + from which a snapshot should be dynamically taken from. This field + is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass to which this snapshot + belongs. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to + which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be provided + for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates the creation time is unknown. The + format of this field is a Unix nanoseconds time encoded as an int64. + On Unix, the command `date +%s%N` returns the current time in nanoseconds + since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the latest observed error during snapshot creation, + if any. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on + the underlying storage system. If not specified, it indicates that + dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- kind: Service apiVersion: v1 metadata: @@ -711,29 +1232,6 @@ spec: podInfoOnMount: false --- apiVersion: v1 -kind: ConfigMap -metadata: - name: stork-config - namespace: kube-system -data: - policy.cfg: |- - { - "kind": "Policy", - "apiVersion": "v1", - "extenders": [ - { - "urlPrefix": "http://stork-service.kube-system:8099", - "apiVersion": "v1beta1", - "filterVerb": "filter", - "prioritizeVerb": "prioritize", - "weight": 5, - "enableHttps": false, - "nodeCacheCapable": false - } - ] - } ---- -apiVersion: v1 kind: ServiceAccount metadata: name: stork-account @@ -761,19 +1259,6 @@ roleRef: name: stork-role apiGroup: rbac.authorization.k8s.io --- -kind: Service -apiVersion: v1 -metadata: - name: stork-service - namespace: kube-system -spec: - selector: - name: stork - ports: - - protocol: TCP - port: 8099 - targetPort: 8099 ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -801,23 +1286,6 @@ spec: name: stork tier: control-plane spec: - containers: - - command: - - /stork - - --driver=pxd - - --verbose - - --leader-elect=true - - --health-monitor-interval=120 - imagePullPolicy: Always - image: openstorage/stork:2.3.1 - env: - - name: "PX_SERVICE_NAME" - value: "portworx-api" - resources: - requests: - cpu: '0.1' - name: stork - hostPID: false affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -828,6 +1296,23 @@ spec: values: - stork topologyKey: "kubernetes.io/hostname" + hostPID: false + containers: + - command: + - /stork + - --driver=pxd + - --verbose + - --leader-elect=true + - --health-monitor-interval=120 + image: openstorage/stork:2.4.0 + imagePullPolicy: Always + env: + - name: "PX_SERVICE_NAME" + value: "portworx-api" + resources: + requests: + cpu: '0.1' + name: stork serviceAccountName: stork-account --- kind: StorageClass @@ -836,6 +1321,43 @@ metadata: name: stork-snapshot-sc provisioner: stork-snapshot --- +kind: Service +apiVersion: v1 +metadata: + name: stork-service + namespace: kube-system +spec: + selector: + name: stork + ports: + - protocol: TCP + port: 8099 + targetPort: 8099 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: stork-config + namespace: kube-system +data: + policy.cfg: |- + { + "kind": "Policy", + "apiVersion": "v1", + "extenders": [ + { + "urlPrefix": "http://stork-service.kube-system:8099", + "apiVersion": "v1beta1", + "filterVerb": "filter", + "prioritizeVerb": "prioritize", + "weight": 5, + "enableHttps": false, + "nodeCacheCapable": false, + "httpTimeout": 300000000000 + } + ] + } +--- apiVersion: v1 kind: ServiceAccount metadata: @@ -929,29 +1451,6 @@ spec: name: stork-scheduler name: stork-scheduler spec: - containers: - - command: - - /usr/local/bin/kube-scheduler - - --address=0.0.0.0 - - --leader-elect=true - - --scheduler-name=stork - - --policy-configmap=stork-config - - --policy-configmap-namespace=kube-system - - --lock-object-name=stork-scheduler - image: gcr.io/google_containers/kube-scheduler-amd64:v1.17.1 - livenessProbe: - httpGet: - path: /healthz - port: 10251 - initialDelaySeconds: 15 - name: stork-scheduler - readinessProbe: - httpGet: - path: /healthz - port: 10251 - resources: - requests: - cpu: '0.1' affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -963,6 +1462,30 @@ spec: - stork-scheduler topologyKey: "kubernetes.io/hostname" 
hostPID: false + containers: + - command: + - /usr/local/bin/kube-scheduler + - --address=0.0.0.0 + - --leader-elect=true + - --scheduler-name=stork + - --policy-configmap=stork-config + - --policy-configmap-namespace=kube-system + - --lock-object-name=stork-scheduler + image: gcr.io/google_containers/kube-scheduler-amd64:v1.18.2 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz + port: 10251 + initialDelaySeconds: 15 + name: stork-scheduler + readinessProbe: + httpGet: + path: /healthz + port: 10251 + resources: + requests: + cpu: '0.1' serviceAccountName: stork-scheduler-account --- apiVersion: v1 @@ -1067,7 +1590,7 @@ spec: spec: initContainers: - name: config-init - image: portworx/lh-config-sync:2.0.5 + image: portworx/lh-config-sync:2.0.7 imagePullPolicy: Always args: - "init" @@ -1076,7 +1599,7 @@ spec: mountPath: /config/lh containers: - name: px-lighthouse - image: portworx/px-lighthouse:2.0.6 + image: portworx/px-lighthouse:2.0.7 imagePullPolicy: Always args: [ "-kubernetes", "true" ] ports: @@ -1086,7 +1609,7 @@ spec: - name: config mountPath: /config/lh - name: config-sync - image: portworx/lh-config-sync:2.0.5 + image: portworx/lh-config-sync:2.0.7 imagePullPolicy: Always args: - "sync" @@ -1094,13 +1617,135 @@ spec: - name: config mountPath: /config/lh - name: stork-connector - image: portworx/lh-stork-connector:2.0.5 + image: portworx/lh-stork-connector:2.0.7 imagePullPolicy: Always serviceAccountName: px-lh-account volumes: - name: config emptyDir: {} --- +apiVersion: v1 +kind: ConfigMap +metadata: + name: autopilot-config + namespace: kube-system +data: + config.yaml: |- + providers: + - name: default + type: prometheus + params: url=http://prometheus:9090 + min_poll_interval: 2 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: autopilot-account + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: autopilot-role +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: autopilot-role-binding +subjects: +- kind: ServiceAccount + name: autopilot-account + namespace: kube-system +roleRef: + kind: ClusterRole + name: autopilot-role + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + tier: control-plane + name: autopilot + namespace: kube-system +spec: + selector: + matchLabels: + name: autopilot + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + replicas: 1 + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + name: autopilot + tier: control-plane + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "name" + operator: In + values: + - autopilot + topologyKey: "kubernetes.io/hostname" + hostPID: false + containers: + - command: + - /autopilot + - -f + - ./etc/config/config.yaml + - -log-level + - debug + imagePullPolicy: Always + image: portworx/autopilot:1.2.0 + resources: + requests: + cpu: '0.1' + securityContext: + privileged: false + name: autopilot + volumeMounts: + - name: config-volume + mountPath: /etc/config + serviceAccountName: autopilot-account + volumes: + - name: config-volume + configMap: + name: autopilot-config + items: + - key: config.yaml + path: config.yaml +--- +apiVersion: v1 +kind: Service 
+metadata:
+  name: autopilot
+  namespace: kube-system
+  labels:
+    name: autopilot-service
+spec:
+  ports:
+  - name: autopilot
+    protocol: TCP
+    port: 9628
+  selector:
+    name: autopilot
+    tier: control-plane
+---
 # That one is an extra.
 # Create a default Storage Class to simplify Portworx setup.
 kind: StorageClass
@@ -1113,4 +1758,59 @@ provisioner: kubernetes.io/portworx-volume
 parameters:
   repl: "2"
   priority_io: "high"
-
+---
+# This DaemonSet will set up the (virtual) block device
+# used by portworx.
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: setup-loop4-for-portworx
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: setup-loop4-for-portworx
+  template:
+    metadata:
+      labels:
+        app: setup-loop4-for-portworx
+    spec:
+      volumes:
+      - name: hostfs
+        hostPath:
+          path: /
+      tolerations:
+      - effect: NoSchedule
+        operator: Exists
+      initContainers:
+      - name: setup-loop4-for-portworx
+        image: alpine
+        volumeMounts:
+        - name: hostfs
+          mountPath: /hostfs
+        securityContext:
+          privileged: true
+        command:
+        - chroot
+        - /hostfs
+        - /bin/sh
+        - -c
+        - |
+          set -e
+          if ! [ -f /portworx.blk ]; then
+            echo "Creating /portworx.blk..."
+            truncate --size 10G /portworx.blk
+          fi
+          if ! grep -q loop /proc/devices; then
+            echo "Loading loop module..."
+            modprobe loop
+          fi
+          echo "Checking /dev/loop4..."
+          if ! losetup /dev/loop4; then
+            echo "Associating /dev/loop4 with /portworx.blk..."
+            losetup /dev/loop4 /portworx.blk
+          fi
+          echo "Loop device setup complete."
+      containers:
+      - name: do-nothing
+        image: k8s.gcr.io/pause
diff --git a/k8s/postgres.yaml b/k8s/postgres.yaml
index a9331f2a..69c90c37 100644
--- a/k8s/postgres.yaml
+++ b/k8s/postgres.yaml
@@ -22,7 +22,10 @@ spec:
         command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
       containers:
       - name: postgres
-        image: postgres:11
+        image: postgres:12
+        env:
+        - name: POSTGRES_HOST_AUTH_METHOD
+          value: trust
         volumeMounts:
         - mountPath: /var/lib/postgresql/data
           name: postgres
diff --git a/slides/k8s/portworx.md b/slides/k8s/portworx.md
index 796d29e5..0a0badac 100644
--- a/slides/k8s/portworx.md
+++ b/slides/k8s/portworx.md
@@ -74,29 +74,78 @@
 
 ---
 
-## Portworx requirements
+## Installing Portworx
 
-- Kubernetes cluster ✔️
+- Portworx installation is relatively simple
 
-- Optional key/value store (etcd or Consul) ❌
+- ... But we made it *even simpler!*
 
-- At least one available block device ❌
+- We are going to use a YAML manifest that will take care of everything
+
+- Warning: this manifest is customized for a very specific setup
+
+  (like the VMs that we provide during workshops and training sessions)
+
+- It will probably *not work* if you are using a different setup
+
+  (like Docker Desktop, k3s, MicroK8s, Minikube ...)
 
 ---
 
-## The key-value store
+## The simplified Portworx installer
 
-- In the current version of Portworx (1.4) it is recommended to use etcd or Consul
+- The Portworx installation will take a few minutes
 
-- But Portworx also has beta support for an embedded key/value store
+- Let's start it, then we'll explain what happens behind the scenes
 
-- For simplicity, we are going to use the latter option
+.exercise[
 
-  (but if we have deployed Consul or etcd, we can use that, too)
+- Install Portworx:
+  ```bash
+  kubectl apply -f ~/container.training/k8s/portworx.yaml
+  ```
+
+]
+
+
+
+*Note: this was tested with Kubernetes 1.18. Newer versions may or may not work.*
 
 ---
 
-## One available block device
+class: extra-details
+
+## What's in this YAML manifest?
+
+- Portworx installation itself, pre-configured for our setup
+
+- A default *Storage Class* using Portworx
+
+- A *Daemon Set* to create loop devices on each node of the cluster
+
+---
+
+class: extra-details
+
+## Portworx installation
+
+- The official way to install Portworx is to use [PX-Central](https://central.portworx.com/)
+
+  (this requires a free account)
+
+- PX-Central will ask us a few questions about our cluster
+
+  (Kubernetes version, on-prem/cloud deployment, etc.)
+
+- Using our answers, it will generate a YAML manifest that we can use
+
+---
+
+class: extra-details
+
+## Portworx storage configuration
+
+- Portworx needs at least one *block device*
 
 - Block device = disk or partition on a disk
@@ -112,71 +161,41 @@
 
 ---
 
+class: extra-details
+
 ## Setting up a loop device
 
-- We are going to create a 10 GB (empty) file on each node
+- Our `portworx.yaml` manifest includes a *Daemon Set* that will:
 
-- Then make a loop device from it, to be used by Portworx
+  - create a 10 GB (empty) file on each node
 
-.exercise[
+  - load the `loop` module (if it's not already loaded)
 
-- Create a 10 GB file on each node:
-  ```bash
-  for N in $(seq 1 4); do ssh node$N sudo truncate --size 10G /portworx.blk; done
-  ```
-  (If SSH asks to confirm host keys, enter `yes` each time.)
+  - associate a loop device with the 10 GB file
 
-- Associate the file to a loop device on each node:
-  ```bash
-  for N in $(seq 1 4); do ssh node$N sudo losetup /dev/loop4 /portworx.blk; done
-  ```
-
-]
-
----
-
-## Installing Portworx
-
-- To install Portworx, we need to go to https://install.portworx.com/
-
-- This website will ask us a bunch of questions about our cluster
-
-- Then, it will generate a YAML file that we should apply to our cluster
-
---
-
-- Or, we can just apply that YAML file directly (it's in `k8s/portworx.yaml`)
-
-.exercise[
-
-- Install Portworx:
-  ```bash
-  kubectl apply -f ~/container.training/k8s/portworx.yaml
-  ```
-
-]
+- After these steps, we have a block device that Portworx can use
 
 ---
 
 class: extra-details
 
-## Generating a custom YAML file
+## Implementation details
 
-If you want to generate a YAML file tailored to your own needs, the easiest
-way is to use https://install.portworx.com/.
+- The file is `/portworx.blk`
 
-FYI, this is how we obtained the YAML file used earlier:
-```
-KBVER=$(kubectl version -o json | jq -r .serverVersion.gitVersion)
-BLKDEV=/dev/loop4
-curl https://install.portworx.com/1.4/?kbver=$KBVER&b=true&s=$BLKDEV&c=px-workshop&stork=true&lh=true
-```
-If you want to use an external key/value store, add one of the following:
-```
-&k=etcd://`XXX`:2379
-&k=consul://`XXX`:8500
-```
-... where `XXX` is the name or address of your etcd or Consul server.
+  (it is a [sparse file](https://en.wikipedia.org/wiki/Sparse_file) created with `truncate`)
+
+- The loop device is `/dev/loop4`
+
+- This can be verified by running `sudo losetup`
+
+- The *Daemon Set* uses a privileged *Init Container*
+
+- We can check the logs of that container with:
+  ```bash
+  kubectl logs --selector=app=setup-loop4-for-portworx \
+               -c setup-loop4-for-portworx
+  ```
 
 ---
 
@@ -276,11 +295,9 @@ parameters:
   priority_io: "high"
 ```
 
-- It says "use Portworx to create volumes"
+- It says "use Portworx to create volumes and keep 2 replicas of these volumes"
 
-- It tells Portworx to "keep 2 replicas of these volumes"
-
-- It marks the Storage Class as being the default one
+- The annotation makes this Storage Class the default one
 
 ---
 
@@ -323,7 +340,10 @@ spec:
       schedulerName: stork
       containers:
       - name: postgres
-        image: postgres:11
+        image: postgres:12
+        env:
+        - name: POSTGRES_HOST_AUTH_METHOD
+          value: trust
         volumeMounts:
         - mountPath: /var/lib/postgresql/data
           name: postgres
@@ -401,14 +421,14 @@ autopilot prompt detection expects $ or # at the beginning of the line.
 
 - Populate it with `pgbench`:
   ```bash
-  pgbench -i -s 10 demo
+  pgbench -i demo
   ```
 
 ]
 
 - The `-i` flag means "create tables"
 
-- The `-s 10` flag means "create 10 x 100,000 rows"
+- If you want more data in the test tables, add e.g. `-s 10` (to get 10x more rows)
 
 ---
 
@@ -428,11 +448,55 @@ autopilot prompt detection expects $ or # at the beginning of the line.
   psql demo -c "select count(*) from pgbench_accounts"
   ```
 
-
+- Check that `pgbench_history` is currently empty:
+  ```bash
+  psql demo -c "select count(*) from pgbench_history"
+  ```
 ]
 
-(We should see a count of 1,000,000 rows.)
+---
+
+## Testing the load generator
+
+- Let's use `pgbench` to generate a few transactions
+
+.exercise[
+
+- Run `pgbench` for 10 seconds, reporting progress every second:
+  ```bash
+  pgbench -P 1 -T 10 demo
+  ```
+
+- Check the size of the history table now:
+  ```bash
+  psql demo -c "select count(*) from pgbench_history"
+  ```
+
+]
+
+Note: on small cloud instances, a typical speed is about 100 transactions/second.
+
+---
+
+## Generating transactions
+
+- Now let's use `pgbench` to generate more transactions
+
+- While it's running, we will disrupt the database server
+
+.exercise[
+
+- Run `pgbench` for 10 minutes, reporting progress every second:
+  ```bash
+  pgbench -P 1 -T 600 demo
+  ```
+
+- You can use a longer time period if you need more time to run the next steps
+
+
+
+]
 
 ---
 
@@ -522,15 +586,18 @@ By "disrupt" we mean: "disconnect it from the network".
 ```key ^J```
 -->
 
-- Check the number of rows in the `pgbench_accounts` table:
+- Check how many transactions are now in the `pgbench_history` table:
   ```bash
-  psql demo -c "select count(*) from pgbench_accounts"
+  psql demo -c "select count(*) from pgbench_history"
   ```
 
 ]
 
+If the 10-second test that we ran earlier gave e.g. 80 transactions per second,
+and we failed the node after 30 seconds, we should have about 2400 rows in that table.
+
 ---
 
 ## Double-check that the pod has really moved
@@ -598,7 +665,7 @@ class: extra-details
 
 - If we need to see what's going on with Portworx:
   ```
-  PXPOD=$(kubectl -n kube-system get pod -l name=portworx -o json |
+  PXPOD=$(kubectl -n kube-system get pod -l name=portworx -o json |
           jq -r .items[0].metadata.name)
   kubectl -n kube-system exec $PXPOD -- /opt/pwx/bin/pxctl status
   ```
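+
+---
+
+class: extra-details
+
+## Double-checking the storage setup
+
+- If we want to verify the setup ourselves, here are two quick checks
+
+  (assuming that we can SSH to the nodes, like on the VMs that we provide)
+
+- Confirm that `/dev/loop4` is backed by `/portworx.blk`:
+  ```bash
+  ssh node1 sudo losetup /dev/loop4
+  ```
+
+- Confirm that the Storage Class created by our manifest is the default one:
+  ```bash
+  kubectl get storageclass
+  ```
+
+  (the default Storage Class is flagged as `(default)` in that list)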