mirror of
https://github.com/SynologyOpenSource/synology-csi.git
synced 2026-02-13 21:00:03 +00:00
Update to version 1.1.0
This commit is contained in:
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,2 +1,3 @@
|
||||
bin/
|
||||
config/client-info.yml
|
||||
test/sanity/sanity-test-secret-file.yaml
|
||||
|
||||
@@ -24,7 +24,7 @@ FROM alpine:latest
|
||||
LABEL maintainers="Synology Authors" \
|
||||
description="Synology CSI Plugin"
|
||||
|
||||
RUN apk add --no-cache e2fsprogs e2fsprogs-extra xfsprogs xfsprogs-extra blkid util-linux iproute2 bash btrfs-progs
|
||||
RUN apk add --no-cache e2fsprogs e2fsprogs-extra xfsprogs xfsprogs-extra blkid util-linux iproute2 bash btrfs-progs ca-certificates cifs-utils
|
||||
|
||||
# Create symbolic link for chroot.sh
|
||||
WORKDIR /
|
||||
|
||||
3
Makefile
3
Makefile
@@ -2,7 +2,7 @@
|
||||
|
||||
REGISTRY_NAME=synology
|
||||
IMAGE_NAME=synology-csi
|
||||
IMAGE_VERSION=v1.0.1
|
||||
IMAGE_VERSION=v1.1.0
|
||||
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(IMAGE_VERSION)
|
||||
|
||||
# For now, only build linux/amd64 platform
|
||||
@@ -32,6 +32,7 @@ synocli:
|
||||
$(BUILD_ENV) go build -v -ldflags $(BUILD_FLAGS) -o ./bin/synocli ./synocli
|
||||
|
||||
test:
|
||||
go clean -testcache
|
||||
go test -v ./test/...
|
||||
clean:
|
||||
-rm -rf ./bin
|
||||
|
||||
68
README.md
68
README.md
@@ -4,10 +4,11 @@ The official [Container Storage Interface](https://github.com/container-storage-
|
||||
|
||||
### Container Images & Kubernetes Compatibility
|
||||
Driver Name: csi.san.synology.com
|
||||
| Driver Version | Image | Supported K8s Version |
|
||||
| -------------- | --------------------------------------------------------------------- | --------------------- |
|
||||
| [v1.0.1](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.0.1) | [synology-csi:v1.0.1](https://hub.docker.com/r/synology/synology-csi) | 1.20+ |
|
||||
| [v1.0.0](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.0.0) | [synology-csi:v1.0.0](https://hub.docker.com/r/synology/synology-csi) | 1.19 |
|
||||
| Driver Version | Image | Supported K8s Version |
|
||||
| -------------------------------------------------------------------------------- | --------------------------------------------------------------------- | --------------------- |
|
||||
| [v1.1.0](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.1.0) | [synology-csi:v1.1.0](https://hub.docker.com/r/synology/synology-csi) | 1.20+ |
|
||||
| [v1.0.1](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.0.1) | [synology-csi:v1.0.1](https://hub.docker.com/r/synology/synology-csi) | 1.20+ |
|
||||
| [v1.0.0](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.0.0) | [synology-csi:v1.0.0](https://hub.docker.com/r/synology/synology-csi) | 1.19 |
|
||||
|
||||
|
||||
|
||||
@@ -99,6 +100,7 @@ Create and apply StorageClasses with the properties you want.
|
||||
|
||||
1. Create YAML files using the one at `deploy/kubernetes/<k8s version>/storage-class.yml` as the example, whose content is as below:
|
||||
|
||||
**iSCSI Protocol**
|
||||
```
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
@@ -114,18 +116,56 @@ Create and apply StorageClasses with the properties you want.
|
||||
reclaimPolicy: Retain
|
||||
allowVolumeExpansion: true
|
||||
```
|
||||
|
||||
**SMB/CIFS Protocol**
|
||||
|
||||
Before creating an SMB/CIFS storage class, you must **create a secret** and specify the DSM user whom you want to give permissions to.
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: cifs-csi-credentials
|
||||
namespace: default
|
||||
type: Opaque
|
||||
stringData:
|
||||
username: <username> # DSM user account accessing the shared folder
|
||||
password: <password> # DSM user password accessing the shared folder
|
||||
```
|
||||
|
||||
After creating the secret, create a storage class and fill the secret for node-stage-secret. This is a **required** step if you're using SMB, or there will be errors when staging volumes.
|
||||
|
||||
```
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synostorage-smb
|
||||
provisioner: csi.san.synology.com
|
||||
parameters:
|
||||
protocol: "smb"
|
||||
dsm: '192.168.1.1'
|
||||
location: '/volume1'
|
||||
csi.storage.k8s.io/node-stage-secret-name: "cifs-csi-credentials"
|
||||
csi.storage.k8s.io/node-stage-secret-namespace: "default"
|
||||
reclaimPolicy: Delete
|
||||
allowVolumeExpansion: true
|
||||
```
|
||||
|
||||
2. Configure the StorageClass properties by assigning the parameters in the table. You can also leave blank if you don’t have a preference:
|
||||
|
||||
| Name | Type | Description | Default |
|
||||
| ---------- | ------ | ----------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| *dsm* | string | The IPv4 address of your DSM, which must be included in the `client-info.yml` for the CSI driver to log in to DSM | - |
|
||||
| *location* | string | The location (/volume1, /volume2, ...) on DSM where the LUN for *PersistentVolume* will be created | - |
|
||||
| *fsType* | string | The formatting file system of the *PersistentVolumes* when you mount them on the pods | 'ext4' |
|
||||
| Name | Type | Description | Default | Supported protocols |
|
||||
| ------------------------------------------------ | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------------------- |
|
||||
| *dsm* | string | The IPv4 address of your DSM, which must be included in the `client-info.yml` for the CSI driver to log in to DSM | - | iSCSI, SMB |
|
||||
| *location* | string | The location (/volume1, /volume2, ...) on DSM where the LUN for *PersistentVolume* will be created | - | iSCSI, SMB |
|
||||
| *fsType* | string | The formatting file system of the *PersistentVolumes* when you mount them on the pods. This parameter only works with iSCSI. For SMB, the fsType is always ‘cifs‘. | 'ext4' | iSCSI |
|
||||
| *protocol* | string | The backing storage protocol. Enter ‘iscsi’ to create LUNs or ‘smb‘ to create shared folders on DSM. | 'iscsi' | iSCSI, SMB |
|
||||
| *csi.storage.k8s.io/node-stage-secret-name* | string | The name of node-stage-secret. Required if DSM shared folder is accessed via SMB. | - | SMB |
|
||||
| *csi.storage.k8s.io/node-stage-secret-namespace* | string | The namespace of node-stage-secret. Required if DSM shared folder is accessed via SMB. | - | SMB |
|
||||
|
||||
**Notice**
|
||||
|
||||
- If you leave the parameter *location* blank, the CSI driver will choose a volume on DSM with available storage to create the volumes.
|
||||
- All volumes created by the CSI driver are Thin Provisioned LUNs on DSM. This will allow you to take snapshots of them.
|
||||
- All iSCSI volumes created by the CSI driver are Thin Provisioned LUNs on DSM. This will allow you to take snapshots of them.
|
||||
|
||||
3. Apply the YAML files to the Kubernetes cluster.
|
||||
|
||||
@@ -154,10 +194,10 @@ Create and apply VolumeSnapshotClasses with the properties you want.
|
||||
|
||||
2. Configure volume snapshot class properties by assigning the following parameters, all parameters are optional:
|
||||
|
||||
| Name | Type | Description | Default |
|
||||
| ------------- | ------ | -------------------------------------------- | ------- |
|
||||
| *description* | string | The description of the snapshot on DSM | "" |
|
||||
| *is_locked* | string | Whether you want to lock the snapshot on DSM | 'false' |
|
||||
| Name | Type | Description | Default | Supported protocols |
|
||||
| ------------- | ------ | -------------------------------------------- | ------- | ------------------- |
|
||||
| *description* | string | The description of the snapshot on DSM | "" | iSCSI |
|
||||
| *is_locked* | string | Whether you want to lock the snapshot on DSM | 'false' | iSCSI, SMB |
|
||||
|
||||
3. Apply the YAML files to the Kubernetes cluster.
|
||||
|
||||
|
||||
20
deploy/example/storageclass-smb.yaml
Normal file
20
deploy/example/storageclass-smb.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synology-smb-storage
|
||||
provisioner: csi.san.synology.com
|
||||
parameters:
|
||||
protocol: "smb" # required for smb protocol
|
||||
# Before creating an SMB storage class, you must create a secret and specify the DSM user whom you want to give permissions to.
|
||||
csi.storage.k8s.io/node-stage-secret-name: "cifs-csi-credentials" # required for smb protocol
|
||||
csi.storage.k8s.io/node-stage-secret-namespace: "default" # required for smb protocol
|
||||
# dsm: "1.1.1.1"
|
||||
# location: '/volume1'
|
||||
# mountOptions:
|
||||
# - dir_mode=0777
|
||||
# - file_mode=0777
|
||||
# - uid=0
|
||||
# - gid=0
|
||||
reclaimPolicy: Delete
|
||||
allowVolumeExpansion: true
|
||||
@@ -46,6 +46,9 @@ rules:
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
@@ -140,7 +143,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: synology/synology-csi:v1.0.1
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -86,7 +86,7 @@ spec:
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: IfNotPresent
|
||||
image: synology/synology-csi:v1.0.1
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -81,7 +81,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: synology/synology-csi:v1.0.1
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -7,5 +7,5 @@ metadata:
|
||||
driver: csi.san.synology.com
|
||||
deletionPolicy: Delete
|
||||
# parameters:
|
||||
# description: 'Kubernetes CSI'
|
||||
# description: 'Kubernetes CSI' # only for iscsi protocol
|
||||
# is_locked: 'false'
|
||||
@@ -46,6 +46,9 @@ rules:
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
@@ -140,7 +143,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: synology/synology-csi:v1.0.1
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -86,7 +86,7 @@ spec:
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: IfNotPresent
|
||||
image: synology/synology-csi:v1.0.1
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -81,7 +81,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: synology/synology-csi:v1.0.1
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -7,5 +7,5 @@ metadata:
|
||||
driver: csi.san.synology.com
|
||||
deletionPolicy: Delete
|
||||
# parameters:
|
||||
# description: 'Kubernetes CSI'
|
||||
# description: 'Kubernetes CSI' # only for iscsi protocol
|
||||
# is_locked: 'false'
|
||||
6
go.mod
6
go.mod
@@ -5,14 +5,14 @@ go 1.16
|
||||
require (
|
||||
github.com/antonfisher/nested-logrus-formatter v1.3.1
|
||||
github.com/cenkalti/backoff/v4 v4.1.1
|
||||
github.com/container-storage-interface/spec v1.3.0
|
||||
github.com/golang/protobuf v1.4.3
|
||||
github.com/container-storage-interface/spec v1.5.0
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.9.1
|
||||
github.com/kubernetes-csi/csi-test/v4 v4.2.0
|
||||
github.com/kubernetes-csi/csi-test/v4 v4.3.0
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/pflag v1.0.5
|
||||
google.golang.org/grpc v1.34.0
|
||||
google.golang.org/protobuf v1.25.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/mount-utils v0.21.2
|
||||
k8s.io/utils v0.0.0-20210527160623-6fdb442a123b
|
||||
|
||||
10
go.sum
10
go.sum
@@ -57,8 +57,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
|
||||
github.com/container-storage-interface/spec v1.3.0 h1:wMH4UIoWnK/TXYw8mbcIHgZmB6kHOeIsYsiaTJwa6bc=
|
||||
github.com/container-storage-interface/spec v1.3.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
|
||||
github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo=
|
||||
github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
@@ -200,8 +200,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.9.1 h1:sGq6ifVujfMSkfTsMZip44Ttv8SDXvsBlFk9GdYl/b8=
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.9.1/go.mod h1:8E2jVUX9j3QgspwHXa6LwyN7IHQDjW9jX3kwoWnSC+M=
|
||||
github.com/kubernetes-csi/csi-test/v4 v4.2.0 h1:uyFJMSN9vnOOuQwndB43Kp4Bi/dScuATdv4FMuGJJQ8=
|
||||
github.com/kubernetes-csi/csi-test/v4 v4.2.0/go.mod h1:HuWP7lCCJzehodzd4kO170soxqgzSQHZ5Jbp1pKPlmA=
|
||||
github.com/kubernetes-csi/csi-test/v4 v4.3.0 h1:3fi7ymnoFvCXQa/uauL1UrvnivuaT4r/gRJ2+RsQboc=
|
||||
github.com/kubernetes-csi/csi-test/v4 v4.3.0/go.mod h1:qJ77AkqjA5MBoBDGKHsPqyce/6miqoid+dZ4B00Miuw=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
@@ -269,7 +269,6 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/robertkrimen/otto v0.0.0-20200922221731-ef014fd054ac/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -501,7 +500,6 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
|
||||
@@ -19,15 +19,17 @@ package driver
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"time"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/models"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
@@ -118,36 +120,56 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
|
||||
isThin = utils.StringToBoolean(params["thin_provisioning"])
|
||||
}
|
||||
|
||||
protocol := strings.ToLower(params["protocol"])
|
||||
if protocol == "" {
|
||||
protocol = utils.ProtocolDefault
|
||||
} else if !isProtocolSupport(protocol) {
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported volume protocol")
|
||||
}
|
||||
|
||||
spec := &models.CreateK8sVolumeSpec{
|
||||
DsmIp: params["dsm"],
|
||||
K8sVolumeName: volName,
|
||||
LunName: fmt.Sprintf("%s-%s", models.LunPrefix, volName),
|
||||
LunName: models.GenLunName(volName),
|
||||
ShareName: models.GenShareName(volName),
|
||||
Location: params["location"],
|
||||
Size: sizeInByte,
|
||||
Type: params["type"],
|
||||
ThinProvisioning: isThin,
|
||||
TargetName: fmt.Sprintf("%s-%s", models.LunPrefix, volName),
|
||||
TargetName: fmt.Sprintf("%s-%s", models.TargetPrefix, volName),
|
||||
MultipleSession: multiSession,
|
||||
SourceSnapshotId: srcSnapshotId,
|
||||
SourceVolumeId: srcVolumeId,
|
||||
Protocol: protocol,
|
||||
}
|
||||
|
||||
lunInfo, dsmIp, err := cs.dsmService.CreateVolume(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// idempotency
|
||||
// Note: an SMB PV may not be tested existed precisely because the share folder name was sliced from k8sVolumeName
|
||||
k8sVolume := cs.dsmService.GetVolumeByName(volName)
|
||||
if k8sVolume == nil {
|
||||
k8sVolume, err = cs.dsmService.CreateVolume(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// already existed
|
||||
log.Debugf("Volume [%s] already exists in [%s], backing name: [%s]", volName, k8sVolume.DsmIp, k8sVolume.Name)
|
||||
}
|
||||
|
||||
if int64(lunInfo.Size) != sizeInByte {
|
||||
if (k8sVolume.Protocol == utils.ProtocolIscsi && k8sVolume.SizeInBytes != sizeInByte) ||
|
||||
(k8sVolume.Protocol == utils.ProtocolSmb && utils.BytesToMB(k8sVolume.SizeInBytes) != utils.BytesToMBCeil(sizeInByte)) {
|
||||
return nil , status.Errorf(codes.AlreadyExists, "Already existing volume name with different capacity")
|
||||
}
|
||||
|
||||
return &csi.CreateVolumeResponse{
|
||||
Volume: &csi.Volume{
|
||||
VolumeId: lunInfo.Uuid,
|
||||
CapacityBytes: int64(lunInfo.Size),
|
||||
VolumeId: k8sVolume.VolumeId,
|
||||
CapacityBytes: k8sVolume.SizeInBytes,
|
||||
ContentSource: volContentSrc,
|
||||
VolumeContext: map[string]string{
|
||||
"dsm": dsmIp,
|
||||
"dsm": k8sVolume.DsmIp,
|
||||
"protocol": k8sVolume.Protocol,
|
||||
"source": k8sVolume.Source,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
@@ -212,9 +234,11 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
|
||||
pagingSkip := ("" != startingToken)
|
||||
infos := cs.dsmService.ListVolumes()
|
||||
|
||||
sort.Sort(models.ByVolumeId(infos))
|
||||
|
||||
var count int32 = 0
|
||||
for _, info := range infos {
|
||||
if info.Lun.Uuid == startingToken {
|
||||
if info.VolumeId == startingToken {
|
||||
pagingSkip = false
|
||||
}
|
||||
|
||||
@@ -223,18 +247,20 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
|
||||
}
|
||||
|
||||
if maxEntries > 0 && count >= maxEntries {
|
||||
nextToken = info.Lun.Uuid
|
||||
nextToken = info.VolumeId
|
||||
break
|
||||
}
|
||||
|
||||
entries = append(entries, &csi.ListVolumesResponse_Entry{
|
||||
Volume: &csi.Volume{
|
||||
VolumeId: info.Lun.Uuid,
|
||||
CapacityBytes: int64(info.Lun.Size),
|
||||
VolumeId: info.VolumeId,
|
||||
CapacityBytes: info.SizeInBytes,
|
||||
VolumeContext: map[string]string{
|
||||
"dsm": info.DsmIp,
|
||||
"lunName": info.Lun.Name,
|
||||
"targetIqn": info.Target.Iqn,
|
||||
"shareName": info.Share.Name,
|
||||
"protocol": info.Protocol,
|
||||
},
|
||||
},
|
||||
})
|
||||
@@ -290,7 +316,7 @@ func (cs *controllerServer) ControllerGetCapabilities(ctx context.Context, req *
|
||||
|
||||
func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
|
||||
srcVolId := req.GetSourceVolumeId()
|
||||
snapshotName := req.GetName()
|
||||
snapshotName := req.GetName() // snapshot-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
|
||||
params := req.GetParameters()
|
||||
|
||||
if srcVolId == "" {
|
||||
@@ -301,37 +327,28 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
|
||||
return nil, status.Error(codes.InvalidArgument, "Snapshot name is empty.")
|
||||
}
|
||||
|
||||
snapshotInfos, err := cs.dsmService.ListAllSnapshots()
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ListAllSnapshots(), err: %v", err))
|
||||
}
|
||||
|
||||
// idempotency
|
||||
for _, snapshotInfo := range snapshotInfos {
|
||||
if snapshotInfo.Name == snapshotName {
|
||||
if snapshotInfo.ParentUuid != srcVolId {
|
||||
return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Snapshot [%s] already exists but volume id is incompatible", snapshotName))
|
||||
}
|
||||
|
||||
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
|
||||
}
|
||||
|
||||
return &csi.CreateSnapshotResponse{
|
||||
Snapshot: &csi.Snapshot{
|
||||
SizeBytes: snapshotInfo.TotalSize,
|
||||
SnapshotId: snapshotInfo.Uuid,
|
||||
SourceVolumeId: snapshotInfo.ParentUuid,
|
||||
CreationTime: createTime,
|
||||
ReadyToUse: (snapshotInfo.Status == "Healthy"),
|
||||
},
|
||||
}, nil
|
||||
orgSnap := cs.dsmService.GetSnapshotByName(snapshotName)
|
||||
if orgSnap != nil {
|
||||
// already existed
|
||||
if orgSnap.ParentUuid != srcVolId {
|
||||
return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Snapshot [%s] already exists but volume id is incompatible", snapshotName))
|
||||
}
|
||||
if orgSnap.CreateTime < 0 {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Bad create time: %v", orgSnap.CreateTime))
|
||||
}
|
||||
return &csi.CreateSnapshotResponse{
|
||||
Snapshot: &csi.Snapshot{
|
||||
SizeBytes: orgSnap.SizeInBytes,
|
||||
SnapshotId: orgSnap.Uuid,
|
||||
SourceVolumeId: orgSnap.ParentUuid,
|
||||
CreationTime: timestamppb.New(time.Unix(orgSnap.CreateTime, 0)),
|
||||
ReadyToUse: (orgSnap.Status == "Healthy"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// not exist, going to create a new snapshot
|
||||
spec := &models.CreateK8sVolumeSnapshotSpec{
|
||||
K8sVolumeId: srcVolId,
|
||||
SnapshotName: snapshotName,
|
||||
@@ -340,35 +357,19 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
|
||||
IsLocked: utils.StringToBoolean(params["is_locked"]),
|
||||
}
|
||||
|
||||
snapshotId, err := cs.dsmService.CreateSnapshot(spec)
|
||||
|
||||
snapshot, err := cs.dsmService.CreateSnapshot(spec)
|
||||
if err != nil {
|
||||
if err == utils.OutOfFreeSpaceError("") || err == utils.SnapshotReachMaxCountError("") {
|
||||
return nil,status.Errorf(codes.ResourceExhausted, fmt.Sprintf("Failed to CreateSnapshot(%s), err: %v", srcVolId, err))
|
||||
} else {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to CreateSnapshot(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
}
|
||||
|
||||
snapshotInfo, err := cs.dsmService.GetSnapshot(snapshotId)
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to GetSnapshot(%s), err: %v", snapshotId, err))
|
||||
}
|
||||
|
||||
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
|
||||
log.Errorf("Failed to CreateSnapshot, snapshotName: %s, srcVolId: %s, err: %v", snapshotName, srcVolId, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &csi.CreateSnapshotResponse{
|
||||
Snapshot: &csi.Snapshot{
|
||||
SizeBytes: snapshotInfo.TotalSize,
|
||||
SnapshotId: snapshotInfo.Uuid,
|
||||
SourceVolumeId: snapshotInfo.ParentUuid,
|
||||
CreationTime: createTime,
|
||||
ReadyToUse: (snapshotInfo.Status == "Healthy"),
|
||||
SizeBytes: snapshot.SizeInBytes,
|
||||
SnapshotId: snapshot.Uuid,
|
||||
SourceVolumeId: snapshot.ParentUuid,
|
||||
CreationTime: timestamppb.New(time.Unix(snapshot.CreateTime, 0)),
|
||||
ReadyToUse: (snapshot.Status == "Healthy"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@@ -402,22 +403,19 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
|
||||
}
|
||||
|
||||
pagingSkip := ("" != startingToken)
|
||||
var snapshotInfos []webapi.SnapshotInfo
|
||||
var err error
|
||||
var snapshots []*models.K8sSnapshotRespSpec
|
||||
|
||||
if (srcVolId != "") {
|
||||
snapshotInfos, err = cs.dsmService.ListSnapshots(srcVolId)
|
||||
snapshots = cs.dsmService.ListSnapshots(srcVolId)
|
||||
} else {
|
||||
snapshotInfos, err = cs.dsmService.ListAllSnapshots()
|
||||
snapshots = cs.dsmService.ListAllSnapshots()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ListSnapshots(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
sort.Sort(models.BySnapshotAndParentUuid(snapshots))
|
||||
|
||||
var count int32 = 0
|
||||
for _, snapshotInfo := range snapshotInfos {
|
||||
if snapshotInfo.Uuid == startingToken {
|
||||
for _, snapshot := range snapshots {
|
||||
if snapshot.Uuid == startingToken {
|
||||
pagingSkip = false
|
||||
}
|
||||
|
||||
@@ -425,28 +423,21 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
|
||||
continue
|
||||
}
|
||||
|
||||
if snapshotId != "" && snapshotInfo.Uuid != snapshotId {
|
||||
if snapshotId != "" && snapshot.Uuid != snapshotId {
|
||||
continue
|
||||
}
|
||||
|
||||
if maxEntries > 0 && count >= maxEntries {
|
||||
nextToken = snapshotInfo.Uuid
|
||||
nextToken = snapshot.Uuid
|
||||
break
|
||||
}
|
||||
|
||||
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
|
||||
}
|
||||
|
||||
entries = append(entries, &csi.ListSnapshotsResponse_Entry{
|
||||
Snapshot: &csi.Snapshot{
|
||||
SizeBytes: snapshotInfo.TotalSize,
|
||||
SnapshotId: snapshotInfo.Uuid,
|
||||
SourceVolumeId: snapshotInfo.ParentUuid,
|
||||
CreationTime: createTime,
|
||||
ReadyToUse: (snapshotInfo.Status == "Healthy"),
|
||||
SizeBytes: snapshot.SizeInBytes,
|
||||
SnapshotId: snapshot.Uuid,
|
||||
SourceVolumeId: snapshot.ParentUuid,
|
||||
CreationTime: timestamppb.New(time.Unix(snapshot.CreateTime, 0)),
|
||||
ReadyToUse: (snapshot.Status == "Healthy"),
|
||||
},
|
||||
})
|
||||
|
||||
@@ -477,14 +468,14 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi
|
||||
"InvalidArgument: Please check CapacityRange[%v]", capRange)
|
||||
}
|
||||
|
||||
if err := cs.dsmService.ExpandLun(volumeId, sizeInByte); err != nil {
|
||||
return nil, status.Error(codes.Internal,
|
||||
fmt.Sprintf("Failed to expand volume [%s], err: %v", volumeId, err))
|
||||
k8sVolume, err := cs.dsmService.ExpandVolume(volumeId, sizeInByte)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &csi.ControllerExpandVolumeResponse{
|
||||
CapacityBytes: sizeInByte,
|
||||
NodeExpansionRequired: true,
|
||||
CapacityBytes: k8sVolume.SizeInBytes,
|
||||
NodeExpansionRequired: (k8sVolume.Protocol == utils.ProtocolIscsi),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -20,11 +20,16 @@ import (
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
DriverName = "csi.san.synology.com" // CSI dirver name
|
||||
DriverVersion = "1.0.1"
|
||||
DriverVersion = "1.1.0"
|
||||
)
|
||||
|
||||
var (
|
||||
supportedProtocolList = []string{utils.ProtocolIscsi, utils.ProtocolSmb}
|
||||
)
|
||||
|
||||
type IDriver interface {
|
||||
@@ -73,6 +78,8 @@ func NewControllerAndNodeDriver(nodeID string, endpoint string, dsmService inter
|
||||
d.addNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{
|
||||
csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
|
||||
csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
|
||||
csi.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP,
|
||||
// csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, //TODO
|
||||
})
|
||||
|
||||
log.Infof("New driver created: name=%s, nodeID=%s, version=%s, endpoint=%s", d.name, d.nodeID, d.version, d.endpoint)
|
||||
@@ -127,3 +134,7 @@ func (d *Driver) addNodeServiceCapabilities(nsc []csi.NodeServiceCapability_RPC_
|
||||
func (d *Driver) getVolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode { // for debugging
|
||||
return d.vCap
|
||||
}
|
||||
|
||||
func isProtocolSupport(protocol string) bool {
|
||||
return utils.SliceContains(supportedProtocolList, protocol)
|
||||
}
|
||||
@@ -20,15 +20,19 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/mount-utils"
|
||||
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/models"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -138,37 +142,107 @@ func (ns *nodeServer) loginTarget(volumeId string) error {
|
||||
func (ns *nodeServer) logoutTarget(volumeId string) {
|
||||
k8sVolume := ns.dsmService.GetVolume(volumeId)
|
||||
|
||||
if k8sVolume == nil {
|
||||
if k8sVolume == nil || k8sVolume.Protocol != utils.ProtocolIscsi {
|
||||
return
|
||||
}
|
||||
|
||||
ns.Initiator.logout(k8sVolume.Target.Iqn, k8sVolume.DsmIp)
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
|
||||
volumeId, stagingTargetPath, volumeCapability :=
|
||||
req.GetVolumeId(), req.GetStagingTargetPath(), req.GetVolumeCapability()
|
||||
func checkGidPresentInMountFlags(volumeMountGroup string, mountFlags []string) (bool, error) {
|
||||
gidPresentInMountFlags := false
|
||||
for _, mountFlag := range mountFlags {
|
||||
if strings.HasPrefix(mountFlag, "gid") {
|
||||
gidPresentInMountFlags = true
|
||||
kvpair := strings.Split(mountFlag, "=")
|
||||
if volumeMountGroup != "" && len(kvpair) == 2 && !strings.EqualFold(volumeMountGroup, kvpair[1]) {
|
||||
return false, status.Error(codes.InvalidArgument, fmt.Sprintf("gid(%s) in storageClass and pod fsgroup(%s) are not equal", kvpair[1], volumeMountGroup))
|
||||
}
|
||||
}
|
||||
}
|
||||
return gidPresentInMountFlags, nil
|
||||
}
|
||||
|
||||
if volumeId == "" || stagingTargetPath == "" || volumeCapability == nil {
|
||||
return nil, status.Error(codes.InvalidArgument,
|
||||
"InvalidArgument: Please check volume ID, staging target path and volume capability.")
|
||||
func (ns *nodeServer) mountSensitiveWithRetry(sourcePath string, targetPath string, fsType string, options []string, sensitiveOptions []string) error {
|
||||
mountBackoff := backoff.NewExponentialBackOff()
|
||||
mountBackoff.InitialInterval = 1 * time.Second
|
||||
mountBackoff.Multiplier = 2
|
||||
mountBackoff.RandomizationFactor = 0.1
|
||||
mountBackoff.MaxElapsedTime = 5 * time.Second
|
||||
|
||||
checkFinished := func() error {
|
||||
if err := ns.Mounter.MountSensitive(sourcePath, targetPath, fsType, options, sensitiveOptions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
mountNotify := func(err error, duration time.Duration) {
|
||||
log.Infof("Retry MountSensitive, waiting %3.2f seconds .....", float64(duration.Seconds()))
|
||||
}
|
||||
|
||||
if err := backoff.RetryNotify(checkFinished, mountBackoff, mountNotify); err != nil {
|
||||
log.Errorf("Could not finish mount after %3.2f seconds.", float64(mountBackoff.MaxElapsedTime.Seconds()))
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Mount successfully. source: %s, target: %s", sourcePath, targetPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) setSMBVolumePermission(sourcePath string, userName string, authType utils.AuthType) error {
|
||||
s := strings.Split(strings.TrimPrefix(sourcePath, "//"), "/")
|
||||
if len(s) != 2 {
|
||||
return fmt.Errorf("Failed to parse dsmIp and shareName from source path")
|
||||
}
|
||||
dsmIp, shareName := s[0], s[1]
|
||||
|
||||
dsm, err := ns.dsmService.GetDsm(dsmIp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get DSM[%s]", dsmIp)
|
||||
}
|
||||
|
||||
permission := webapi.SharePermission{
|
||||
Name: userName,
|
||||
}
|
||||
switch authType {
|
||||
case utils.AuthTypeReadWrite:
|
||||
permission.IsWritable = true
|
||||
case utils.AuthTypeReadOnly:
|
||||
permission.IsReadonly = true
|
||||
case utils.AuthTypeNoAccess:
|
||||
permission.IsDeny = true
|
||||
default:
|
||||
return fmt.Errorf("Unknown auth type: %s", string(authType))
|
||||
}
|
||||
|
||||
permissions := append([]*webapi.SharePermission{}, &permission)
|
||||
spec := webapi.SharePermissionSetSpec{
|
||||
Name: shareName,
|
||||
UserGroupType: models.UserGroupTypeLocalUser,
|
||||
Permissions: permissions,
|
||||
}
|
||||
|
||||
return dsm.SharePermissionSet(spec)
|
||||
}
|
||||
|
||||
func (ns *nodeServer) nodeStageISCSIVolume(ctx context.Context, spec *models.NodeStageVolumeSpec) (*csi.NodeStageVolumeResponse, error) {
|
||||
// if block mode, skip mount
|
||||
if volumeCapability.GetBlock() != nil {
|
||||
if spec.VolumeCapability.GetBlock() != nil {
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
if err := ns.loginTarget(volumeId); err != nil {
|
||||
if err := ns.loginTarget(spec.VolumeId); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
volumeMountPath := ns.getVolumeMountPath(volumeId)
|
||||
volumeMountPath := ns.getVolumeMountPath(spec.VolumeId)
|
||||
if volumeMountPath == "" {
|
||||
return nil, status.Error(codes.Internal, "Can't get volume mount path")
|
||||
}
|
||||
|
||||
notMount, err := ns.Mounter.Interface.IsLikelyNotMountPoint(stagingTargetPath)
|
||||
notMount, err := ns.Mounter.Interface.IsLikelyNotMountPoint(spec.StagingTargetPath)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@@ -177,17 +251,104 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
fsType := volumeCapability.GetMount().GetFsType()
|
||||
mountFlags := volumeCapability.GetMount().GetMountFlags()
|
||||
options := append([]string{"rw"}, mountFlags...)
|
||||
fsType := spec.VolumeCapability.GetMount().GetFsType()
|
||||
options := append([]string{"rw"}, spec.VolumeCapability.GetMount().GetMountFlags()...)
|
||||
|
||||
if err = ns.Mounter.FormatAndMount(volumeMountPath, stagingTargetPath, fsType, options); err != nil {
|
||||
if err = ns.Mounter.FormatAndMount(volumeMountPath, spec.StagingTargetPath, fsType, options); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) nodeStageSMBVolume(ctx context.Context, spec *models.NodeStageVolumeSpec, secrets map[string]string) (*csi.NodeStageVolumeResponse, error) {
|
||||
if spec.VolumeCapability.GetBlock() != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("SMB protocol only allows 'mount' access type"))
|
||||
}
|
||||
|
||||
if spec.Source == "" { //"//<host>/<shareName>"
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Missing 'source' field"))
|
||||
}
|
||||
|
||||
if secrets == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Missing secrets for node staging volume"))
|
||||
}
|
||||
|
||||
username := strings.TrimSpace(secrets["username"])
|
||||
password := strings.TrimSpace(secrets["password"])
|
||||
domain := strings.TrimSpace(secrets["domain"])
|
||||
|
||||
// set permission to access the share
|
||||
if err := ns.setSMBVolumePermission(spec.Source, username, utils.AuthTypeReadWrite); err != nil {
|
||||
return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to set permission, source: %s, err: %v", spec.Source, err))
|
||||
}
|
||||
|
||||
// create mount point if not exists
|
||||
targetPath := spec.StagingTargetPath
|
||||
notMount, err := createTargetMountPath(ns.Mounter.Interface, targetPath, false)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
if !notMount {
|
||||
log.Infof("NodeStageVolume: %s is already mounted", targetPath)
|
||||
return &csi.NodeStageVolumeResponse{}, nil // already mount
|
||||
}
|
||||
|
||||
fsType := "cifs"
|
||||
options := spec.VolumeCapability.GetMount().GetMountFlags()
|
||||
|
||||
volumeMountGroup := spec.VolumeCapability.GetMount().GetVolumeMountGroup()
|
||||
gidPresent, err := checkGidPresentInMountFlags(volumeMountGroup, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !gidPresent && volumeMountGroup != "" {
|
||||
options = append(options, fmt.Sprintf("gid=%s", volumeMountGroup))
|
||||
}
|
||||
|
||||
|
||||
if domain != "" {
|
||||
options = append(options, fmt.Sprintf("%s=%s", "domain", domain))
|
||||
}
|
||||
var sensitiveOptions = []string{fmt.Sprintf("%s=%s,%s=%s", "username", username, "password", password)}
|
||||
if err := ns.mountSensitiveWithRetry(spec.Source, targetPath, fsType, options, sensitiveOptions); err != nil {
|
||||
return nil, status.Error(codes.Internal,
|
||||
fmt.Sprintf("Volume[%s] failed to mount %q on %q. err: %v", spec.VolumeId, spec.Source, targetPath, err))
|
||||
}
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
|
||||
volumeId, stagingTargetPath, volumeCapability :=
|
||||
req.GetVolumeId(), req.GetStagingTargetPath(), req.GetVolumeCapability()
|
||||
|
||||
if volumeId == "" || stagingTargetPath == "" || volumeCapability == nil {
|
||||
return nil, status.Error(codes.InvalidArgument,
|
||||
"InvalidArgument: Please check volume ID, staging target path and volume capability.")
|
||||
}
|
||||
|
||||
if volumeCapability.GetBlock() != nil && volumeCapability.GetMount() != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "Cannot mix block and mount capabilities")
|
||||
}
|
||||
|
||||
spec := &models.NodeStageVolumeSpec{
|
||||
VolumeId: volumeId,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
VolumeCapability: volumeCapability,
|
||||
Dsm: req.VolumeContext["dsm"],
|
||||
Source: req.VolumeContext["source"], // filled by CreateVolume response
|
||||
}
|
||||
|
||||
switch req.VolumeContext["protocol"] {
|
||||
case utils.ProtocolSmb:
|
||||
return ns.nodeStageSMBVolume(ctx, spec, req.GetSecrets())
|
||||
case utils.ProtocolIscsi:
|
||||
return ns.nodeStageISCSIVolume(ctx, spec)
|
||||
default:
|
||||
return nil, status.Error(codes.InvalidArgument, "Unknown protocol")
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
|
||||
volumeID, stagingTargetPath := req.GetVolumeId(), req.GetStagingTargetPath()
|
||||
|
||||
@@ -216,13 +377,19 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
|
||||
|
||||
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
|
||||
volumeId, targetPath, stagingTargetPath := req.GetVolumeId(), req.GetTargetPath(), req.GetStagingTargetPath()
|
||||
isBlock := req.GetVolumeCapability().GetBlock() != nil
|
||||
|
||||
if volumeId == "" || targetPath == "" || stagingTargetPath == "" {
|
||||
return nil, status.Error(codes.InvalidArgument,
|
||||
"InvalidArgument: Please check volume ID, target path and staging target path.")
|
||||
}
|
||||
|
||||
if req.GetVolumeCapability() == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
|
||||
}
|
||||
|
||||
isBlock := req.GetVolumeCapability().GetBlock() != nil // raw block, only for iscsi protocol
|
||||
fsType := req.GetVolumeCapability().GetMount().GetFsType()
|
||||
|
||||
notMount, err := createTargetMountPath(ns.Mounter.Interface, targetPath, isBlock)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
@@ -231,29 +398,36 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
|
||||
return &csi.NodePublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
if err := ns.loginTarget(volumeId); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
volumeMountPath := ns.getVolumeMountPath(volumeId)
|
||||
if volumeMountPath == "" {
|
||||
return nil, status.Error(codes.Internal, "Can't get volume mount path")
|
||||
}
|
||||
|
||||
options := []string{"bind"}
|
||||
if req.GetReadonly() {
|
||||
options = append(options, "ro")
|
||||
}
|
||||
|
||||
if isBlock {
|
||||
err = ns.Mounter.Interface.Mount(volumeMountPath, targetPath, "", options)
|
||||
} else {
|
||||
fsType := req.GetVolumeCapability().GetMount().GetFsType()
|
||||
err = ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, fsType, options)
|
||||
}
|
||||
switch req.VolumeContext["protocol"] {
|
||||
case utils.ProtocolSmb:
|
||||
if err := ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, "", options); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
case utils.ProtocolIscsi:
|
||||
if err := ns.loginTarget(volumeId); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
volumeMountPath := ns.getVolumeMountPath(volumeId)
|
||||
if volumeMountPath == "" {
|
||||
return nil, status.Error(codes.Internal, "Can't get volume mount path")
|
||||
}
|
||||
|
||||
if isBlock {
|
||||
err = ns.Mounter.Interface.Mount(volumeMountPath, targetPath, "", options)
|
||||
} else {
|
||||
err = ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, fsType, options)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
default:
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Unknown protocol: %s", req.VolumeContext["protocol"]))
|
||||
}
|
||||
|
||||
return &csi.NodePublishVolumeResponse{}, nil
|
||||
@@ -329,8 +503,19 @@ func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVo
|
||||
fmt.Sprintf("Volume[%s] does not exist on the %s", volumeId, volumePath))
|
||||
}
|
||||
|
||||
lun := k8sVolume.Lun
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
return &csi.NodeGetVolumeStatsResponse{
|
||||
Usage: []*csi.VolumeUsage{
|
||||
&csi.VolumeUsage{
|
||||
Total: k8sVolume.SizeInBytes,
|
||||
Unit: csi.VolumeUsage_BYTES,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
lun := k8sVolume.Lun
|
||||
return &csi.NodeGetVolumeStatsResponse{
|
||||
Usage: []*csi.VolumeUsage{
|
||||
&csi.VolumeUsage{
|
||||
@@ -355,6 +540,11 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
|
||||
return nil, status.Error(codes.NotFound, fmt.Sprintf("Volume[%s] is not found", volumeId))
|
||||
}
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
return &csi.NodeExpandVolumeResponse{
|
||||
CapacityBytes: sizeInByte}, nil
|
||||
}
|
||||
|
||||
if err := ns.Initiator.rescan(k8sVolume.Target.Iqn); err != nil {
|
||||
return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to rescan. err: %v", err))
|
||||
}
|
||||
|
||||
@@ -149,11 +149,11 @@ func getLunTypeByInputParams(lunType string, isThin bool, locationFsType string)
|
||||
return "", fmt.Errorf("Unknown volume fs type: %s", locationFsType)
|
||||
}
|
||||
|
||||
func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, lunUuid string) error {
|
||||
func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, lunUuid string) (webapi.TargetInfo, error) {
|
||||
dsmInfo, err := dsm.DsmInfoGet()
|
||||
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s] info", dsm.Ip));
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s] info", dsm.Ip));
|
||||
}
|
||||
|
||||
genTargetIqn := func() string {
|
||||
@@ -175,35 +175,35 @@ func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.Cre
|
||||
targetId, err := dsm.TargetCreate(targetSpec)
|
||||
|
||||
if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to create target with spec: %v, err: %v", targetSpec, err))
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to create target with spec: %v, err: %v", targetSpec, err))
|
||||
}
|
||||
|
||||
if targetInfo, err := dsm.TargetGet(targetSpec.Name); err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get target with spec: %v, err: %v", targetSpec, err))
|
||||
targetInfo, err := dsm.TargetGet(targetSpec.Name)
|
||||
if err != nil {
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get target with spec: %v, err: %v", targetSpec, err))
|
||||
} else {
|
||||
targetId = strconv.Itoa(targetInfo.TargetId);
|
||||
}
|
||||
|
||||
if spec.MultipleSession == true {
|
||||
if err := dsm.TargetSet(targetId, 0); err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to set target [%s] max session, err: %v", spec.TargetName, err))
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to set target [%s] max session, err: %v", spec.TargetName, err))
|
||||
}
|
||||
}
|
||||
|
||||
err = dsm.LunMapTarget([]string{targetId}, lunUuid)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to map target [%s] to lun [%s], err: %v", spec.TargetName, lunUuid, err))
|
||||
if err := dsm.LunMapTarget([]string{targetId}, lunUuid); err != nil {
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to map target [%s] to lun [%s], err: %v", spec.TargetName, lunUuid, err))
|
||||
}
|
||||
|
||||
return nil
|
||||
return targetInfo, nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, error) {
|
||||
func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
// 1. Find a available location
|
||||
if spec.Location == "" {
|
||||
vol, err := service.getFirstAvailableVolume(dsm, spec.Size)
|
||||
if err != nil {
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get available location, err: %v", err))
|
||||
}
|
||||
spec.Location = vol.Path
|
||||
@@ -212,13 +212,13 @@ func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.Creat
|
||||
// 2. Check if location exists
|
||||
dsmVolInfo, err := dsm.VolumeGet(spec.Location)
|
||||
if err != nil {
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.InvalidArgument, fmt.Sprintf("Unable to find location %s", spec.Location))
|
||||
}
|
||||
|
||||
lunType, err := getLunTypeByInputParams(spec.Type, spec.ThinProvisioning, dsmVolInfo.FsType)
|
||||
if err != nil {
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.InvalidArgument, fmt.Sprintf("Unknown volume fs type: %s, location: %s", dsmVolInfo.FsType, spec.Location))
|
||||
}
|
||||
|
||||
@@ -234,29 +234,29 @@ func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.Creat
|
||||
_, err = dsm.LunCreate(lunSpec)
|
||||
|
||||
if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create Volume with name: %s, err: %v", spec.K8sVolumeName, err))
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create LUN, err: %v", err))
|
||||
}
|
||||
|
||||
// No matter lun existed or not, Get Lun by name
|
||||
lunInfo, err := dsm.LunGet(spec.LunName)
|
||||
if err != nil {
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
// discussion with log
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
|
||||
}
|
||||
|
||||
// 4. Create Target and Map to Lun
|
||||
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
if err != nil {
|
||||
// FIXME need to delete lun and target
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
|
||||
}
|
||||
|
||||
log.Debugf("[%s] CreateVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
|
||||
log.Debugf("[%s] CreateVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
|
||||
|
||||
return lunInfo, nil
|
||||
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
|
||||
}
|
||||
|
||||
func waitCloneFinished(dsm *webapi.DSM, lunName string) error {
|
||||
@@ -291,47 +291,47 @@ func waitCloneFinished(dsm *webapi.DSM, lunName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, snapshotInfo webapi.SnapshotInfo) (webapi.LunInfo, error) {
|
||||
if spec.Size != 0 && spec.Size != snapshotInfo.TotalSize {
|
||||
return webapi.LunInfo{}, status.Errorf(codes.OutOfRange, "Lun size [%d] is not equal to snapshot size [%d]", spec.Size, snapshotInfo.TotalSize)
|
||||
func (service *DsmService) createVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcSnapshot *models.K8sSnapshotRespSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
if spec.Size != 0 && spec.Size != srcSnapshot.SizeInBytes {
|
||||
return nil, status.Errorf(codes.OutOfRange, "Requested lun size [%d] is not equal to snapshot size [%d]", spec.Size, srcSnapshot.SizeInBytes)
|
||||
}
|
||||
|
||||
snapshotCloneSpec := webapi.SnapshotCloneSpec{
|
||||
Name: spec.LunName,
|
||||
SrcLunUuid: snapshotInfo.ParentUuid,
|
||||
SrcSnapshotUuid: snapshotInfo.Uuid,
|
||||
SrcLunUuid: srcSnapshot.ParentUuid,
|
||||
SrcSnapshotUuid: srcSnapshot.Uuid,
|
||||
}
|
||||
|
||||
if _, err := dsm.SnapshotClone(snapshotCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source snapshot ID: %s, err: %v", snapshotInfo.Uuid, err))
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source snapshot ID: %s, err: %v", srcSnapshot.Uuid, err))
|
||||
}
|
||||
|
||||
if err := waitCloneFinished(dsm, spec.LunName); err != nil {
|
||||
return webapi.LunInfo{}, status.Errorf(codes.Internal, err.Error())
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
lunInfo, err := dsm.LunGet(spec.LunName)
|
||||
if err != nil {
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
|
||||
}
|
||||
|
||||
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
if err != nil {
|
||||
// FIXME need to delete lun and target
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
|
||||
log.Debugf("[%s] createVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
|
||||
|
||||
return lunInfo, nil
|
||||
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcLunInfo webapi.LunInfo) (webapi.LunInfo, error) {
|
||||
func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcLunInfo webapi.LunInfo) (*models.K8sVolumeRespSpec, error) {
|
||||
if spec.Size != 0 && spec.Size != int64(srcLunInfo.Size) {
|
||||
return webapi.LunInfo{}, status.Errorf(codes.OutOfRange, "Lun size [%d] is not equal to src lun size [%d]", spec.Size, srcLunInfo.Size)
|
||||
return nil, status.Errorf(codes.OutOfRange, "Requested lun size [%d] is not equal to src lun size [%d]", spec.Size, srcLunInfo.Size)
|
||||
}
|
||||
|
||||
if spec.Location == "" {
|
||||
@@ -345,166 +345,230 @@ func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.Cr
|
||||
}
|
||||
|
||||
if _, err := dsm.LunClone(lunCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source volume ID: %s, err: %v", srcLunInfo.Uuid, err))
|
||||
}
|
||||
|
||||
if err := waitCloneFinished(dsm, spec.LunName); err != nil {
|
||||
return webapi.LunInfo{}, status.Errorf(codes.Internal, err.Error())
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
lunInfo, err := dsm.LunGet(spec.LunName)
|
||||
if err != nil {
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
|
||||
}
|
||||
|
||||
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
if err != nil {
|
||||
// FIXME need to delete lun and target
|
||||
return webapi.LunInfo{},
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
|
||||
log.Debugf("[%s] createVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
|
||||
|
||||
return lunInfo, nil
|
||||
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
|
||||
}
|
||||
|
||||
func (service *DsmService) CreateVolume(spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, string, error) {
|
||||
func DsmShareToK8sVolume(dsmIp string, info webapi.ShareInfo) *models.K8sVolumeRespSpec {
|
||||
return &models.K8sVolumeRespSpec{
|
||||
DsmIp: dsmIp,
|
||||
VolumeId: info.Uuid,
|
||||
SizeInBytes: utils.MBToBytes(info.QuotaValueInMB),
|
||||
Location: info.VolPath,
|
||||
Name: info.Name,
|
||||
Source: "//" + dsmIp + "/" + info.Name,
|
||||
Protocol: utils.ProtocolSmb,
|
||||
Share: info,
|
||||
}
|
||||
}
|
||||
|
||||
func DsmLunToK8sVolume(dsmIp string, info webapi.LunInfo, targetInfo webapi.TargetInfo) *models.K8sVolumeRespSpec {
|
||||
return &models.K8sVolumeRespSpec{
|
||||
DsmIp: dsmIp,
|
||||
VolumeId: info.Uuid,
|
||||
SizeInBytes: int64(info.Size),
|
||||
Location: info.Location,
|
||||
Name: info.Name,
|
||||
Source: "",
|
||||
Protocol: utils.ProtocolIscsi,
|
||||
Lun: info,
|
||||
Target: targetInfo,
|
||||
}
|
||||
}
|
||||
|
||||
func (service *DsmService) CreateVolume(spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
if spec.SourceVolumeId != "" {
|
||||
/* Create volume by exists volume (Clone) */
|
||||
k8sVolume := service.GetVolume(spec.SourceVolumeId)
|
||||
if k8sVolume == nil {
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.NotFound, fmt.Sprintf("No such volume id: %s", spec.SourceVolumeId))
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("No such volume id: %s", spec.SourceVolumeId))
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
if err != nil {
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
}
|
||||
|
||||
lunInfo, err := service.createVolumeByVolume(dsm, spec, k8sVolume.Lun)
|
||||
return lunInfo, dsm.Ip, err
|
||||
} else if spec.SourceSnapshotId != "" {
|
||||
/* Create volume by snapshot */
|
||||
for _, dsm := range service.dsms {
|
||||
snapshotInfo, err := dsm.SnapshotGet(spec.SourceSnapshotId)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// found source by snapshot id, check allowable
|
||||
if spec.DsmIp != "" && spec.DsmIp != dsm.Ip {
|
||||
msg := fmt.Sprintf("The source PVC and destination PVCs must be on the same DSM for cloning from snapshots. Source is on %s, but new PVC is on %s",
|
||||
dsm.Ip, spec.DsmIp)
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.InvalidArgument, msg)
|
||||
}
|
||||
if spec.Location != "" && spec.Location != snapshotInfo.RootPath {
|
||||
msg := fmt.Sprintf("The source PVC and destination PVCs must be on the same location for cloning from snapshots. Source is on %s, but new PVC is on %s",
|
||||
snapshotInfo.RootPath, spec.Location)
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.InvalidArgument, msg)
|
||||
}
|
||||
|
||||
lunInfo, err := service.createVolumeBySnapshot(dsm, spec, snapshotInfo)
|
||||
return lunInfo, dsm.Ip, err
|
||||
if spec.Protocol == utils.ProtocolIscsi {
|
||||
return service.createVolumeByVolume(dsm, spec, k8sVolume.Lun)
|
||||
} else if spec.Protocol == utils.ProtocolSmb {
|
||||
return service.createSMBVolumeByVolume(dsm, spec, k8sVolume.Share)
|
||||
}
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.NotFound, fmt.Sprintf("No such snapshot id: %s", spec.SourceSnapshotId))
|
||||
} else if spec.DsmIp != "" {
|
||||
/* Create volume by specific dsm ip */
|
||||
dsm, err := service.GetDsm(spec.DsmIp)
|
||||
if err != nil {
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("%v", err))
|
||||
}
|
||||
lunInfo, err := service.createVolumeByDsm(dsm, spec)
|
||||
return lunInfo, dsm.Ip, err
|
||||
} else {
|
||||
/* Find appropriate dsm to create volume */
|
||||
for _, dsm := range service.dsms {
|
||||
lunInfo, err := service.createVolumeByDsm(dsm, spec)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to create Volume: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
return lunInfo, dsm.Ip, nil
|
||||
}
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("Couldn't find any host available to create Volume"))
|
||||
return nil, status.Error(codes.InvalidArgument, "Unknown protocol")
|
||||
}
|
||||
|
||||
if spec.SourceSnapshotId != "" {
|
||||
/* Create volume by snapshot */
|
||||
snapshot := service.GetSnapshotByUuid(spec.SourceSnapshotId)
|
||||
if snapshot == nil {
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("No such snapshot id: %s", spec.SourceSnapshotId))
|
||||
}
|
||||
|
||||
// found source by snapshot id, check allowable
|
||||
if spec.DsmIp != "" && spec.DsmIp != snapshot.DsmIp {
|
||||
msg := fmt.Sprintf("The source PVC and destination PVCs must be on the same DSM for cloning from snapshots. Source is on %s, but new PVC is on %s",
|
||||
snapshot.DsmIp, spec.DsmIp)
|
||||
return nil, status.Errorf(codes.InvalidArgument, msg)
|
||||
}
|
||||
if spec.Location != "" && spec.Location != snapshot.RootPath {
|
||||
msg := fmt.Sprintf("The source PVC and destination PVCs must be on the same location for cloning from snapshots. Source is on %s, but new PVC is on %s",
|
||||
snapshot.RootPath, spec.Location)
|
||||
return nil, status.Errorf(codes.InvalidArgument, msg)
|
||||
}
|
||||
if spec.Protocol != snapshot.Protocol {
|
||||
msg := fmt.Sprintf("The source PVC and destination PVCs shouldn't have different protocols. Source is %s, but new PVC is %s",
|
||||
snapshot.Protocol, spec.Protocol)
|
||||
return nil, status.Errorf(codes.InvalidArgument, msg)
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(snapshot.DsmIp)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", snapshot.DsmIp))
|
||||
}
|
||||
|
||||
if spec.Protocol == utils.ProtocolIscsi {
|
||||
return service.createVolumeBySnapshot(dsm, spec, snapshot)
|
||||
} else if spec.Protocol == utils.ProtocolSmb {
|
||||
return service.createSMBVolumeBySnapshot(dsm, spec, snapshot)
|
||||
}
|
||||
return nil, status.Error(codes.InvalidArgument, "Unknown protocol")
|
||||
}
|
||||
|
||||
/* Find appropriate dsm to create volume */
|
||||
for _, dsm := range service.dsms {
|
||||
if spec.DsmIp != "" && spec.DsmIp != dsm.Ip {
|
||||
continue
|
||||
}
|
||||
|
||||
var k8sVolume *models.K8sVolumeRespSpec
|
||||
var err error
|
||||
if spec.Protocol == utils.ProtocolIscsi {
|
||||
k8sVolume, err = service.createVolumeByDsm(dsm, spec)
|
||||
} else if spec.Protocol == utils.ProtocolSmb {
|
||||
k8sVolume, err = service.createSMBVolumeByDsm(dsm, spec)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to create Volume: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
|
||||
return k8sVolume, nil
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Couldn't find any host available to create Volume"))
|
||||
}
|
||||
|
||||
func (service *DsmService) DeleteVolume(volumeId string) error {
|
||||
k8sVolume := service.GetVolume(volumeId)
|
||||
func (service *DsmService) DeleteVolume(volId string) error {
|
||||
k8sVolume := service.GetVolume(volId)
|
||||
if k8sVolume == nil {
|
||||
log.Infof("Skip delete volume[%s] that is no exist", volumeId)
|
||||
log.Infof("Skip delete volume[%s] that is no exist", volId)
|
||||
return nil
|
||||
}
|
||||
|
||||
lun, target := k8sVolume.Lun, k8sVolume.Target
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
}
|
||||
|
||||
if err := dsm.LunDelete(lun.Uuid); err != nil {
|
||||
return err
|
||||
}
|
||||
if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
if err := dsm.ShareDelete(k8sVolume.Share.Name); err != nil {
|
||||
log.Errorf("[%s] Failed to delete Share(%s): %v", dsm.Ip, k8sVolume.Share.Name, err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
lun, target := k8sVolume.Lun, k8sVolume.Target
|
||||
|
||||
if len(target.MappedLuns) != 1 {
|
||||
log.Infof("Skip deletes target[%s] that was mapped with lun. DSM[%s]", target.Name, dsm.Ip)
|
||||
return nil
|
||||
}
|
||||
if err := dsm.LunDelete(lun.Uuid); err != nil {
|
||||
if _, err := dsm.LunGet(lun.Uuid); err != nil && errors.Is(err, utils.NoSuchLunError("")) {
|
||||
return nil
|
||||
}
|
||||
log.Errorf("[%s] Failed to delete LUN(%s): %v", dsm.Ip, lun.Uuid, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := dsm.TargetDelete(strconv.Itoa(target.TargetId)); err != nil {
|
||||
return err
|
||||
if len(target.MappedLuns) != 1 {
|
||||
log.Infof("Skip deletes target[%s] that was mapped with lun. DSM[%s]", target.Name, dsm.Ip)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := dsm.TargetDelete(strconv.Itoa(target.TargetId)); err != nil {
|
||||
if _, err := dsm.TargetGet(strconv.Itoa(target.TargetId)); err != nil {
|
||||
return nil
|
||||
}
|
||||
log.Errorf("[%s] Failed to delete target(%d): %v", dsm.Ip, target.TargetId, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) listVolumesByDsm(dsm *webapi.DSM, infos *[]*models.ListK8sVolumeRespSpec) {
|
||||
targetInfos, err := dsm.TargetList()
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list targets: %v", dsm.Ip, err)
|
||||
}
|
||||
func (service *DsmService) listISCSIVolumes(dsmIp string) (infos []*models.K8sVolumeRespSpec) {
|
||||
for _, dsm := range service.dsms {
|
||||
if dsmIp != "" && dsmIp != dsm.Ip {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, target := range targetInfos {
|
||||
// TODO: use target.ConnectedSessions to filter targets
|
||||
for _, mapping := range target.MappedLuns {
|
||||
lun, err := dsm.LunGet(mapping.LunUuid)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to get LUN(%s): %v", dsm.Ip, mapping.LunUuid, err)
|
||||
targetInfos, err := dsm.TargetList()
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list targets: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, target := range targetInfos {
|
||||
// TODO: use target.ConnectedSessions to filter targets
|
||||
for _, mapping := range target.MappedLuns {
|
||||
lun, err := dsm.LunGet(mapping.LunUuid)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to get LUN(%s): %v", dsm.Ip, mapping.LunUuid, err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(lun.Name, models.LunPrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
// FIXME: filter same LUN mapping to two target
|
||||
infos = append(infos, DsmLunToK8sVolume(dsm.Ip, lun, target))
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(lun.Name, models.LunPrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
*infos = append(*infos, &models.ListK8sVolumeRespSpec{
|
||||
DsmIp: dsm.Ip,
|
||||
Target: target,
|
||||
Lun: lun,
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) ListVolumes() []*models.ListK8sVolumeRespSpec {
|
||||
var infos []*models.ListK8sVolumeRespSpec
|
||||
|
||||
for _, dsm := range service.dsms {
|
||||
service.listVolumesByDsm(dsm, &infos)
|
||||
}
|
||||
func (service *DsmService) ListVolumes() (infos []*models.K8sVolumeRespSpec) {
|
||||
infos = append(infos, service.listISCSIVolumes("")...)
|
||||
infos = append(infos, service.listSMBVolumes("")...)
|
||||
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) GetVolume(lunUuid string) *models.ListK8sVolumeRespSpec {
|
||||
func (service *DsmService) GetVolume(volId string) *models.K8sVolumeRespSpec {
|
||||
volumes := service.ListVolumes()
|
||||
|
||||
for _, volume := range volumes {
|
||||
if volume.Lun.Uuid == lunUuid {
|
||||
if volume.VolumeId == volId {
|
||||
return volume
|
||||
}
|
||||
}
|
||||
@@ -512,100 +576,74 @@ func (service *DsmService) GetVolume(lunUuid string) *models.ListK8sVolumeRespSp
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) ExpandLun(lunUuid string, newSize int64) error {
|
||||
k8sVolume := service.GetVolume(lunUuid);
|
||||
if k8sVolume == nil {
|
||||
return status.Errorf(codes.InvalidArgument, fmt.Sprintf("Can't find volume[%s].", lunUuid))
|
||||
}
|
||||
|
||||
if int64(k8sVolume.Lun.Size) > newSize {
|
||||
return status.Errorf(codes.InvalidArgument,
|
||||
fmt.Sprintf("Failed to expand volume[%s], because expand size[%d] bigger than before[%d].",
|
||||
lunUuid, newSize, k8sVolume.Lun.Size))
|
||||
}
|
||||
|
||||
spec := webapi.LunUpdateSpec{
|
||||
Uuid: lunUuid,
|
||||
NewSize: uint64(newSize),
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
}
|
||||
|
||||
if err := dsm.LunUpdate(spec); err != nil {
|
||||
return status.Errorf(codes.InvalidArgument,
|
||||
fmt.Sprintf("Failed to expand volume[%s]. err: %v", lunUuid, err))
|
||||
func (service *DsmService) GetVolumeByName(volName string) *models.K8sVolumeRespSpec {
|
||||
volumes := service.ListVolumes()
|
||||
for _, volume := range volumes {
|
||||
if volume.Name == models.GenLunName(volName) ||
|
||||
volume.Name == models.GenShareName(volName) {
|
||||
return volume
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (string, error) {
|
||||
k8sVolume := service.GetVolume(spec.K8sVolumeId);
|
||||
func (service *DsmService) GetSnapshotByName(snapshotName string) *models.K8sSnapshotRespSpec {
|
||||
snaps := service.ListAllSnapshots()
|
||||
for _, snap := range snaps {
|
||||
if snap.Name == snapshotName {
|
||||
return snap
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) ExpandVolume(volId string, newSize int64) (*models.K8sVolumeRespSpec, error) {
|
||||
k8sVolume := service.GetVolume(volId);
|
||||
if k8sVolume == nil {
|
||||
return "", status.Errorf(codes.NotFound, fmt.Sprintf("Can't find volume[%s].", spec.K8sVolumeId))
|
||||
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Can't find volume[%s].", volId))
|
||||
}
|
||||
|
||||
snapshotSpec := webapi.SnapshotCreateSpec{
|
||||
Name: spec.SnapshotName,
|
||||
LunUuid: spec.K8sVolumeId,
|
||||
Description: spec.Description,
|
||||
TakenBy: spec.TakenBy,
|
||||
IsLocked: spec.IsLocked,
|
||||
if k8sVolume.SizeInBytes > newSize {
|
||||
return nil, status.Errorf(codes.InvalidArgument,
|
||||
fmt.Sprintf("Failed to expand volume[%s], because expand size[%d] smaller than before[%d].",
|
||||
volId, newSize, k8sVolume.SizeInBytes))
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
if err != nil {
|
||||
return "", status.Errorf(codes.InvalidArgument, fmt.Sprintf("Failed to get dsm: %v", err))
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
}
|
||||
|
||||
snapshotUuid, err := dsm.SnapshotCreate(snapshotSpec)
|
||||
if err != nil {
|
||||
return "", err
|
||||
if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
newSizeInMB := utils.BytesToMBCeil(newSize) // round up to MB
|
||||
if err := dsm.SetShareQuota(k8sVolume.Share, newSizeInMB); err != nil {
|
||||
log.Errorf("[%s] Failed to set quota [%d (MB)] to Share [%s]: %v",
|
||||
dsm.Ip, newSizeInMB, k8sVolume.Share.Name, err)
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to expand volume[%s]. err: %v", volId, err))
|
||||
}
|
||||
// convert MB to bytes, may be diff from the input newSize
|
||||
k8sVolume.SizeInBytes = utils.MBToBytes(newSizeInMB)
|
||||
} else {
|
||||
spec := webapi.LunUpdateSpec{
|
||||
Uuid: volId,
|
||||
NewSize: uint64(newSize),
|
||||
}
|
||||
if err := dsm.LunUpdate(spec); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to expand volume[%s]. err: %v", volId, err))
|
||||
}
|
||||
k8sVolume.SizeInBytes = newSize
|
||||
}
|
||||
|
||||
return snapshotUuid, nil
|
||||
return k8sVolume, nil
|
||||
}
|
||||
|
||||
func (service *DsmService) DeleteSnapshot(snapshotUuid string) error {
|
||||
for _, dsm := range service.dsms {
|
||||
_, err := dsm.SnapshotGet(snapshotUuid)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
func (service *DsmService) CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (*models.K8sSnapshotRespSpec, error) {
|
||||
srcVolId := spec.K8sVolumeId
|
||||
|
||||
err = dsm.SnapshotDelete(snapshotUuid)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to delete snapshot [%s]. err: %v", snapshotUuid, err))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) ListAllSnapshots() ([]webapi.SnapshotInfo, error) {
|
||||
var allInfos []webapi.SnapshotInfo
|
||||
|
||||
for _, dsm := range service.dsms {
|
||||
infos, err := dsm.SnapshotList("")
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
allInfos = append(allInfos, infos...)
|
||||
}
|
||||
|
||||
return allInfos, nil
|
||||
}
|
||||
|
||||
func (service *DsmService) ListSnapshots(lunUuid string) ([]webapi.SnapshotInfo, error) {
|
||||
k8sVolume := service.GetVolume(lunUuid);
|
||||
k8sVolume := service.GetVolume(srcVolId);
|
||||
if k8sVolume == nil {
|
||||
return []webapi.SnapshotInfo{}, nil // return empty when the volume does not exist
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Can't find volume[%s].", srcVolId))
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
@@ -613,24 +651,201 @@ func (service *DsmService) ListSnapshots(lunUuid string) ([]webapi.SnapshotInfo,
|
||||
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Failed to get dsm: %v", err))
|
||||
}
|
||||
|
||||
infos, err := dsm.SnapshotList(lunUuid)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal,
|
||||
fmt.Sprintf("Failed to list snapshot on lun [%s]. err: %v", lunUuid, err))
|
||||
if k8sVolume.Protocol == utils.ProtocolIscsi {
|
||||
snapshotSpec := webapi.SnapshotCreateSpec{
|
||||
Name: spec.SnapshotName,
|
||||
LunUuid: srcVolId,
|
||||
Description: spec.Description,
|
||||
TakenBy: spec.TakenBy,
|
||||
IsLocked: spec.IsLocked,
|
||||
}
|
||||
|
||||
snapshotUuid, err := dsm.SnapshotCreate(snapshotSpec)
|
||||
if err != nil {
|
||||
if err == utils.OutOfFreeSpaceError("") || err == utils.SnapshotReachMaxCountError("") {
|
||||
return nil,status.Errorf(codes.ResourceExhausted, fmt.Sprintf("Failed to SnapshotCreate(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to SnapshotCreate(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
|
||||
if snapshot := service.getISCSISnapshot(snapshotUuid); snapshot != nil {
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Failed to get iscsi snapshot (%s). Not found", snapshotUuid))
|
||||
} else if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
snapshotSpec := webapi.ShareSnapshotCreateSpec{
|
||||
ShareName: k8sVolume.Share.Name,
|
||||
Desc: models.ShareSnapshotDescPrefix + spec.SnapshotName, // limitations: don't change the desc by DSM
|
||||
IsLocked: spec.IsLocked,
|
||||
}
|
||||
|
||||
snapshotTime, err := dsm.ShareSnapshotCreate(snapshotSpec)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ShareSnapshotCreate(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
|
||||
snapshots := service.listSMBSnapshotsByDsm(dsm)
|
||||
for _, snapshot := range snapshots {
|
||||
if snapshot.Time == snapshotTime && snapshot.ParentUuid == srcVolId {
|
||||
return snapshot, nil
|
||||
}
|
||||
}
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Failed to get smb snapshot (%s, %s). Not found", snapshotTime, srcVolId))
|
||||
}
|
||||
|
||||
return infos, nil
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported volume protocol")
|
||||
}
|
||||
|
||||
func (service *DsmService) GetSnapshot(snapshotUuid string) (webapi.SnapshotInfo, error) {
|
||||
for _, dsm := range service.dsms {
|
||||
info, err := dsm.SnapshotGet(snapshotUuid)
|
||||
func (service *DsmService) GetSnapshotByUuid(snapshotUuid string) *models.K8sSnapshotRespSpec {
|
||||
snaps := service.ListAllSnapshots()
|
||||
for _, snap := range snaps {
|
||||
if snap.Uuid == snapshotUuid {
|
||||
return snap
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) DeleteSnapshot(snapshotUuid string) error {
|
||||
snapshot := service.GetSnapshotByUuid(snapshotUuid)
|
||||
if snapshot == nil {
|
||||
return nil
|
||||
}
|
||||
dsm, err := service.GetDsm(snapshot.DsmIp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if snapshot.Protocol == utils.ProtocolSmb {
|
||||
if err := dsm.ShareSnapshotDelete(snapshot.Time, snapshot.ParentName); err != nil {
|
||||
if snapshot := service.getSMBSnapshot(snapshotUuid); snapshot == nil { // idempotency
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Errorf("Failed to delete Share snapshot [%s]. err: %v", snapshotUuid, err)
|
||||
return err
|
||||
}
|
||||
} else if snapshot.Protocol == utils.ProtocolIscsi {
|
||||
if err := dsm.SnapshotDelete(snapshotUuid); err != nil {
|
||||
if _, err := dsm.SnapshotGet(snapshotUuid); err != nil { // idempotency
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Errorf("Failed to delete LUN snapshot [%s]. err: %v", snapshotUuid, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) listISCSISnapshotsByDsm(dsm *webapi.DSM) (infos []*models.K8sSnapshotRespSpec) {
|
||||
volumes := service.listISCSIVolumes(dsm.Ip)
|
||||
for _, volume := range volumes {
|
||||
lunInfo := volume.Lun
|
||||
lunSnaps, err := dsm.SnapshotList(lunInfo.Uuid)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list LUN[%s] snapshots: %v", dsm.Ip, lunInfo.Uuid, err)
|
||||
continue
|
||||
}
|
||||
|
||||
return info, nil
|
||||
for _, info := range lunSnaps {
|
||||
infos = append(infos, DsmLunSnapshotToK8sSnapshot(dsm.Ip, info, lunInfo))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (service *DsmService) ListAllSnapshots() []*models.K8sSnapshotRespSpec {
|
||||
var allInfos []*models.K8sSnapshotRespSpec
|
||||
|
||||
for _, dsm := range service.dsms {
|
||||
allInfos = append(allInfos, service.listISCSISnapshotsByDsm(dsm)...)
|
||||
allInfos = append(allInfos, service.listSMBSnapshotsByDsm(dsm)...)
|
||||
}
|
||||
|
||||
return webapi.SnapshotInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("No such snapshot uuid [%s]", snapshotUuid))
|
||||
return allInfos
|
||||
}
|
||||
|
||||
func (service *DsmService) ListSnapshots(volId string) []*models.K8sSnapshotRespSpec {
|
||||
var allInfos []*models.K8sSnapshotRespSpec
|
||||
|
||||
k8sVolume := service.GetVolume(volId);
|
||||
if k8sVolume == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get DSM[%s]", k8sVolume.DsmIp)
|
||||
return nil
|
||||
}
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolIscsi {
|
||||
infos, err := dsm.SnapshotList(volId)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to SnapshotList[%s]", volId)
|
||||
return nil
|
||||
}
|
||||
for _, info := range infos {
|
||||
allInfos = append(allInfos, DsmLunSnapshotToK8sSnapshot(dsm.Ip, info, k8sVolume.Lun))
|
||||
}
|
||||
} else {
|
||||
infos, err := dsm.ShareSnapshotList(k8sVolume.Share.Name)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to ShareSnapshotList[%s]", k8sVolume.Share.Name)
|
||||
return nil
|
||||
}
|
||||
for _, info := range infos {
|
||||
allInfos = append(allInfos, DsmShareSnapshotToK8sSnapshot(dsm.Ip, info, k8sVolume.Share))
|
||||
}
|
||||
}
|
||||
|
||||
return allInfos
|
||||
}
|
||||
|
||||
// DsmShareSnapshotToK8sSnapshot converts a DSM share (SMB) snapshot record
// into the driver's common snapshot representation. The CSI snapshot name is
// recovered from the description field, where CreateSnapshot stored it with
// a fixed prefix.
func DsmShareSnapshotToK8sSnapshot(dsmIp string, info webapi.ShareSnapshotInfo, shareInfo webapi.ShareInfo) *models.K8sSnapshotRespSpec {
	return &models.K8sSnapshotRespSpec{
		DsmIp:       dsmIp,
		Name:        strings.ReplaceAll(info.Desc, models.ShareSnapshotDescPrefix, ""), // snapshot-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
		Uuid:        info.Uuid,
		ParentName:  shareInfo.Name,
		ParentUuid:  shareInfo.Uuid,
		Status:      "Healthy", // share snapshot always Healthy
		SizeInBytes: utils.MBToBytes(shareInfo.QuotaValueInMB), // unable to get snapshot quota, return parent quota instead
		CreateTime:  GMTToUnixSecond(info.Time),
		Time:        info.Time,
		RootPath:    shareInfo.VolPath,
		Protocol:    utils.ProtocolSmb,
	}
}
|
||||
|
||||
// DsmLunSnapshotToK8sSnapshot converts a DSM LUN (iSCSI) snapshot record
// into the driver's common snapshot representation.
func DsmLunSnapshotToK8sSnapshot(dsmIp string, info webapi.SnapshotInfo, lunInfo webapi.LunInfo) *models.K8sSnapshotRespSpec {
	return &models.K8sSnapshotRespSpec{
		DsmIp:       dsmIp,
		Name:        info.Name,
		Uuid:        info.Uuid,
		ParentName:  lunInfo.Name, // it can be empty for iscsi
		ParentUuid:  info.ParentUuid,
		Status:      info.Status,
		SizeInBytes: info.TotalSize,
		CreateTime:  info.CreateTime,
		Time:        "", // time string is only populated for share (SMB) snapshots
		RootPath:    info.RootPath,
		Protocol:    utils.ProtocolIscsi,
	}
}
|
||||
|
||||
func (service *DsmService) getISCSISnapshot(snapshotUuid string) *models.K8sSnapshotRespSpec {
|
||||
for _, dsm := range service.dsms {
|
||||
snapshots := service.listISCSISnapshotsByDsm(dsm)
|
||||
for _, snap := range snapshots {
|
||||
if snap.Uuid == snapshotUuid {
|
||||
return snap
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
231
pkg/dsm/service/share_volume.go
Normal file
231
pkg/dsm/service/share_volume.go
Normal file
@@ -0,0 +1,231 @@
|
||||
/*
|
||||
* Copyright 2022 Synology Inc.
|
||||
*/
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/models"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
)
|
||||
|
||||
func GMTToUnixSecond(timeStr string) (int64) {
|
||||
t, err := time.Parse("GMT-07-2006.01.02-15.04.05", timeStr)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return -1
|
||||
}
|
||||
return t.Unix()
|
||||
}
|
||||
|
||||
func (service *DsmService) createSMBVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcSnapshot *models.K8sSnapshotRespSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
srcShareInfo, err := dsm.ShareGet(srcSnapshot.ParentName)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get share: %s, err: %v", srcSnapshot.ParentName, err))
|
||||
}
|
||||
|
||||
shareCloneSpec := webapi.ShareCloneSpec{
|
||||
Name: spec.ShareName,
|
||||
Snapshot: srcSnapshot.Time,
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: spec.ShareName,
|
||||
VolPath: srcSnapshot.RootPath,
|
||||
Desc: "Cloned from [" + srcSnapshot.Time + "] by csi driver", // max: 64
|
||||
EnableRecycleBin: srcShareInfo.EnableRecycleBin,
|
||||
RecycleBinAdminOnly: srcShareInfo.RecycleBinAdminOnly,
|
||||
NameOrg: srcSnapshot.ParentName,
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := dsm.ShareClone(shareCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source volume ID: %s, err: %v", srcShareInfo.Uuid, err))
|
||||
}
|
||||
|
||||
shareInfo, err := dsm.ShareGet(spec.ShareName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed Share with name: [%s], err: %v", spec.ShareName, err))
|
||||
}
|
||||
|
||||
newSizeInMB := utils.BytesToMBCeil(spec.Size)
|
||||
if shareInfo.QuotaValueInMB == 0 {
|
||||
// known issue for some DS, manually set quota to the new share
|
||||
if err := dsm.SetShareQuota(shareInfo, newSizeInMB); err != nil {
|
||||
msg := fmt.Sprintf("Failed to set quota [%d] to Share [%s], err: %v", newSizeInMB, shareInfo.Name, err)
|
||||
log.Error(msg)
|
||||
return nil, status.Errorf(codes.Internal, msg)
|
||||
}
|
||||
|
||||
shareInfo.QuotaValueInMB = newSizeInMB
|
||||
}
|
||||
|
||||
if newSizeInMB != shareInfo.QuotaValueInMB {
|
||||
// FIXME: need to delete share
|
||||
return nil,
|
||||
status.Errorf(codes.OutOfRange, "Requested share quotaMB [%d] is not equal to snapshot restore quotaMB [%d]", newSizeInMB, shareInfo.QuotaValueInMB)
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createSMBVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid);
|
||||
|
||||
return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createSMBVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcShareInfo webapi.ShareInfo) (*models.K8sVolumeRespSpec, error) {
|
||||
newSizeInMB := utils.BytesToMBCeil(spec.Size)
|
||||
if spec.Size != 0 && newSizeInMB != srcShareInfo.QuotaValueInMB {
|
||||
return nil,
|
||||
status.Errorf(codes.OutOfRange, "Requested share quotaMB [%d] is not equal to src share quotaMB [%d]", newSizeInMB, srcShareInfo.QuotaValueInMB)
|
||||
}
|
||||
|
||||
shareCloneSpec := webapi.ShareCloneSpec{
|
||||
Name: spec.ShareName,
|
||||
Snapshot: "",
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: spec.ShareName,
|
||||
VolPath: srcShareInfo.VolPath, // must be same with srcShare location
|
||||
Desc: "Cloned from [" + srcShareInfo.Name + "] by csi driver", // max: 64
|
||||
EnableRecycleBin: srcShareInfo.EnableRecycleBin,
|
||||
RecycleBinAdminOnly: srcShareInfo.RecycleBinAdminOnly,
|
||||
NameOrg: srcShareInfo.Name,
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := dsm.ShareClone(shareCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source volume ID: %s, err: %v", srcShareInfo.Uuid, err))
|
||||
}
|
||||
|
||||
shareInfo, err := dsm.ShareGet(spec.ShareName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed Share with name: [%s], err: %v", spec.ShareName, err))
|
||||
}
|
||||
|
||||
if shareInfo.QuotaValueInMB == 0 {
|
||||
// known issue for some DS, manually set quota to the new share
|
||||
if err := dsm.SetShareQuota(shareInfo, newSizeInMB); err != nil {
|
||||
msg := fmt.Sprintf("Failed to set quota [%d] to Share [%s], err: %v", newSizeInMB, shareInfo.Name, err)
|
||||
log.Error(msg)
|
||||
return nil, status.Errorf(codes.Internal, msg)
|
||||
}
|
||||
|
||||
shareInfo.QuotaValueInMB = newSizeInMB
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createSMBVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid);
|
||||
|
||||
return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createSMBVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
// TODO: Check if share name is allowable
|
||||
|
||||
// 1. Find a available location
|
||||
if spec.Location == "" {
|
||||
vol, err := service.getFirstAvailableVolume(dsm, spec.Size)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get available location, err: %v", err))
|
||||
}
|
||||
spec.Location = vol.Path
|
||||
}
|
||||
|
||||
// 2. Check if location exists
|
||||
_, err := dsm.VolumeGet(spec.Location)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Unable to find location %s", spec.Location))
|
||||
}
|
||||
|
||||
// 3. Create Share
|
||||
sizeInMB := utils.BytesToMBCeil(spec.Size)
|
||||
shareSpec := webapi.ShareCreateSpec{
|
||||
Name: spec.ShareName,
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: spec.ShareName,
|
||||
VolPath: spec.Location,
|
||||
Desc: "Created by Synology K8s CSI",
|
||||
EnableShareCow: false,
|
||||
EnableRecycleBin: true,
|
||||
RecycleBinAdminOnly: true,
|
||||
Encryption: 0,
|
||||
QuotaForCreate: &sizeInMB,
|
||||
},
|
||||
}
|
||||
|
||||
log.Debugf("ShareCreate spec: %v", shareSpec)
|
||||
err = dsm.ShareCreate(shareSpec)
|
||||
if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to create share, err: %v", err))
|
||||
}
|
||||
|
||||
shareInfo, err := dsm.ShareGet(spec.ShareName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed Share with name: %s, err: %v", spec.ShareName, err))
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createSMBVolumeByDsm Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid)
|
||||
|
||||
return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
|
||||
}
|
||||
|
||||
func (service *DsmService) listSMBVolumes(dsmIp string) (infos []*models.K8sVolumeRespSpec) {
|
||||
for _, dsm := range service.dsms {
|
||||
if dsmIp != "" && dsmIp != dsm.Ip {
|
||||
continue
|
||||
}
|
||||
|
||||
shares, err := dsm.ShareList()
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list shares: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, share := range shares {
|
||||
if !strings.HasPrefix(share.Name, models.SharePrefix) {
|
||||
continue
|
||||
}
|
||||
infos = append(infos, DsmShareToK8sVolume(dsm.Ip, share))
|
||||
}
|
||||
}
|
||||
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) listSMBSnapshotsByDsm(dsm *webapi.DSM) (infos []*models.K8sSnapshotRespSpec) {
|
||||
volumes := service.listSMBVolumes(dsm.Ip)
|
||||
for _, volume := range volumes {
|
||||
shareInfo := volume.Share
|
||||
shareSnaps, err := dsm.ShareSnapshotList(shareInfo.Name)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list share snapshots: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
for _, info := range shareSnaps {
|
||||
infos = append(infos, DsmShareSnapshotToK8sSnapshot(dsm.Ip, info, shareInfo))
|
||||
}
|
||||
}
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) getSMBSnapshot(snapshotUuid string) *models.K8sSnapshotRespSpec {
|
||||
for _, dsm := range service.dsms {
|
||||
snapshots := service.listSMBSnapshotsByDsm(dsm)
|
||||
for _, snap := range snapshots {
|
||||
if snap.Uuid == snapshotUuid {
|
||||
return snap
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -108,6 +108,8 @@ func errCodeMapping(errCode int, oriErr error) error {
|
||||
switch errCode {
|
||||
case 18990002: // Out of free space
|
||||
return utils.OutOfFreeSpaceError("")
|
||||
case 18990531: // No such LUN
|
||||
return utils.NoSuchLunError("")
|
||||
case 18990538: // Duplicated LUN name
|
||||
return utils.AlreadyExistError("")
|
||||
case 18990541:
|
||||
@@ -125,7 +127,7 @@ func errCodeMapping(errCode int, oriErr error) error {
|
||||
}
|
||||
|
||||
if errCode > 18990000 {
|
||||
return utils.IscsiDefaultError("")
|
||||
return utils.IscsiDefaultError{errCode}
|
||||
}
|
||||
return oriErr
|
||||
}
|
||||
@@ -144,7 +146,7 @@ func (dsm *DSM) LunList() ([]LunInfo, error) {
|
||||
|
||||
resp, err := dsm.sendRequest("", &LunInfos{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
lunInfos, ok := resp.Data.(*LunInfos)
|
||||
@@ -175,9 +177,8 @@ func (dsm *DSM) LunCreate(spec LunCreateSpec) (string, error) {
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &LunCreateResp{}, params, "webapi/entry.cgi")
|
||||
err = errCodeMapping(resp.ErrorCode, err)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
lunResp, ok := resp.Data.(*LunCreateResp)
|
||||
@@ -196,9 +197,9 @@ func (dsm *DSM) LunUpdate(spec LunUpdateSpec) error {
|
||||
params.Add("uuid", strconv.Quote(spec.Uuid))
|
||||
params.Add("new_size", strconv.FormatInt(int64(spec.NewSize), 10))
|
||||
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return err
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -217,9 +218,9 @@ func (dsm *DSM) LunGet(uuid string) (LunInfo, error) {
|
||||
}
|
||||
info := Info{}
|
||||
|
||||
_, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return LunInfo{}, err
|
||||
return LunInfo{}, errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
return info.Lun, nil
|
||||
@@ -239,9 +240,8 @@ func (dsm *DSM) LunClone(spec LunCloneSpec) (string, error) {
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &LunCloneResp{}, params, "webapi/entry.cgi")
|
||||
err = errCodeMapping(resp.ErrorCode, err)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
cloneLunResp, ok := resp.Data.(*LunCloneResp)
|
||||
@@ -265,7 +265,7 @@ func (dsm *DSM) TargetList() ([]TargetInfo, error) {
|
||||
|
||||
resp, err := dsm.sendRequest("", &TargetInfos{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
trgInfos, ok := resp.Data.(*TargetInfos)
|
||||
@@ -288,9 +288,9 @@ func (dsm *DSM) TargetGet(targetId string) (TargetInfo, error) {
|
||||
}
|
||||
info := Info{}
|
||||
|
||||
_, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return TargetInfo{}, err
|
||||
return TargetInfo{}, errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
return info.Target, nil
|
||||
@@ -305,9 +305,9 @@ func (dsm *DSM) TargetSet(targetId string, maxSession int) error {
|
||||
params.Add("target_id", strconv.Quote(targetId))
|
||||
params.Add("max_sessions", strconv.Itoa(maxSession))
|
||||
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return err
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -327,9 +327,8 @@ func (dsm *DSM) TargetCreate(spec TargetCreateSpec) (string, error) {
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &TrgCreateResp{}, params, "webapi/entry.cgi")
|
||||
err = errCodeMapping(resp.ErrorCode, err)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
trgResp, ok := resp.Data.(*TrgCreateResp)
|
||||
@@ -352,9 +351,9 @@ func (dsm *DSM) LunMapTarget(targetIds []string, lunUuid string) error {
|
||||
log.Debugln(params)
|
||||
}
|
||||
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return err
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -366,9 +365,9 @@ func (dsm *DSM) LunDelete(lunUuid string) error {
|
||||
params.Add("version", "1")
|
||||
params.Add("uuid", strconv.Quote(lunUuid))
|
||||
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return err
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -380,9 +379,9 @@ func (dsm *DSM) TargetDelete(targetName string) error {
|
||||
params.Add("version", "1")
|
||||
params.Add("target_id", strconv.Quote(targetName))
|
||||
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return err
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,21 +7,109 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/logger"
|
||||
)
|
||||
|
||||
type ShareInfo struct {
|
||||
Name string `json:"name"`
|
||||
VolPath string `json:"vol_path"`
|
||||
Name string `json:"name"` // required
|
||||
VolPath string `json:"vol_path"` // required
|
||||
Desc string `json:"desc"`
|
||||
EnableShareCow bool `json:"enable_share_cow"`
|
||||
EnableShareCow bool `json:"enable_share_cow"` // field for create
|
||||
EnableRecycleBin bool `json:"enable_recycle_bin"`
|
||||
RecycleBinAdminOnly bool `json:"recycle_bin_admin_only"`
|
||||
Encryption int `json:"encryption"`
|
||||
Encryption int `json:"encryption"` // field for create
|
||||
QuotaForCreate *int64 `json:"share_quota,omitempty"`
|
||||
QuotaValueInMB int64 `json:"quota_value"` // field for get
|
||||
SupportSnapshot bool `json:"support_snapshot"` // field for get
|
||||
Uuid string `json:"uuid"` // field for get
|
||||
NameOrg string `json:"name_org"` // required for clone
|
||||
}
|
||||
|
||||
type ShareUpdateInfo struct {
|
||||
Name string `json:"name"` // required
|
||||
VolPath string `json:"vol_path"` // required
|
||||
QuotaForCreate *int64 `json:"share_quota,omitempty"`
|
||||
// Add properties you want to update to shares here
|
||||
}
|
||||
|
||||
type ShareSnapshotInfo struct {
|
||||
Uuid string `json:"ruuid"`
|
||||
Time string `json:"time"`
|
||||
Desc string `json:"desc"`
|
||||
SnapSize string `json:"snap_size"` // the complete size of the snapshot
|
||||
Lock bool `json:"lock"`
|
||||
ScheduleSnapshot bool `json:"schedule_snapshot"`
|
||||
}
|
||||
|
||||
type ShareCreateSpec struct {
|
||||
Name string `json:"name"`
|
||||
ShareInfo ShareInfo `json:"shareinfo"`
|
||||
Name string
|
||||
ShareInfo ShareInfo
|
||||
}
|
||||
|
||||
type ShareCloneSpec struct {
|
||||
Name string
|
||||
Snapshot string
|
||||
ShareInfo ShareInfo
|
||||
}
|
||||
|
||||
type ShareSnapshotCreateSpec struct {
|
||||
ShareName string
|
||||
Desc string
|
||||
IsLocked bool
|
||||
}
|
||||
|
||||
type SharePermissionSetSpec struct {
|
||||
Name string
|
||||
UserGroupType string // "local_user"/"local_group"/"system"
|
||||
Permissions []*SharePermission
|
||||
}
|
||||
|
||||
type SharePermission struct {
|
||||
Name string `json:"name"`
|
||||
IsReadonly bool `json:"is_readonly"`
|
||||
IsWritable bool `json:"is_writable"`
|
||||
IsDeny bool `json:"is_deny"`
|
||||
IsCustom bool `json:"is_custom,omitempty"`
|
||||
IsAdmin bool `json:"is_admin,omitempty"` // field for list
|
||||
}
|
||||
|
||||
func shareErrCodeMapping(errCode int, oriErr error) error {
|
||||
switch errCode {
|
||||
case 402: // No such share
|
||||
return utils.NoSuchShareError("")
|
||||
case 403: // Invalid input value
|
||||
return utils.BadParametersError("")
|
||||
case 3301: // already exists
|
||||
return utils.AlreadyExistError("")
|
||||
case 3309:
|
||||
return utils.ShareReachMaxCountError("")
|
||||
case 3328:
|
||||
return utils.ShareSystemBusyError("")
|
||||
}
|
||||
|
||||
if errCode >= 3300 {
|
||||
return utils.ShareDefaultError{errCode}
|
||||
}
|
||||
return oriErr
|
||||
}
|
||||
|
||||
// ----------------------- Share APIs -----------------------
|
||||
func (dsm *DSM) ShareGet(shareName string) (ShareInfo, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "get")
|
||||
params.Add("version", "1")
|
||||
params.Add("additional", "[\"encryption\", \"enable_share_cow\", \"recyclebin\", \"support_snapshot\", \"share_quota\"]")
|
||||
params.Add("name", strconv.Quote(shareName))
|
||||
|
||||
info := ShareInfo{}
|
||||
|
||||
resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
|
||||
return info, shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareList() ([]ShareInfo, error) {
|
||||
@@ -29,7 +117,7 @@ func (dsm *DSM) ShareList() ([]ShareInfo, error) {
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "list")
|
||||
params.Add("version", "1")
|
||||
params.Add("additional", "[\"encryption\"]")
|
||||
params.Add("additional", "[\"encryption\", \"enable_share_cow\", \"recyclebin\", \"support_snapshot\", \"share_quota\"]")
|
||||
|
||||
type ShareInfos struct {
|
||||
Shares []ShareInfo `json:"shares"`
|
||||
@@ -37,7 +125,7 @@ func (dsm *DSM) ShareList() ([]ShareInfo, error) {
|
||||
|
||||
resp, err := dsm.sendRequest("", &ShareInfos{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
infos, ok := resp.Data.(*ShareInfos)
|
||||
@@ -61,13 +149,49 @@ func (dsm *DSM) ShareCreate(spec ShareCreateSpec) error {
|
||||
}
|
||||
params.Add("shareinfo", string(js))
|
||||
|
||||
// response : {"data":{"name":"Share-2"},"success":true}
|
||||
_, err = dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return err
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareClone(spec ShareCloneSpec) (string, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "clone")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(spec.Name))
|
||||
|
||||
// if clone from snapshot, the NameOrg must be the parent of the snapshot, or the webapi will return 3300
|
||||
// if the snapshot doesn't exist, it will return 3300 too.
|
||||
if spec.ShareInfo.NameOrg == "" {
|
||||
return "", fmt.Errorf("Clone failed. The source name can't be empty.")
|
||||
}
|
||||
|
||||
return nil
|
||||
if spec.Snapshot != "" {
|
||||
params.Add("snapshot", strconv.Quote(spec.Snapshot))
|
||||
}
|
||||
|
||||
js, err := json.Marshal(spec.ShareInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
params.Add("shareinfo", string(js))
|
||||
|
||||
type ShareCreateResp struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &ShareCreateResp{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return "", shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
shareResp, ok := resp.Data.(*ShareCreateResp)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("Failed to assert response to %T", &ShareCreateResp{})
|
||||
}
|
||||
|
||||
return shareResp.Name, nil
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareDelete(shareName string) error {
|
||||
@@ -75,13 +199,165 @@ func (dsm *DSM) ShareDelete(shareName string) error {
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "delete")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", fmt.Sprintf("[%s]", strconv.Quote(shareName)))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareSet(shareName string, updateInfo ShareUpdateInfo) error {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "set")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(shareName))
|
||||
|
||||
// response : {"success":true}
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
js, err := json.Marshal(updateInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params.Add("shareinfo", string(js))
|
||||
|
||||
if logger.WebapiDebug {
|
||||
log.Debugln(params)
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) SetShareQuota(shareInfo ShareInfo, newSizeInMB int64) error {
|
||||
updateInfo := ShareUpdateInfo{
|
||||
Name: shareInfo.Name,
|
||||
VolPath: shareInfo.VolPath,
|
||||
QuotaForCreate: &newSizeInMB,
|
||||
}
|
||||
return dsm.ShareSet(shareInfo.Name, updateInfo)
|
||||
}
|
||||
|
||||
// ----------------------- Share Snapshot APIs -----------------------
|
||||
func (dsm *DSM) ShareSnapshotCreate(spec ShareSnapshotCreateSpec) (string, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Snapshot")
|
||||
params.Add("method", "create")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(spec.ShareName))
|
||||
|
||||
type SnapInfo struct {
|
||||
Desc string `json:"desc"`
|
||||
IsLocked bool `json:"lock"` // default true
|
||||
}
|
||||
|
||||
snapinfo := SnapInfo{
|
||||
Desc: spec.Desc,
|
||||
IsLocked: spec.IsLocked,
|
||||
}
|
||||
js, err := json.Marshal(snapinfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
params.Add("snapinfo", string(js))
|
||||
|
||||
var snapTime string
|
||||
resp, err := dsm.sendRequest("", &snapTime, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return "", shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
return snapTime, nil // "GMT+08-2022.01.14-19.18.29"
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareSnapshotList(name string) ([]ShareSnapshotInfo, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Snapshot")
|
||||
params.Add("method", "list")
|
||||
params.Add("version", "2")
|
||||
params.Add("name", strconv.Quote(name))
|
||||
params.Add("additional", "[\"desc\", \"lock\", \"schedule_snapshot\", \"ruuid\", \"snap_size\"]")
|
||||
|
||||
type Infos struct {
|
||||
Snapshots []ShareSnapshotInfo `json:"snapshots"`
|
||||
Total int `json:"total"`
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &Infos{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
infos, ok := resp.Data.(*Infos)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Failed to assert response to %T", &Infos{})
|
||||
}
|
||||
|
||||
return infos.Snapshots, nil
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareSnapshotDelete(snapTime string, shareName string) error {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Snapshot")
|
||||
params.Add("method", "delete")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(shareName))
|
||||
params.Add("snapshots", fmt.Sprintf("[%s]", strconv.Quote(snapTime))) // ["GMT+08-2022.01.14-19.18.29"]
|
||||
|
||||
var objmap []map[string]interface{}
|
||||
resp, err := dsm.sendRequest("", &objmap, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
if len(objmap) > 0 {
|
||||
return fmt.Errorf("Failed to delete snapshot, API common error. snapshot: %s", snapTime)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----------------------- Share Permission APIs -----------------------
|
||||
func (dsm *DSM) SharePermissionSet(spec SharePermissionSetSpec) error {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Permission")
|
||||
params.Add("method", "set")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(spec.Name))
|
||||
params.Add("user_group_type", strconv.Quote(spec.UserGroupType))
|
||||
|
||||
js, err := json.Marshal(spec.Permissions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params.Add("permissions", string(js))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) SharePermissionList(shareName string, userGroupType string) ([]SharePermission, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Permission")
|
||||
params.Add("method", "list")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(shareName))
|
||||
params.Add("user_group_type", strconv.Quote(userGroupType))
|
||||
|
||||
type SharePermissions struct {
|
||||
Permissions []SharePermission `json:"items"`
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &SharePermissions{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
infos, ok := resp.Data.(*SharePermissions)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Failed to assert response to %T", &SharePermissions{})
|
||||
}
|
||||
|
||||
return infos.Permissions, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -16,14 +16,15 @@ type IDsmService interface {
|
||||
GetDsm(ip string) (*webapi.DSM, error)
|
||||
GetDsmsCount() int
|
||||
ListDsmVolumes(ip string) ([]webapi.VolInfo, error)
|
||||
CreateVolume(spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, string, error)
|
||||
DeleteVolume(volumeId string) error
|
||||
ListVolumes() []*models.ListK8sVolumeRespSpec
|
||||
GetVolume(lunUuid string) *models.ListK8sVolumeRespSpec
|
||||
ExpandLun(lunUuid string, newSize int64) error
|
||||
CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (string, error)
|
||||
CreateVolume(spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error)
|
||||
DeleteVolume(volId string) error
|
||||
ListVolumes() []*models.K8sVolumeRespSpec
|
||||
GetVolume(volId string) *models.K8sVolumeRespSpec
|
||||
ExpandVolume(volId string, newSize int64) (*models.K8sVolumeRespSpec, error)
|
||||
CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (*models.K8sSnapshotRespSpec, error)
|
||||
DeleteSnapshot(snapshotUuid string) error
|
||||
ListAllSnapshots() ([]webapi.SnapshotInfo, error)
|
||||
ListSnapshots(lunUuid string) ([]webapi.SnapshotInfo, error)
|
||||
GetSnapshot(snapshotUuid string) (webapi.SnapshotInfo, error)
|
||||
ListAllSnapshots() []*models.K8sSnapshotRespSpec
|
||||
ListSnapshots(volId string) []*models.K8sSnapshotRespSpec
|
||||
GetVolumeByName(volName string) *models.K8sVolumeRespSpec
|
||||
GetSnapshotByName(snapshotName string) *models.K8sSnapshotRespSpec
|
||||
}
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
K8sCsiName = "Kubernetes CSI"
|
||||
|
||||
@@ -15,8 +19,30 @@ const (
|
||||
LunTypeBlunThick = "BLUN_THICK" // thick provision, mapped to type 259
|
||||
MaxIqnLen = 128
|
||||
|
||||
// Share definitions
|
||||
MaxShareLen = 32
|
||||
MaxShareDescLen = 64
|
||||
UserGroupTypeLocalUser = "local_user"
|
||||
UserGroupTypeLocalGroup = "local_group"
|
||||
UserGroupTypeSystem = "system"
|
||||
|
||||
|
||||
// CSI definitions
|
||||
TargetPrefix = "k8s-csi"
|
||||
LunPrefix = "k8s-csi"
|
||||
IqnPrefix = "iqn.2000-01.com.synology:"
|
||||
TargetPrefix = "k8s-csi"
|
||||
LunPrefix = "k8s-csi"
|
||||
IqnPrefix = "iqn.2000-01.com.synology:"
|
||||
SharePrefix = "k8s-csi"
|
||||
ShareSnapshotDescPrefix = "(Do not change)"
|
||||
)
|
||||
|
||||
func GenLunName(volName string) string {
|
||||
return fmt.Sprintf("%s-%s", LunPrefix, volName)
|
||||
}
|
||||
|
||||
func GenShareName(volName string) string {
|
||||
shareName := fmt.Sprintf("%s-%s", SharePrefix, volName)
|
||||
if len(shareName) > MaxShareLen {
|
||||
return shareName[:MaxShareLen]
|
||||
}
|
||||
return shareName
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
)
|
||||
|
||||
@@ -10,21 +12,43 @@ type CreateK8sVolumeSpec struct {
|
||||
DsmIp string
|
||||
K8sVolumeName string
|
||||
LunName string
|
||||
ShareName string
|
||||
Location string
|
||||
Size int64
|
||||
Type string
|
||||
ThinProvisioning bool
|
||||
TargetName string
|
||||
TargetIqn string
|
||||
MultipleSession bool
|
||||
SourceSnapshotId string
|
||||
SourceVolumeId string
|
||||
Protocol string
|
||||
}
|
||||
|
||||
type ListK8sVolumeRespSpec struct {
|
||||
DsmIp string
|
||||
Lun webapi.LunInfo
|
||||
Target webapi.TargetInfo
|
||||
type K8sVolumeRespSpec struct {
|
||||
DsmIp string
|
||||
VolumeId string
|
||||
SizeInBytes int64
|
||||
Location string
|
||||
Name string
|
||||
Source string
|
||||
Lun webapi.LunInfo
|
||||
Target webapi.TargetInfo
|
||||
Share webapi.ShareInfo
|
||||
Protocol string
|
||||
}
|
||||
|
||||
type K8sSnapshotRespSpec struct {
|
||||
DsmIp string
|
||||
Name string
|
||||
Uuid string
|
||||
ParentName string
|
||||
ParentUuid string
|
||||
Status string
|
||||
SizeInBytes int64
|
||||
CreateTime int64
|
||||
Time string // only for share snapshot delete
|
||||
RootPath string
|
||||
Protocol string
|
||||
}
|
||||
|
||||
type CreateK8sVolumeSnapshotSpec struct {
|
||||
@@ -34,3 +58,23 @@ type CreateK8sVolumeSnapshotSpec struct {
|
||||
TakenBy string
|
||||
IsLocked bool
|
||||
}
|
||||
|
||||
type NodeStageVolumeSpec struct {
|
||||
VolumeId string
|
||||
StagingTargetPath string
|
||||
VolumeCapability *csi.VolumeCapability
|
||||
Dsm string
|
||||
Source string
|
||||
}
|
||||
|
||||
type ByVolumeId []*K8sVolumeRespSpec
|
||||
func (a ByVolumeId) Len() int { return len(a) }
|
||||
func (a ByVolumeId) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByVolumeId) Less(i, j int) bool { return a[i].VolumeId < a[j].VolumeId }
|
||||
|
||||
type BySnapshotAndParentUuid []*K8sSnapshotRespSpec
|
||||
func (a BySnapshotAndParentUuid) Len() int { return len(a) }
|
||||
func (a BySnapshotAndParentUuid) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a BySnapshotAndParentUuid) Less(i, j int) bool {
|
||||
return fmt.Sprintf("%s/%s", a[i].ParentUuid, a[i].Uuid) < fmt.Sprintf("%s/%s", a[j].ParentUuid, a[j].Uuid)
|
||||
}
|
||||
|
||||
@@ -2,14 +2,28 @@
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type OutOfFreeSpaceError string
|
||||
type AlreadyExistError string
|
||||
type BadParametersError string
|
||||
type NoSuchLunError string
|
||||
type LunReachMaxCountError string
|
||||
type TargetReachMaxCountError string
|
||||
type NoSuchSnapshotError string
|
||||
type BadLunTypeError string
|
||||
type SnapshotReachMaxCountError string
|
||||
type IscsiDefaultError string
|
||||
type IscsiDefaultError struct {
|
||||
ErrCode int
|
||||
}
|
||||
type NoSuchShareError string
|
||||
type ShareReachMaxCountError string
|
||||
type ShareSystemBusyError string
|
||||
type ShareDefaultError struct {
|
||||
ErrCode int
|
||||
}
|
||||
|
||||
func (_ OutOfFreeSpaceError) Error() string {
|
||||
return "Out of free space"
|
||||
@@ -17,6 +31,14 @@ func (_ OutOfFreeSpaceError) Error() string {
|
||||
func (_ AlreadyExistError) Error() string {
|
||||
return "Already Existed"
|
||||
}
|
||||
func (_ BadParametersError) Error() string {
|
||||
return "Invalid input value"
|
||||
}
|
||||
|
||||
// ISCSI errors
|
||||
func (_ NoSuchLunError) Error() string {
|
||||
return "No such LUN"
|
||||
}
|
||||
|
||||
func (_ LunReachMaxCountError) Error() string {
|
||||
return "Number of LUN reach limit"
|
||||
@@ -38,6 +60,23 @@ func (_ SnapshotReachMaxCountError) Error() string {
|
||||
return "Number of snapshot reach limit"
|
||||
}
|
||||
|
||||
func (_ IscsiDefaultError) Error() string {
|
||||
return "Default ISCSI error"
|
||||
func (e IscsiDefaultError) Error() string {
|
||||
return fmt.Sprintf("ISCSI API error. Error code: %d", e.ErrCode)
|
||||
}
|
||||
|
||||
// Share errors
|
||||
func (_ NoSuchShareError) Error() string {
|
||||
return "No such share"
|
||||
}
|
||||
|
||||
func (_ ShareReachMaxCountError) Error() string {
|
||||
return "Number of share reach limit"
|
||||
}
|
||||
|
||||
func (_ ShareSystemBusyError) Error() string {
|
||||
return "Share system is temporary busy"
|
||||
}
|
||||
|
||||
func (e ShareDefaultError) Error() string {
|
||||
return fmt.Sprintf("Share API error. Error code: %d", e.ErrCode)
|
||||
}
|
||||
@@ -8,7 +8,42 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const UNIT_GB = 1024 * 1024 * 1024
|
||||
type AuthType string
|
||||
|
||||
const (
|
||||
UNIT_GB = 1024 * 1024 * 1024
|
||||
UNIT_MB = 1024 * 1024
|
||||
|
||||
ProtocolSmb = "smb"
|
||||
ProtocolIscsi = "iscsi"
|
||||
ProtocolDefault = ProtocolIscsi
|
||||
|
||||
AuthTypeReadWrite AuthType = "rw"
|
||||
AuthTypeReadOnly AuthType = "ro"
|
||||
AuthTypeNoAccess AuthType = "no"
|
||||
)
|
||||
|
||||
func SliceContains(items []string, s string) bool {
|
||||
for _, item := range items {
|
||||
if s == item {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func MBToBytes(size int64) int64 {
|
||||
return size * UNIT_MB
|
||||
}
|
||||
|
||||
func BytesToMB(size int64) int64 {
|
||||
return size / UNIT_MB
|
||||
}
|
||||
|
||||
// Ceiling
|
||||
func BytesToMBCeil(size int64) int64 {
|
||||
return (size + UNIT_MB - 1) / UNIT_MB
|
||||
}
|
||||
|
||||
func StringToBoolean(value string) bool {
|
||||
value = strings.ToLower(value)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/common"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
)
|
||||
|
||||
@@ -51,6 +52,30 @@ var cmdDsmLogin = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
// Always get the first client from ClientInfo for synocli testing
|
||||
func LoginDsmForTest() (*webapi.DSM, error) {
|
||||
info, err := common.LoadConfig("./config/client-info.yml")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to read config: %v", err)
|
||||
}
|
||||
if len(info.Clients) == 0 {
|
||||
return nil, fmt.Errorf("No client in client-info.yml")
|
||||
}
|
||||
|
||||
dsm := &webapi.DSM{
|
||||
Ip: info.Clients[0].Host,
|
||||
Port: info.Clients[0].Port,
|
||||
Username: info.Clients[0].Username,
|
||||
Password: info.Clients[0].Password,
|
||||
Https: info.Clients[0].Https,
|
||||
}
|
||||
|
||||
if err := dsm.Login(); err != nil {
|
||||
return nil, fmt.Errorf("Failed to login to DSM: [%s]. err: %v", dsm.Ip, err)
|
||||
}
|
||||
return dsm, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdDsm.AddCommand(cmdDsmLogin)
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ var rootCmd = &cobra.Command{
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(cmdDsm)
|
||||
rootCmd.AddCommand(cmdShare)
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
|
||||
580
synocli/cmd/share.go
Normal file
580
synocli/cmd/share.go
Normal file
@@ -0,0 +1,580 @@
|
||||
/*
|
||||
* Copyright 2022 Synology Inc.
|
||||
*/
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
"strconv"
|
||||
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
var cmdShare = &cobra.Command{
|
||||
Use: "share",
|
||||
Short: "share API",
|
||||
Long: `DSM share API`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareGet = &cobra.Command{
|
||||
Use: "get <name>",
|
||||
Short: "get share",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
share, err := dsm.ShareGet(args[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Success, ShareGet(%s) = %#v\n", args[0], share)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareList = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "list shares",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
shares, err := dsm.ShareList()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
|
||||
|
||||
fmt.Fprintf(tw, "%s\t%-32s\t%-10s\t%-64s\t%s\t%s\t%s\t%s\t%-10s\t%s\t%s\n",
|
||||
"id:", "Name:", "VolPath", "Desc:", "Quota(MB):", "ShareCow:", "RecycleBin:", "RBAdminOnly:", "Encryption:", "CanSnap:", "Uuid:")
|
||||
for i, info := range shares {
|
||||
fmt.Fprintf(tw, "%d\t%-32s\t", i, info.Name)
|
||||
fmt.Fprintf(tw, "%-10s\t", info.VolPath)
|
||||
fmt.Fprintf(tw, "%-64s\t", info.Desc)
|
||||
fmt.Fprintf(tw, "%d\t", info.QuotaValueInMB)
|
||||
fmt.Fprintf(tw, "%v\t", info.EnableShareCow)
|
||||
fmt.Fprintf(tw, "%v\t", info.EnableRecycleBin)
|
||||
fmt.Fprintf(tw, "%v\t", info.RecycleBinAdminOnly)
|
||||
fmt.Fprintf(tw, "%-10d\t", info.Encryption)
|
||||
fmt.Fprintf(tw, "%v\t", info.SupportSnapshot)
|
||||
fmt.Fprintf(tw, "%s\t", info.Uuid)
|
||||
fmt.Fprintf(tw, "\n")
|
||||
|
||||
_ = tw.Flush()
|
||||
}
|
||||
|
||||
fmt.Printf("Success, ShareList()\n")
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareCreate = &cobra.Command{
|
||||
Use: "create <name> <location> [<size_bytes>]",
|
||||
Short: "create share",
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
var size int64 = 0
|
||||
if len(args) >= 3 {
|
||||
size, err = strconv.ParseInt(args[2], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
sizeInMB := utils.BytesToMBCeil(size)
|
||||
testSpec := webapi.ShareCreateSpec{
|
||||
Name: args[0],
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: args[0],
|
||||
VolPath: args[1],
|
||||
Desc: "Created by synocli",
|
||||
EnableShareCow: false,
|
||||
EnableRecycleBin: true,
|
||||
RecycleBinAdminOnly: true,
|
||||
Encryption: 0,
|
||||
QuotaForCreate: &sizeInMB,
|
||||
},
|
||||
}
|
||||
|
||||
fmt.Printf("spec = %#v, sizeInMB = %d\n", testSpec, sizeInMB)
|
||||
err = dsm.ShareCreate(testSpec)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
share, err := dsm.ShareGet(args[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Success, ShareCreate(%s) resp = %#v\n", args[0], share)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareDelete = &cobra.Command{
|
||||
Use: "delete <name>",
|
||||
Short: "delete share",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
err = dsm.ShareDelete(args[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Success, ShareDelete(%s)\n", args[0])
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareClone = &cobra.Command{
|
||||
Use: "clone <new name> <src name> <is_snapshot [true|false]>",
|
||||
Short: "clone share",
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
fromSnapshot := false
|
||||
if len(args) >= 3 && args[2] == "true" {
|
||||
fromSnapshot = true
|
||||
}
|
||||
|
||||
newName := args[0]
|
||||
srcName := args[1]
|
||||
orgShareName := ""
|
||||
snapshot := ""
|
||||
if fromSnapshot {
|
||||
snapshot = srcName
|
||||
shares, err := dsm.ShareList()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, share := range shares {
|
||||
snaps, err := dsm.ShareSnapshotList(share.Name)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
for _, snap := range snaps {
|
||||
if snap.Time != srcName {
|
||||
continue
|
||||
}
|
||||
orgShareName = share.Name
|
||||
break
|
||||
}
|
||||
if orgShareName != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if orgShareName == "" {
|
||||
fmt.Println("Failed to find org Share of the snapshot")
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
orgShareName = srcName
|
||||
}
|
||||
srcShare, err := dsm.ShareGet(orgShareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
shareSpec := webapi.ShareCloneSpec{
|
||||
Name: newName,
|
||||
Snapshot: snapshot,
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: newName,
|
||||
VolPath: srcShare.VolPath,
|
||||
Desc: "Cloned from "+srcName+" by synocli",
|
||||
EnableRecycleBin: srcShare.EnableRecycleBin,
|
||||
RecycleBinAdminOnly: srcShare.RecycleBinAdminOnly,
|
||||
NameOrg: orgShareName,
|
||||
},
|
||||
}
|
||||
fmt.Printf("newName: %s, fromSnapshot: %v (%s), orgShareName: %s\n", newName, fromSnapshot, snapshot, orgShareName)
|
||||
_, err = dsm.ShareClone(shareSpec)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
share, err := dsm.ShareGet(newName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Success, ShareClone(%s, %s) new share = %#v\n", newName, srcName, share)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareSnapshotCreate = &cobra.Command{
|
||||
Use: "snap_create <src name> <desc> <is_lock [true|false]>",
|
||||
Short: "create share snapshot (only share located in btrfs volume can take snapshots)",
|
||||
Args: cobra.MinimumNArgs(3),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
spec := webapi.ShareSnapshotCreateSpec{
|
||||
ShareName: args[0],
|
||||
Desc: args[1],
|
||||
IsLocked: utils.StringToBoolean(args[2]),
|
||||
}
|
||||
|
||||
fmt.Printf("spec = %#v\n", spec)
|
||||
snapTime, err := dsm.ShareSnapshotCreate(spec)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("resp = %s\n", snapTime)
|
||||
|
||||
snaps, err := dsm.ShareSnapshotList(spec.ShareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var snapshot webapi.ShareSnapshotInfo
|
||||
for _, snap := range snaps {
|
||||
if snap.Time == snapTime {
|
||||
snapshot = snap
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Success, ShareSnapshotCreate(%s), snap = %#v\n", args[0], snapshot)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareSnapshotDelete = &cobra.Command{
|
||||
Use: "snap_delete <share name> <snapshot time>",
|
||||
Short: "delete share snapshot",
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
if err := dsm.ShareSnapshotDelete(args[1], args[0]); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Success, ShareSnapshotDelete(%s, %s)\n", args[1], args[0])
|
||||
},
|
||||
}
|
||||
|
||||
func shareSnapshotListAll(dsm *webapi.DSM, shareName string) ([]webapi.ShareSnapshotInfo, error) {
|
||||
if shareName != "" {
|
||||
return dsm.ShareSnapshotList(shareName)
|
||||
}
|
||||
|
||||
var infos []webapi.ShareSnapshotInfo
|
||||
shares, err := dsm.ShareList()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, share := range shares {
|
||||
snaps, err := dsm.ShareSnapshotList(share.Name)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
infos = append(infos, snaps...)
|
||||
}
|
||||
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
var cmdShareSnapshotList = &cobra.Command{
|
||||
Use: "snap_list [<share name>]",
|
||||
Short: "list share snapshots",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
shareName := ""
|
||||
if len(args) >= 1 {
|
||||
shareName = args[0]
|
||||
}
|
||||
|
||||
snaps, err := shareSnapshotListAll(dsm, shareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
|
||||
|
||||
fmt.Fprintf(tw, "id:\tTime:\tDesc:\tLock:\tScheduleSnapshot:\n")
|
||||
for i, info := range snaps {
|
||||
fmt.Fprintf(tw, "%d\t%-32s\t", i, info.Time)
|
||||
fmt.Fprintf(tw, "%-32s\t", info.Desc)
|
||||
fmt.Fprintf(tw, "%v\t", info.Lock)
|
||||
fmt.Fprintf(tw, "%v\t", info.ScheduleSnapshot)
|
||||
fmt.Fprintf(tw, "\n")
|
||||
|
||||
_ = tw.Flush()
|
||||
}
|
||||
fmt.Printf("Success, ShareSnapList(%s)\n", shareName)
|
||||
},
|
||||
}
|
||||
|
||||
func createSharePermission(name string, acl string) *webapi.SharePermission {
|
||||
var permission webapi.SharePermission
|
||||
permission.Name = name
|
||||
|
||||
switch strings.ToLower(acl) {
|
||||
case "rw":
|
||||
permission.IsWritable = true
|
||||
case "ro":
|
||||
permission.IsReadonly = true
|
||||
case "no":
|
||||
permission.IsDeny = true
|
||||
default:
|
||||
fmt.Println("[ERROR] Unknown ACL")
|
||||
return nil
|
||||
}
|
||||
return &permission
|
||||
}
|
||||
|
||||
func getShareLocalUserPermission(dsm *webapi.DSM, shareName string, userName string) (*webapi.SharePermission, error) {
|
||||
infos, err := dsm.SharePermissionList(shareName, "local_user")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, info := range infos {
|
||||
if info.Name == userName {
|
||||
return &info, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Permission Not Found.")
|
||||
}
|
||||
|
||||
var cmdSharePermissionList = &cobra.Command{
|
||||
Use: "permission_list <name> [local_user|local_group|system]",
|
||||
Short: "list permissions",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
userGroupType := "local_user"
|
||||
if len(args) >= 2 {
|
||||
userGroupType = args[1]
|
||||
}
|
||||
|
||||
infos, err := dsm.SharePermissionList(args[0], userGroupType)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
|
||||
|
||||
fmt.Fprintf(tw, "%s\t%-20s\t%-10s\t%-12s\t%-12s\t%-10s\t%-10s\n", "id:", "Name:", "IsAdmin", "IsReadonly:", "IsWritable:", "IsDeny:", "IsCustom:")
|
||||
for i, info := range infos {
|
||||
fmt.Fprintf(tw, "%d\t", i)
|
||||
fmt.Fprintf(tw, "%-20s\t", info.Name)
|
||||
fmt.Fprintf(tw, "%-10v\t", info.IsAdmin)
|
||||
fmt.Fprintf(tw, "%-12v\t", info.IsReadonly)
|
||||
fmt.Fprintf(tw, "%-12v\t", info.IsWritable)
|
||||
fmt.Fprintf(tw, "%-10v\t", info.IsDeny)
|
||||
fmt.Fprintf(tw, "%-10v\t", info.IsCustom)
|
||||
fmt.Fprintf(tw, "\n")
|
||||
|
||||
_ = tw.Flush()
|
||||
}
|
||||
|
||||
fmt.Printf("Success, SharePermissionList(%s)\n", args[0])
|
||||
},
|
||||
}
|
||||
|
||||
var cmdSharePermissionSet = &cobra.Command{
|
||||
Use: "permission_set <share name> <username> <[rw|ro|no]>",
|
||||
Short: "set permission",
|
||||
Args: cobra.MinimumNArgs(3),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
shareName := args[0]
|
||||
userName := args[1]
|
||||
userGroupType := "local_user"
|
||||
permission := createSharePermission(userName, args[2])
|
||||
if permission == nil {
|
||||
fmt.Println("Failed. Invalid Argument.")
|
||||
os.Exit(1)
|
||||
}
|
||||
permissions := append([]*webapi.SharePermission{}, permission)
|
||||
|
||||
spec := webapi.SharePermissionSetSpec{
|
||||
Name: shareName,
|
||||
UserGroupType: userGroupType,
|
||||
Permissions: permissions,
|
||||
}
|
||||
|
||||
fmt.Printf("spec = %#v\n", spec)
|
||||
if err := dsm.SharePermissionSet(spec); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
newPermission, err := getShareLocalUserPermission(dsm, shareName, userName)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to get share local_user permission(%s, %s): %v\n", shareName, userName, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Success, SharePermissionSet(%s), newPermission: %#v\n", shareName, newPermission)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareSet = &cobra.Command{
|
||||
Use: "set <share name> <new size>",
|
||||
Short: "share set (only share located in btrfs volume can be resized)",
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
shareName := args[0]
|
||||
newSize, err := strconv.ParseInt(args[1], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
share, err := dsm.ShareGet(shareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("old = %#v\n", share)
|
||||
|
||||
newSizeInMB := utils.BytesToMBCeil(newSize)
|
||||
updateInfo := webapi.ShareUpdateInfo{
|
||||
Name: share.Name,
|
||||
VolPath: share.VolPath,
|
||||
QuotaForCreate: &newSizeInMB,
|
||||
}
|
||||
if err := dsm.ShareSet(shareName, updateInfo); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
newShare, err := dsm.ShareGet(shareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("new = %#v\n", newShare)
|
||||
|
||||
fmt.Printf("Success, ShareSet(%s), quota: %v -> %v MB\n", shareName, share.QuotaValueInMB, newShare.QuotaValueInMB)
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
func init() {
|
||||
cmdShare.AddCommand(cmdShareGet)
|
||||
cmdShare.AddCommand(cmdShareCreate)
|
||||
cmdShare.AddCommand(cmdShareDelete)
|
||||
cmdShare.AddCommand(cmdShareList)
|
||||
cmdShare.AddCommand(cmdShareClone)
|
||||
cmdShare.AddCommand(cmdShareSnapshotCreate)
|
||||
cmdShare.AddCommand(cmdShareSnapshotDelete)
|
||||
cmdShare.AddCommand(cmdShareSnapshotList)
|
||||
|
||||
cmdShare.AddCommand(cmdSharePermissionList)
|
||||
cmdShare.AddCommand(cmdSharePermissionSet)
|
||||
cmdShare.AddCommand(cmdShareSet)
|
||||
}
|
||||
12
test/sanity/sanity-test-secret-file-template.yaml
Normal file
12
test/sanity/sanity-test-secret-file-template.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
# CreateVolumeSecret:
|
||||
# DeleteVolumeSecret:
|
||||
# ControllerPublishVolumeSecret:
|
||||
# ControllerUnpublishVolumeSecret:
|
||||
# ControllerValidateVolumeCapabilitiesSecret:
|
||||
NodeStageVolumeSecret:
|
||||
username: "username"
|
||||
password: "password"
|
||||
# NodePublishVolumeSecret:
|
||||
# CreateSnapshotSecret:
|
||||
# DeleteSnapshotSecret:
|
||||
# ControllerExpandVolumeSecret:
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
|
||||
const (
|
||||
ConfigPath = "./../../config/client-info.yml"
|
||||
SecretsFilePath = "./sanity-test-secret-file.yaml"
|
||||
)
|
||||
|
||||
func TestSanity(t *testing.T) {
|
||||
@@ -69,6 +70,15 @@ func TestSanity(t *testing.T) {
|
||||
testConfig.TargetPath = targetPath
|
||||
testConfig.StagingPath = stagingPath
|
||||
testConfig.Address = endpoint
|
||||
testConfig.SecretsFile = SecretsFilePath
|
||||
|
||||
// Set Input parameters for test
|
||||
testConfig.TestVolumeParameters = map[string]string{
|
||||
"protocol": "smb",
|
||||
}
|
||||
|
||||
// testConfig.TestVolumeAccessType = "block" // raw block
|
||||
|
||||
// Run test
|
||||
sanity.Test(t, testConfig)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user