
Merge pull request #609 from humblec/file-provisioner-1
Add clusterid description to README and also fix error strings.
childsb authored Feb 21, 2018
2 parents 3fe78d2 + c621e7e commit d30fcc2
Showing 5 changed files with 119 additions and 33 deletions.
17 changes: 14 additions & 3 deletions gluster/file/README.md
@@ -8,6 +8,8 @@ quay.io/external_storage/glusterfile-provisioner:latest

Gluster File Provisioner is an external provisioner which dynamically provisions gluster file volumes on demand. A PersistentVolumeClaim that requests this external provisioner's identity (for example, `gluster.org/glusterfile`) will be served by this provisioner.
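
For illustration, such a claim simply references a storage class backed by this provisioner. A minimal sketch is shown below; the claim name and storage-class annotation follow the bundled examples, while the `spec` values are placeholders:

~~~
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: claim1
  annotations:
    volume.beta.kubernetes.io/storage-class: "glusterfile"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
~~~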

This project is related to and relies on the following projects:

* [glusterfs](https://github.com/gluster/glusterfs)
* [heketi](https://github.com/heketi/heketi)
* [gluster-kubernetes](https://github.com/gluster/gluster-kubernetes)
@@ -51,7 +53,7 @@ parameters:
restuser: "admin"
restsecretnamespace: "default"
restsecretname: "heketi-secret"
clusterids: "454811fcedbec6316bc10e591a57b472"
clusterid: "454811fcedbec6316bc10e591a57b472"
volumetype: "replicate:3"
volumeoptions: "features.shard enable"
volumenameprefix: "dept-dev"
@@ -66,6 +68,13 @@ parameters:

* `gidMin` + `gidMax` : The minimum and maximum values of the GID range for the storage class. A unique value (GID) in this range (gidMin-gidMax) will be used for dynamically provisioned volumes. These are optional values. If not specified, the volume will be provisioned with a GID between 2000 and 2147483647, which are the defaults for gidMin and gidMax respectively.

* `clusterid`: The ID of the cluster that Heketi will use when provisioning the volume. It can also be a comma separated list of cluster IDs (see the example after the note below). This is an optional parameter.

Note
To get the cluster ID, execute the following command:
~~~
# heketi-cli cluster list
~~~
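
For example, more than one cluster can be specified by joining the IDs with commas (the IDs below are placeholders):
~~~
clusterid: "<cluster-id-1>,<cluster-id-2>"
~~~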
* `volumetype` : The volume type and its parameters can be configured with this optional value. If the volume type is not mentioned, it's up to the provisioner to decide the volume type.
For example:

@@ -88,9 +97,11 @@ For available volume options and its administration refer: ([Administration Guid

Please note that the value for this parameter cannot contain `_` in the storage class. This is an optional parameter.

Reference : ([How to configure Gluster on Kubernetes](https://github.com/gluster/gluster-kubernetes/blob/master/docs/setup-guide.md))
Additional Reference:

([How to configure Gluster on Kubernetes](https://github.com/gluster/gluster-kubernetes/blob/master/docs/setup-guide.md))

Reference : ([How to configure Heketi](https://github.com/heketi/heketi/wiki/Setting-up-the-topology))
([How to configure Heketi](https://github.com/heketi/heketi/wiki/Setting-up-the-topology))

When persistent volumes are dynamically provisioned, the Gluster plugin automatically creates an endpoint and a headless service named `glusterfile-dynamic-<claimname>`. This dynamic endpoint and service will be deleted automatically when the persistent volume claim is deleted.

131 changes: 103 additions & 28 deletions gluster/file/cmd/glusterfile-provisioner/glusterfile-provisioner.go
@@ -32,11 +32,13 @@ import (
"github.com/pborman/uuid"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/volume"
)

const (
@@ -188,7 +190,7 @@ func (p *glusterfileProvisioner) CreateVolume(gid *int, config *provisionerConfi
var clusterIDs []string
customVolumeName := ""

glog.V(2).Infof("create volume of size %d GiB and configuration %+v", sz, config)
glog.V(2).Infof("create volume of size %dGiB and configuration %+v", sz, config)

if config.url == "" {
glog.Errorf("REST server endpoint is empty")
@@ -214,11 +216,11 @@ func (p *glusterfileProvisioner) CreateVolume(gid *int, config *provisionerConfi
volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: config.volumeType, GlusterVolumeOptions: p.volumeOptions}
volume, err := cli.VolumeCreate(volumeReq)
if err != nil {
glog.Errorf("error creating volume: %v ", err)
return nil, 0, "", fmt.Errorf("error creating volume: %v", err)
glog.Errorf("failed to create gluster volume: %v", err)
return nil, 0, "", fmt.Errorf("failed to create gluster volume: %v", err)
}

glog.V(1).Infof("volume with size %d and name: %s created", volume.Size, volume.Name)
glog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name)

volID = volume.Id
dynamicHostIps, err := getClusterNodes(cli, volume.Cluster)
@@ -234,11 +236,11 @@ func (p *glusterfileProvisioner) CreateVolume(gid *int, config *provisionerConfi
glog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
deleteErr := cli.VolumeDelete(volume.Id)
if deleteErr != nil {
glog.Errorf("error when deleting the volume: %v , manual deletion required", deleteErr)
glog.Errorf("error when deleting the volume: %v, manual deletion required", deleteErr)
}
return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
}
glog.V(3).Infof("dynamic endpoint %v and service : %v ", endpoint, service)
glog.V(3).Infof("dynamic endpoint %v and service %v", endpoint, service)

return &v1.GlusterfsVolumeSource{
EndpointsName: endpoint.Name,
@@ -247,6 +249,70 @@ func (p *glusterfileProvisioner) CreateVolume(gid *int, config *provisionerConfi
}, sz, volID, nil
}

// RequiresFSResize returns false: a glusterfs volume is a file based (shared
// filesystem) volume, so expanding it on the Heketi side is sufficient and no
// node-side filesystem resize is needed.
func (p *glusterfileProvisioner) RequiresFSResize() bool {
return false
}

func (p *glusterfileProvisioner) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
pvSpec := spec.PersistentVolume.Spec
volumeName := pvSpec.Glusterfs.Path
glog.V(2).Infof("Request to expand volume: [%s]", volumeName)
volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)

if err != nil {
return oldSize, fmt.Errorf("failed to get volumeID for volume [%s], err: %v", volumeName, err)
}

heketiModeArgs, credErr := p.getRESTCredentials(spec.PersistentVolume)
if credErr != nil {
glog.Errorf("failed to retrieve REST credentials from pv: %v", credErr)
return oldSize, fmt.Errorf("failed to retrieve REST credentials from pv: %v", credErr)
}

glog.V(4).Infof("Expanding volume %q with configuration %+v", volumeID)

//Create REST server connection
cli := gcli.NewClient(heketiModeArgs["url"], heketiModeArgs["user"], heketiModeArgs["restsecretvalue"])
if cli == nil {
glog.Errorf("failed to create glusterfs REST client")
return oldSize, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
}

// Find out delta size
expansionSize := (newSize.Value() - oldSize.Value())

expansionSizeGiB := int(util.RoundUpToGiB(expansionSize))

// Find out requested Size
requestGiB := int(util.RoundUpToGiB(newSize.Value()))

//Check the existing volume size
currentVolumeInfo, err := cli.VolumeInfo(volumeID)
if err != nil {
glog.Errorf("error when fetching details of volume[%s]: %v", volumeName, err)
return oldSize, err
}

if currentVolumeInfo.Size >= requestGiB {
return newSize, nil
}

// Make volume expansion request
volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGiB}

// Expand the volume
volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq)
if err != nil {
glog.Errorf("error when expanding the volume[%s]: %v", volumeName, err)
return oldSize, err
}

glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size)
newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size))
return newVolumeSize, nil
}
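
// Worked example of the sizing logic in ExpandVolumeDevice (illustrative
// values only): growing a claim from 5Gi to 8Gi yields
//   expansionSizeGiB = int(util.RoundUpToGiB(8Gi - 5Gi)) = 3
//   requestGiB       = int(util.RoundUpToGiB(8Gi))       = 8
// If Heketi already reports the volume at 8 GiB or larger, the requested size
// is returned unchanged; otherwise a VolumeExpandRequest for the 3 GiB delta
// is issued and the size Heketi reports back becomes the new PV size.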

func (p *glusterfileProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) {

addrlist := make([]v1.EndpointAddress, len(hostips))
@@ -350,32 +416,16 @@ func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {
return volumeID, nil
}

func (p *glusterfileProvisioner) Delete(volume *v1.PersistentVolume) error {

glog.V(1).Infof("deleting volume")

err := p.allocator.Release(volume)
if err != nil {
return err
}

glog.V(2).Infof("delete volume: %s", volume.Spec.Glusterfs.Path)

volumeName := volume.Spec.Glusterfs.Path
volumeID, err := getVolumeID(volume, volumeName)
if err != nil {
return fmt.Errorf("failed to get volumeID: %v", err)
}

delRestString, ok := volume.Annotations[restStr]
func (p *glusterfileProvisioner) getRESTCredentials(pv *v1.PersistentVolume) (map[string]string, error) {
restString, ok := pv.Annotations[restStr]
if !ok {
return fmt.Errorf("volume annotation for server details not found on PV")
return nil, fmt.Errorf("volume annotation for server details not found on PV")
}

delRestStrSlice := dstrings.Split(delRestString, ",")
restStrSlice := dstrings.Split(restString, ",")
heketiModeArgs := make(map[string]string)

for _, v := range delRestStrSlice {
for _, v := range restStrSlice {
if v != "" {
s := dstrings.Split(v, ":")

@@ -393,9 +443,34 @@ func (p *glusterfileProvisioner) Delete(volume *v1.PersistentVolume) error {
heketiModeArgs["restsecretvalue"], err = parseSecret(heketiModeArgs["secretnamespace"], heketiModeArgs["secret"], p.client)
if err != nil {
glog.Errorf("failed to parse secret %s: %v", heketiModeArgs["secret"], err)
return err
return nil, err
}
}

return heketiModeArgs, nil
}
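
// Note on the annotation parsed above (illustrative; the exact layout depends
// on what was stored on the PV at provision time): the restStr annotation
// holds a comma separated list of key:value pairs, and getRESTCredentials
// builds heketiModeArgs from keys such as "url", "user", "secret" and
// "secretnamespace", then resolves the referenced secret via parseSecret into
// heketiModeArgs["restsecretvalue"], which is passed to gcli.NewClient as the
// REST secret.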

func (p *glusterfileProvisioner) Delete(volume *v1.PersistentVolume) error {

glog.V(1).Infof("deleting volume, path %s", volume.Spec.Glusterfs.Path)

err := p.allocator.Release(volume)
if err != nil {
return err
}

volumeName := volume.Spec.Glusterfs.Path
volumeID, err := getVolumeID(volume, volumeName)
if err != nil {
return fmt.Errorf("failed to get volumeID: %v", err)
}

heketiModeArgs, credErr := p.getRESTCredentials(volume)
if credErr != nil {
glog.Errorf("failed to retrieve REST credentials from pv: %v", credErr)
return fmt.Errorf("failed to retrieve REST credentials from pv: %v", credErr)
}

cli := gcli.NewClient(heketiModeArgs["url"], heketiModeArgs["user"], heketiModeArgs["restsecretvalue"])
if cli == nil {
glog.Errorf("failed to create REST client")
2 changes: 1 addition & 1 deletion gluster/file/examples/claim.yaml
@@ -1,7 +1,7 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: claimheketi
name: claim1
annotations:
volume.beta.kubernetes.io/storage-class: "glusterfile"
spec:
2 changes: 1 addition & 1 deletion gluster/file/examples/class.yaml
@@ -8,7 +8,7 @@ parameters:
restuser: "admin"
restsecretnamespace: "default"
restsecretname: "heketi-secret"
#clusterids: "454811fcedbec6316bc10e591a57b472"
#clusterid: "454811fcedbec6316bc10e591a57b472"
volumetype: "replicate:3"
volumeoptions: "features.shard enable"
volumenameprefix: "dept-dev"
File renamed without changes.
