Skip to content

Commit 5ad8626

Browse files
committed
first commit
0 parents  commit 5ad8626

14 files changed

+1443
-0
lines changed

Dockerfile

+33
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
#FROM golang:1.12.4 as builder
# Build stage: compile the provisioner inside a Go 1.12 image (Aliyun mirror
# of the official golang image).
FROM registry.cn-hangzhou.aliyuncs.com/bamboo/golang:1.12.4 as builder

# GOPATH-relative source directory for the san-client project.
ARG DIR=$GOPATH/src/github.com/kubernetes-sigs/san-client

WORKDIR $DIR

RUN mkdir -p $DIR

# Copy the whole build context into the source directory.
COPY / $DIR

# Build with Go modules enabled.
ENV GO111MODULE=on
# setting go mod proxy, proxy run on onebox
ENV GOPROXY=https://goproxy.io
#RUN go mod tidy
#RUN go mod vendor
#ENV GO111MODULE=off

# -s -w strips symbol and DWARF debug info to shrink the binary.
RUN go build -o main -ldflags '-s -w' -v $DIR/cmd/san-client-provisioner

# Runtime stage: minimal Alpine image.
FROM alpine:3.9

# Configurable APK mirror (defaults to Aliyun) for faster package installs.
ARG APK_MIRROR=mirrors.aliyun.com
RUN sed -i "s/dl-cdn.alpinelinux.org/${APK_MIRROR}/g" /etc/apk/repositories

# glibc compatibility shim so a glibc-linked Go binary runs on musl-based Alpine.
RUN apk add --no-cache libc6-compat
## DON'T modify above, as it's common for all Alpine based parent and docker caching layer will used

CMD ["./main"]

# NOTE(review): COPY after CMD is valid (CMD is image metadata, not a build
# step); with no WORKDIR set in this stage the binary lands in "/", which is
# where the relative "./main" resolves at container start.
COPY --from=builder /go/src/github.com/kubernetes-sigs/san-client/main .

README.md

+28
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
## san 挂载问题排查
2+
1. FailedMount
3+
4+
Warning FailedMount 11s (x6 over 50s) kubelet, dbm02 MountVolume.WaitForAttach failed for volume "pvc-53918fcd-c350-11e9-8c87-50af732f4b85" : no fc disk found
5+
6+
7+
ls /dev/disk/by-id/ | grep 845 #
8+
9+
ssh [email protected] lshostvdiskmap | grep -i 845
10+
11+
## delete by SCSI ID
12+
scsi_id=${scsi_id}
13+
wwid=${wwid}
14+
devices=$(lsscsi -i | grep MCS | grep ":${scsi_id}]" | grep -v "$wwid" | awk '{print $1}')
15+
for i in $devices; do device=${i:1:-1} ; echo 1 >/sys/class/scsi_device/$device/device/delete ; done
16+
17+
18+
## format
19+
挂载如果是非 readonly 需要注意format问题,数据会丢失
20+
21+
# Annotation
22+
23+
- ThinRateAnnotation = "volume.beta.kubernetes.io/thin-rate"
24+
25+
- MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
26+
27+
- MkfsFsTypeAnnotation = "volume.beta.kubernetes.io/mkfs-fstype"
28+
default ext4
+278
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,278 @@
1+
package main
2+
3+
import (
4+
"flag"
5+
"fmt"
6+
log "github.com/golang/glog"
7+
"github.com/kubernetes-sigs/san-client/fc"
8+
"k8s.io/api/core/v1"
9+
storage "k8s.io/api/storage/v1"
10+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11+
"k8s.io/apimachinery/pkg/util/wait"
12+
"k8s.io/client-go/kubernetes"
13+
"k8s.io/client-go/kubernetes/scheme"
14+
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
15+
"k8s.io/client-go/rest"
16+
"k8s.io/client-go/tools/record"
17+
volumehelpers "k8s.io/cloud-provider/volume/helpers"
18+
"k8s.io/klog"
19+
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
20+
"os"
21+
"sigs.k8s.io/sig-storage-lib-external-provisioner/controller"
22+
"strconv"
23+
"strings"
24+
)
25+
26+
const (
	// Annotation keys read from the PVC by Provision. The previous values
	// were rotated one position (ThinRate held the mount-options key,
	// MountOption held the mkfs-fstype key, MkfsFsType held the thin-rate
	// key), so each annotation was silently interpreted as a different
	// setting. Values now match both the constant names and the README.

	// ThinRateAnnotation selects the SAN thin-provisioning rate.
	ThinRateAnnotation = "volume.beta.kubernetes.io/thin-rate"
	// MountOptionAnnotation holds comma-separated PV mount options.
	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
	// MkfsFsTypeAnnotation selects the filesystem type (default ext4).
	MkfsFsTypeAnnotation = "volume.beta.kubernetes.io/mkfs-fstype"
	// DiskNameAnnotation records the SAN disk name on the created PV so
	// Delete can find the backing volume.
	DiskNameAnnotation = "volume.beta.kubernetes.io/disk-name"

	// EventComponent is the component name used when recording events.
	EventComponent = "san-client-provisioner"
)
34+
35+
// sanProvisioner provisions FC SAN-backed PersistentVolumes and records
// Kubernetes events about its operations.
type sanProvisioner struct {
	// client talks to the Kubernetes API (node listing, StorageClass reads).
	client kubernetes.Interface
	// eventRecorder emits warning events on PVC objects (e.g. when a derived
	// volume name exceeds the SAN's 63-character limit).
	eventRecorder record.EventRecorder
}
39+
40+
// NewSanProvisioner creates a new san provisioner
41+
func NewSanProvisioner(client kubernetes.Interface) controller.Provisioner {
42+
43+
//TODO: remove this
44+
v1.AddToScheme(scheme.Scheme)
45+
broadcaster := record.NewBroadcaster()
46+
broadcaster.StartLogging(klog.Infof)
47+
broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events(v1.NamespaceAll)})
48+
eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: EventComponent})
49+
50+
return &sanProvisioner{
51+
client: client,
52+
eventRecorder: eventRecorder,
53+
}
54+
}
55+
56+
var _ controller.Provisioner = &sanProvisioner{}
57+
58+
// Provision creates a storage asset and returns a PV object representing it.
//
// The sequence is: round the request up to whole GiB, create the SAN volume,
// look up its WWID, map it to all SAN hosts, clean up abnormal device entries
// on every cluster node, then build the PV with an FC volume source.
//
// NOTE(review): if any step after CreateVolume fails, the freshly created SAN
// volume is NOT cleaned up here — the provisioner's retry will re-enter with
// the same PV name, but an orphaned volume can remain if the claim is deleted
// first. TODO confirm whether fc.CreateVolume is idempotent for an existing name.
func (p *sanProvisioner) Provision(options controller.ProvisionOptions) (*v1.PersistentVolume, error) {
	log.V(5).Infof("Provision options: %+v", options)
	// step1. create disk
	// SAN capacity is specified in whole GiB, so round the request up.
	volSizeBytes := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	capacity, err := volumehelpers.RoundUpToGiBInt(volSizeBytes)
	if err != nil {
		return nil, err
	}
	// SAN connection settings come from StorageClass parameters; port 22
	// (SSH) is hard-coded.
	params := options.StorageClass.Parameters
	fc := fc.NewFcClient(params["user"], params["password"], params["sanserver"], 22, params["sanCliPipelineEndpoint"])
	// Optional thin-provisioning rate, taken from the PVC annotation;
	// empty string means thick provisioning (presumably — the semantics
	// live in fc.CreateVolume).
	thinRate := ""
	if _, ok := options.PVC.Annotations[ThinRateAnnotation]; ok {
		thinRate = options.PVC.Annotations[ThinRateAnnotation]
	}
	volumeName := p.genVolumeName(options.PVName, options.PVC)
	createErr := fc.CreateVolume(volumeName, strconv.Itoa(capacity), thinRate)
	if createErr != nil {
		return nil, createErr
	}
	// step2. GetWWIdByVolume — the WWID identifies the disk to kubelet's FC
	// volume plugin.
	wwid, pvcErr := fc.GetWWIdByVolume(volumeName)
	if pvcErr != nil {
		return nil, pvcErr
	}
	log.V(5).Infof("GetWWIdByVolume wwid: %s", wwid)
	// step3. mapHostForVolume
	// TODO bind node — currently the selected node is only logged; the
	// volume is mapped to ALL hosts below regardless.
	if options.SelectedNode != nil {
		nodeName := options.SelectedNode.Name
		if _, ok := options.SelectedNode.Labels["kubernetes.io/hostname"]; ok {
			nodeName = options.SelectedNode.Labels["kubernetes.io/hostname"]
		}
		log.Infof("SelectedNode nodeName: %s", nodeName)
	}

	err = fc.MapAllHostForVolume(volumeName)
	if err != nil {
		return nil, err
	}

	// step4. removeAbnormalDevice

	//get all k8s nodeAddress
	nodes, err := p.client.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		log.Errorf("get nodes err :%v", err)
		return nil, err
	}
	nodesIp, err := getAllNodeIP(nodes)
	if err != nil || nodesIp == nil {
		log.Errorf("getAllNodeIP nodesIp: %+v, err :%v", nodesIp, err)
		return nil, err
	}

	// Remove stale/abnormal SCSI device entries for this WWID on each node
	// (see README "delete by SCSI ID" procedure).
	err = fc.RemoveAbnormalDevice(nodesIp, volumeName, wwid)
	if err != nil {
		log.Errorf("RemoveAbnormalDevice err :%v", err)
		return nil, err
	}

	// step5. create pv
	// fsType default ext4 and mountOptions
	fsType := "ext4"
	var mountOptions []string
	if options.PVC.Annotations != nil {
		if ft, ok := options.PVC.Annotations[MkfsFsTypeAnnotation]; ok {
			fsType = ft
		}
		if mo, ok := options.PVC.Annotations[MountOptionAnnotation]; ok {
			mountOptions = strings.Split(mo, ",")
		}
	}

	// The SAN disk name is recorded as an annotation so Delete can find the
	// backing volume later.
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: options.PVName,
			Annotations: map[string]string{
				DiskNameAnnotation: volumeName,
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: *options.StorageClass.ReclaimPolicy,
			AccessModes:                   options.PVC.Spec.AccessModes,
			Capacity: v1.ResourceList{
				// Capacity advertises the requested size, not the GiB-rounded
				// size actually allocated on the SAN.
				v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FC: &v1.FCVolumeSource{
					WWIDs:  []string{wwid},
					FSType: fsType,
				},
			},
			MountOptions: mountOptions,
		},
	}
	// check disk is normal mapper
	log.V(5).Infof("provision pv success")
	return pv, nil
}
158+
159+
func (p *sanProvisioner) genVolumeName(pvName string, pvc *v1.PersistentVolumeClaim) string {
160+
volumeName := fmt.Sprintf("%s_%s", pvc.Name, pvc.Namespace)
161+
// FC SAN disk name max length 63
162+
if len(volumeName) > 63 {
163+
p.eventRecorder.Event(pvc, v1.EventTypeWarning, "genVolumeName", fmt.Sprintf("volumeName max length 63, this use pv name, vn: %s", pvName))
164+
return pvName
165+
}
166+
return volumeName
167+
}
168+
169+
func getAllNodeIP(nodes *v1.NodeList) (nodeIP map[string]string, err error) {
170+
if len(nodes.Items) == 0 {
171+
return nil, fmt.Errorf("nodes is nill")
172+
}
173+
nodeIP = make(map[string]string)
174+
for _, node := range nodes.Items {
175+
var name, ip string
176+
177+
name = node.Name
178+
for _, addr := range node.Status.Addresses {
179+
log.V(5).Infof("node: %s, addr: %+v", name, addr)
180+
if addr.Type == v1.NodeInternalIP {
181+
ip = addr.Address
182+
break
183+
}
184+
}
185+
if len(ip) == 0 {
186+
return nil, fmt.Errorf("node: %s, not found NodeExternalIP", name)
187+
}
188+
// HACK: san host name
189+
nodeIP[fmt.Sprintf("h_%s", name)] = ip
190+
}
191+
return
192+
}
193+
194+
// Delete removes the storage asset that was created by Provision represented
195+
// by the given PV.
196+
func (p *sanProvisioner) Delete(volume *v1.PersistentVolume) error {
197+
log.V(5).Infof("Delete volume: %+v", volume)
198+
199+
class, err := p.getClassForVolume(volume)
200+
if err != nil {
201+
return err
202+
}
203+
params := class.Parameters
204+
205+
volumeName := ""
206+
if dn, ok := volume.Annotations[DiskNameAnnotation]; ok {
207+
volumeName = dn
208+
} else {
209+
return fmt.Errorf("pv: %s, annotation not found disk name ", volume.Name)
210+
}
211+
212+
fc := fc.NewFcClient(params["user"], params["password"], params["sanserver"], 22, params["sanCliPipelineEndpoint"])
213+
log.V(5).Infof("DeleteVolume: begin delete san: %s, %s, %s, %s", volumeName, params["user"], params["password"], params["sanserver"])
214+
// step1. RemoveAllMapping
215+
err = fc.RemoveAllMapping(volumeName)
216+
if err != nil {
217+
return err
218+
}
219+
// step2. DeleteVolume
220+
err = fc.DeleteVolume(volumeName)
221+
if err != nil {
222+
return err
223+
}
224+
log.V(5).Infof("provision delete pv success")
225+
return nil
226+
}
227+
228+
// getClassForVolume returns StorageClass
229+
func (p *sanProvisioner) getClassForVolume(pv *v1.PersistentVolume) (*storage.StorageClass, error) {
230+
231+
className := helper.GetPersistentVolumeClass(pv)
232+
if className == "" {
233+
return nil, fmt.Errorf("Volume has no storage class")
234+
}
235+
class, err := p.client.StorageV1().StorageClasses().Get(className, metav1.GetOptions{})
236+
if err != nil {
237+
return nil, err
238+
}
239+
return class, nil
240+
}
241+
242+
func main() {
243+
flag.Parse()
244+
flag.Set("logtostderr", "true")
245+
// Create an InClusterConfig and use it to create a client for the controller
246+
// to use to communicate with Kubernetes
247+
log.V(5).Infof("san provisioner start")
248+
config, err := rest.InClusterConfig()
249+
if err != nil {
250+
log.Fatalf("Failed to create config: %v", err)
251+
}
252+
253+
provisionerName := os.Getenv("PROVISIONER_NAME")
254+
if provisionerName == "" {
255+
log.Fatalf("environment variable %s is not set! Please set it.", provisionerName)
256+
}
257+
258+
clientset, err := kubernetes.NewForConfig(config)
259+
if err != nil {
260+
log.Fatalf("Failed to create client: %v", err)
261+
}
262+
263+
// The controller needs to know what the server version is because out-of-tree
264+
// provisioners aren't officially supported until 1.5
265+
serverVersion, err := clientset.Discovery().ServerVersion()
266+
if err != nil {
267+
log.Fatalf("Error getting server version: %v", err)
268+
}
269+
270+
// Create the provisioner: it implements the Provisioner interface expected by
271+
// the controller
272+
sanProvisioner := NewSanProvisioner(clientset)
273+
274+
// Start the provision controller which will dynamically provision hostPath
275+
pc := controller.NewProvisionController(clientset, provisionerName, sanProvisioner, serverVersion.GitVersion)
276+
//
277+
pc.Run(wait.NeverStop)
278+
}

0 commit comments

Comments
 (0)