nfs - server (10.0.1.9)
############
Amazon Linux machine
# yum update
# yum install nfs-utils -y
# systemctl enable nfs-server
# systemctl start nfs-server
# mkdir -p /srv/nfs/k8sdata
# chmod -R 777 /srv/nfs/k8sdata
# vi /etc/exports
/srv/nfs/k8sdata *(rw,no_subtree_check,no_root_squash,insecure)
:wq!
# exportfs -rav
# exportfs -v
/srv/nfs/k8sdata
(rw,sync,wdelay,hide,no_subtree_check,sec=sys,insecure,no_root_squash,no_all_squash)
Now on the NFS client:
#################
Ubuntu 16.04
# sudo apt-get update
# sudo apt-get install nfs-common
# showmount -e 10.0.1.9
Export list for 10.0.1.9:
/srv/nfs/k8sdata *
Testing
-------
# sudo mount -t nfs 10.0.1.9:/srv/nfs/k8sdata /mnt
# root@dn1:~# df -h | grep nfs
10.0.1.9:/srv/nfs/k8sdata 8.0G 1.8G 6.3G 22% /mnt
# umount /mnt
Now, from the kubectl terminal, create the PV and PVC using the YAML files below:
cat > 4-pv-nfs.yaml
# 4-pv-nfs.yaml — PersistentVolume backed by the NFS export on 10.0.1.9.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-pv1
  labels:
    type: local
spec:
  # Must match the storageClassName in the PVC for the claim to bind.
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany   # NFS supports simultaneous mounts from many nodes
  nfs:
    server: 10.0.1.9
    path: "/srv/nfs/k8sdata"
cat > 4-pvc-nfs.yaml
# 4-pvc-nfs.yaml — PersistentVolumeClaim that binds to pv-nfs-pv1.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-pv1
spec:
  # Must match the storageClassName ("manual") declared in the PV.
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Mi   # satisfied by the 1Gi PV above
--------------------------------------
Note: the storageClassName in the PVC YAML file must be the same as the storageClassName in the PV YAML file:
# kubectl create -f 4-pv-nfs.yaml
persistentvolume/pv-nfs-pv1 created
# kubectl create -f 4-pvc-nfs.yaml
persistentvolumeclaim/pvc-nfs-pv1 created
ubuntu@namenode:~$ kubectl get pv,pvc
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/pv-nfs-pv1 1Gi RWX Retain Bound default/pvc-nfs-pv1 manual 4m26s
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/pvc-nfs-pv1 Bound pv-nfs-pv1 1Gi RWX manual 6s
Now you can create a deployment with the volume parameters below:
--------------------------------------
# nginx Deployment that mounts the NFS-backed PVC at the web root.
# NOTE: extensions/v1beta1 was removed in Kubernetes 1.16; apps/v1 is the
# supported API group for Deployment (spec.selector is mandatory there,
# and it is already present below).
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    run: nginx
  name: nginx-deploy-nfs
spec:
  replicas: 1
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      volumes:
        - name: www                 # must match volumeMounts[].name below
          persistentVolumeClaim:
            claimName: pvc-nfs-pv1
      containers:
        - image: nginx
          name: nginx
          volumeMounts:
            - name: www             # same name as in the volumes[] entry
              mountPath: /usr/share/nginx/html
----------------------------------------
Note: the volume name ("name: www") under volumeMounts in the "containers:" section must match the "name: www" entry in the "volumes:" section.
ubuntu@namenode:~/kubernetes/yamls$ kubectl create -f 4-nfs-nginx-1.6.yaml
deployment.apps/nginx-deploy-nfs created
ubuntu@namenode:~/kubernetes/yamls$ kubectl get pods -o wide | grep nfs
nginx-deploy-nfs-6fdd5b84cc-s4qfp 1/1 Running 0 35s 10.244.1.35 dn1
Now check in Dn1 if the nfs is mounted
root@dn1:~# df -h | grep nfs
10.0.1.9:/srv/nfs/k8sdata 8.0G 1.8G 6.3G 22% /var/lib/kubelet/pods/89523633-0a50-43c6-b13a-a96270f0c819/volumes/kubernetes.io~nfs/pv-nfs-pv1
Now expose your deployment and create the nginx-ingress resource rule to reach this deployment from outside.
# kubectl expose deploy nginx-deploy-nfs --port 80
That is all
Cheers.
############
Amazon Linux machine
# yum update
# yum install nfs-utils -y
# systemctl enable nfs-server
# systemctl start nfs-server
# mkdir -p /srv/nfs/k8sdata
# chmod -R 777 /srv/nfs/k8sdata
# vi /etc/exports
/srv/nfs/k8sdata *(rw,no_subtree_check,no_root_squash,insecure)
:wq!
# exportfs -rav
# exportfs -v
/srv/nfs/k8sdata
Now on the NFS client:
#################
Ubuntu 16.04
# sudo apt-get update
# sudo apt-get install nfs-common
# showmount -e 10.0.1.9
Export list for 10.0.1.9:
/srv/nfs/k8sdata *
Testing
-------
# sudo mount -t nfs 10.0.1.9:/srv/nfs/k8sdata /mnt
# root@dn1:~# df -h | grep nfs
10.0.1.9:/srv/nfs/k8sdata 8.0G 1.8G 6.3G 22% /mnt
# umount /mnt
Now, from the kubectl terminal, create the PV and PVC using the YAML files below:
cat > 4-pv-nfs.yaml
# 4-pv-nfs.yaml — PersistentVolume backed by the NFS export on 10.0.1.9.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-pv1
  labels:
    type: local
spec:
  # Must match the storageClassName in the PVC for the claim to bind.
  storageClassName: manual
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany   # NFS supports simultaneous mounts from many nodes
  nfs:
    server: 10.0.1.9
    path: "/srv/nfs/k8sdata"
cat > 4-pvc-nfs.yaml
# 4-pvc-nfs.yaml — PersistentVolumeClaim that binds to pv-nfs-pv1.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-pv1
spec:
  # Must match the storageClassName ("manual") declared in the PV.
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Mi   # satisfied by the 1Gi PV above
--------------------------------------
Note: the storageClassName in the PVC YAML file must be the same as the storageClassName in the PV YAML file:
# kubectl create -f 4-pv-nfs.yaml
persistentvolume/pv-nfs-pv1 created
# kubectl create -f 4-pvc-nfs.yaml
persistentvolumeclaim/pvc-nfs-pv1 created
ubuntu@namenode:~$ kubectl get pv,pvc
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/pv-nfs-pv1 1Gi RWX Retain Bound default/pvc-nfs-pv1 manual 4m26s
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/pvc-nfs-pv1 Bound pv-nfs-pv1 1Gi RWX manual 6s
Now you can create a deployment with the volume parameters below:
--------------------------------------
# nginx Deployment that mounts the NFS-backed PVC at the web root.
# NOTE: extensions/v1beta1 was removed in Kubernetes 1.16; apps/v1 is the
# supported API group for Deployment (spec.selector is mandatory there,
# and it is already present below).
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    run: nginx
  name: nginx-deploy-nfs
spec:
  replicas: 1
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      volumes:
        - name: www                 # must match volumeMounts[].name below
          persistentVolumeClaim:
            claimName: pvc-nfs-pv1
      containers:
        - image: nginx
          name: nginx
          volumeMounts:
            - name: www             # same name as in the volumes[] entry
              mountPath: /usr/share/nginx/html
----------------------------------------
Note: the volume name ("name: www") under volumeMounts in the "containers:" section must match the "name: www" entry in the "volumes:" section.
ubuntu@namenode:~/kubernetes/yamls$ kubectl create -f 4-nfs-nginx-1.6.yaml
deployment.apps/nginx-deploy-nfs created
ubuntu@namenode:~/kubernetes/yamls$ kubectl get pods -o wide | grep nfs
nginx-deploy-nfs-6fdd5b84cc-s4qfp 1/1 Running 0 35s 10.244.1.35 dn1
Now check in Dn1 if the nfs is mounted
root@dn1:~# df -h | grep nfs
10.0.1.9:/srv/nfs/k8sdata 8.0G 1.8G 6.3G 22% /var/lib/kubelet/pods/89523633-0a50-43c6-b13a-a96270f0c819/volumes/kubernetes.io~nfs/pv-nfs-pv1
Now expose your deployment and create the nginx-ingress resource rule to reach this deployment from outside.
# kubectl expose deploy nginx-deploy-nfs --port 80
That is all
Cheers.
Comments