First, install Docker. This environment must be set up by yourself; the steps are as follows:

1
2
3
4
5
6
7
8
9
10
11
# Install prerequisites and the docker-ce yum repository
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install docker
yum list docker-ce --showduplicates | sort -r   # list the available docker versions
#yum install docker-ce    # the repo enables the "stable" channel by default, so this installs the latest stable version
#yum install <FQPN>       # e.g. yum install docker-ce-18.06.0.ce -y
# The version below has been verified; installing it is recommended
yum install docker-ce-18.06.0.ce -y
systemctl start docker
systemctl enable docker
docker version   # kubernetes 1.13.4 is installed later, so docker 18.06 is recommended

Then install minikube using Alibaba Cloud's mirrored build; otherwise the initialization gets stuck because the upstream download servers are unreachable from behind the firewall:

1
2
3
# Download the Alibaba Cloud mirror of minikube v0.35.0 and install it as a system command
curl -Lo minikube http://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/releases/v0.35.0/minikube-linux-amd64
chmod +x minikube
mv minikube /usr/bin/minikube
Remember to turn off swap first; the command is: swapoff -a

Add the Alibaba Cloud Kubernetes yum repository and install the related command-line components:

1
2
3
4
5
6
7
8
9
10
# Add the Alibaba Cloud Kubernetes yum repository, then install kubectl/kubelet/kubeadm
cd /etc/yum.repos.d/
cat >>kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install kubectl kubelet kubeadm -y
systemctl start kubelet && systemctl enable kubelet

Create the local Kubernetes environment with the default VirtualBox driver:

minikube start --registry-mirror=https://registry.docker-cn.com

The following words appear

- Verifying component health .....
+ kubectl is now configured to use "minikube"
= Done! Thank you for using minikube!

The local minikube installation is now complete. Of course, it cannot be reached from outside yet; that requires a separate ingress or port forwarding.

############################################################################

---------------------------------- I'm the divider ------------------------------------ Cut it --------------------------------

############################################################################

ingress Installation method :

Generate ingress:

Create deployment.yaml:

# NOTE(review): kube-system already exists in every cluster; applying this is a
# no-op apart from adding the labels. The upstream manifest uses a dedicated
# "ingress-nginx" namespace instead — confirm which was intended.
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
# Main nginx configuration ConfigMap, referenced by the controller's
# --configmap flag in the Deployment below.
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: kube-system
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
# TCP service exposure map, referenced by --tcp-services-configmap.
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: kube-system
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
# UDP service exposure map, referenced by --udp-services-configmap.
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: kube-system
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
# ServiceAccount the controller pods run as; bound to the Role/ClusterRole below.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: kube-system
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
# Cluster-wide permissions the controller needs: watch config/endpoints/ingresses,
# emit events, and update ingress status.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses/status
    verbs:
      - update
---
# Namespaced permissions, mainly for leader election via the
# "ingress-controller-leader-nginx" ConfigMap.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: kube-system
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
---
# Binds the namespaced Role to the controller's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: kube-system
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: kube-system
---
# Binds the ClusterRole to the controller's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: kube-system
---
# The ingress controller itself (single replica). Its pod labels are what the
# Service below must select on.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: kube-system
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      containers:
        - name: nginx-ingress-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 33
            runAsUser: 33
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
---

---

---

Then create svc.yaml:

Service:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# NodePort Service exposing the controller pods on ports 30468/30471.
apiVersion: v1
kind: Service
metadata:
  # Uncomment to bind to an existing Alibaba Cloud load balancer:
  # annotations:
  #   service.beta.kubernetes.io/alicloud-loadbalancer-id: "lb-wz9du18pa4e7f93vetzww"
  labels:
    app: nginx-ingress
  name: nginx-ingress
  namespace: kube-system
spec:
  ports:
    - name: http
      nodePort: 30468
      port: 80
      protocol: TCP
      targetPort: 80
    - name: https
      nodePort: 30471
      port: 443
      protocol: TCP
      targetPort: 443
  selector:
    # Must match the pod labels set by the controller Deployment
    #app: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  #type: LoadBalancer
  type: NodePort
status:
  loadBalancer:
    ingress:
      - ip: 39.108.26.119  # change this to your own machine's IP

The command to create the pods from the YAML files above is:

kubectl apply -f xxxx.yaml

Application images can be pulled from GitLab. The ConfigMap for the application is not covered here — you need to configure it yourself, and you also need to write your own application deployment YAML.

Here's a simple installation script .

#!/bin/bash
# Install docker (used to pull the required local images).
# docker-ce 18.06 is used, which supports kubernetes 1.13.
# Check that the NIC uses a static IP rather than DHCP
grep -rE "dhcp" /etc/sysconfig/network-scripts/ifcfg-*
if [ $? -eq 0 ];
then
echo "The NIC is in DHCP mode; please change it to a static IP"
exit 1
else
echo "The NIC configuration is OK."
fi
yum clean all && yum repolist
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce-18.06.0.ce -y
systemctl start docker
systemctl enable docker
VERSION=`docker version`
if [ $? -eq 0 ];
then
echo "docker version information: $VERSION"
else
echo "docker installation error, please check the error log"
exit 1
fi
# Make sure iptables forwards bridged traffic so images can be pulled;
# otherwise DNS resolution errors are reported.
echo "1" >/proc/sys/net/bridge/bridge-nf-call-iptables
######## Fetch the minikube binary and install it as a system command ########
cd /data
curl -Lo minikube http://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/releases/v0.35.0/minikube-linux-amd64
chmod +x minikube
mv minikube /usr/bin/minikube
swapoff -a # disable swap, otherwise initialization reports an error
cd /etc/yum.repos.d/
cat>>kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install kubectl kubelet kubeadm -y
systemctl start kubelet && systemctl enable kubelet
######## Start minikube ########
minikube start --vm-driver=none
if [ $? -eq 0 ];
then
echo "minikube initialized successfully"
else
echo "minikube initialization failed. Please check the error output and re-run 'minikube start --vm-driver=none'. If the error persists, run 'minikube delete' to clean the cluster, then re-run the initialization command!"
minikube delete
exit 1
fi
# By default minikube uses the VirtualBox driver to create the local Kubernetes environment
#minikube start --registry-mirror=https://registry.docker-cn.com
STATUS=`kubectl get node | awk '{print$2}' | sed -n '2p'`
if [ "$STATUS" = "Ready" ];
then
echo "Cluster state: $STATUS"
else
echo "Cluster state is not Ready; please contact operations."
fi
#echo "Cluster state: $STATUS"
#echo "Cluster state is not Ready; please contact operations."

Local k8s Environmental Science minikube More about the build process

  1. Ubuntu12.04 Embedded cross compiler environment arm-linu-gcc Set up process , The illustration

    Reprint : Wang wensong's blog Ubuntu12.04 Embedded cross compiler environment arm-linu-gcc Set up process , The illustration Installation environment        Linux edition :Ubuntu 12.04     Kernel version :Linux 3.5.0 ...

  2. Ubuntu 12.04 Embedded cross compiler environment arm-linux-gcc Set up process

    Ubuntu 12.04 Embedded cross compiler environment arm-linux-gcc Set up process Linux edition :Ubuntu 12.04 Kernel version :Linux 3.5.0 Cross compiler version :arm-linux-gcc-4. ...

  3. Ubuntu On hi3531 Cross compile environment arm-hisiv100nptl-linux Set up process

    install SDK 1.Hi3531 SDK Bag location     stay "Hi3531_V100R001***/01.software/board" Under the table of contents , You can see a Hi3531_SDK_Vx ...

  4. 3 Construction of local agent environment for wechat development -- Realize the integration of Intranet ip Mapping to the Internet

    The development of WeChat official account , To build a website , And it is possible to modify the content of the website at any time for debugging , This requires that the temporary external network can return to the local development environment to build the project for testing , That is, the intranet is mapped to the public network , But many developers don't have their own domain names and servers , Let's build a Ben first ...

  5. Local + Distributed Hadoop Complete construction process

    1 summary Hadoop It is very important in the big data technology system , Hailed as a world changing 7 individual Java One of the project ( be left over 6 Yes Junit.Eclipse.Spring.Solr.HudsonAndJenkins.Android ...

  6. [kubernetes] Use Minikube Quickly build local k8s Environmental Science ( be based on Docker Drive mode )

    One . Experimental environment operating system :Centos 7 x86_64 Docker:1.12.6 Two . Deploy k8s step 2.1   install kubectl cat <<EOF > /etc/yum. ...

  7. Local Windows Environmental Science Dubbo Build tests

    Dubbo Introduce Dubbo[] Is a distributed services framework , Committed to providing high performance and transparency RPC Remote service invocation scenarios , as well as SOA Service governance solution . Its core part includes : Telematics :  Provides for a variety of long connection based NIO Framework Abstract encapsulation , ...

  8. Ubuntu 8.04 Embedded cross compiler environment arm-linux-gcc Diagram of construction process

    Linux edition :Ubuntu8.04 Kernel version :Linux 2.6.24 Cross compiler version :arm-linux-gcc-3.4.1 Cross compiler download link : https://share.weiyun.com ...

  9. Ubuntu 12.04 Embedded cross compiler environment arm-linux-gcc Diagram of construction process

    Linux edition :Ubuntu 12.04 Kernel version :Linux 3.5.0 Cross compiler version :arm-linux-gcc-4.4.3 Cross compiler download See this article http://www.linuxidc.c ...

Random recommendation

  1. angularjs And Restangular usage

    Reference material : angularjs How to get service port data ( Three ) Study -[ front end ]-angularjs Basic framework and method of sending request to server Restangular on Angular

  2. JAVA: abstract class VS Interface

    JAVA Compare the differences between abstract classes and interfaces in , And their respective uses . 1.JAVA abstract class : Abstract classes can't be instantiated , It's no different from the normal class . stay <JAVA Programming idea > In a Book , Define an abstract class as “ Class containing abstract methods ...

  3. Luogu 1352 CODEVS1380 A dance without a boss

    There seems to be something wrong with rogue's test data ,4 A little bit RE It's unavoidable CODEVS can AC —————— 10 Remember in minutes : Open the range of arrays to 10000+ It's over —————— Title Description  Description Ural The university has N individual ...

  4. Sheep and cars ( or s Three questions (Monty Hall problem) Also known as the montehall problem )

    Three questions (Monty Hall problem) Also known as the montehall problem . Montessori problem or Montessori paradox , It's about American video game shows Let's Make a Deal. The name of the question comes from Monty, the host of the show · Holzer (Mon ...

  5. 201521123035《Java Programming 》 Week 11 homework

    1. This week's learning summary 1.1 In the way you like ( Mind mapping or something ) Summarize the related content of multithreading . This week's multi thread conflict starts with multi thread conflict , So mutual exclusive sharing and mutual exclusive access are proposed . among , Exclusive access refers to synchronize ...

  6. vue Learning notes —— route

    1 Routing configuration stay vue.config Middle configuration , You can use... In your code @ To express src Under the table of contents import aa from '@/aa/index.js' 2 Single page can load lazily 3 Create dynamic routing Routing is defined in : ...

  7. modbus-vcr Introduce

    Related links :modbus-vcr modbus-vcr It's a Ettercap Plug in for , Used in industrial control system protocols that lack data integrity . This Ettercap The plug-in performs a MITM The attack is using Modbus The system of agreement ...

  8. CF643D Bearish Fanpages

    The question The English version of the title Problems Submit Status Standings Custom test .input-output-copier { font-size: 1.2rem; floa ...

  9. queue queue example ( Producer and consumer models )

    import queue, threading, time q = queue.Queue(maxsize=10)def producter(n): count = 1 while True: q.p ...

  10. C++ 0x Use condition_variable And Mutex Synchronize two threads

    Mutex : lock     Only one thread is allowed to access its code content at the same time personification : It's just a lock , Sure lock unlock, Anyone can get the lock , Open the door and enter the house , But when you go in , I'll lock the door (lock) If someone wants to get in, they have to wait for him to get out ...