```
# Before the change
$ grep server /etc/chrony.conf
# Use public servers from the pool.ntp.org project.
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
```
```
# After the change
$ grep server /etc/chrony.conf
# Use public servers from the pool.ntp.org project.
server ntp.ntsc.ac.cn iburst
```
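After editing the config, chronyd has to be restarted before the new server takes effect. A minimal verification, assuming a systemd-based CentOS 7 host:

```
# Restart chronyd so it picks up the new server list
$ systemctl restart chronyd
# Confirm the new source is reachable; the line prefixed '^*' is the currently synced source
$ chronyc sources
```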
```
# First use kubeadm to check the main image versions it will pull
# Since we pinned the older 1.23.6 release earlier, the apiserver image version listed here falls back to match
$ kubeadm config images list
I0506 11:24:17.061315   16055 version.go:255] remote version is much newer: v1.24.0; falling back to: stable-1.23
k8s.gcr.io/kube-apiserver:v1.23.6
k8s.gcr.io/kube-controller-manager:v1.23.6
k8s.gcr.io/kube-scheduler:v1.23.6
k8s.gcr.io/kube-proxy:v1.23.6
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
```
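If the nodes cannot reach k8s.gcr.io directly, the images can be pre-pulled before running init. A hedged sketch; the mirror address below is only an example, swap in whatever registry you trust:

```
# Pre-pull the images for the pinned version; --image-repository is only needed
# when k8s.gcr.io is unreachable (registry.aliyuncs.com/google_containers is one
# commonly used mirror, not a requirement of this setup)
$ kubeadm config images pull --kubernetes-version v1.23.6 \
    --image-repository registry.aliyuncs.com/google_containers
```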
```
# Initialize the control plane
$ kubeadm init --config kubeadm-calico.conf
[init] Using Kubernetes version: v1.23.6
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
...a large amount of output omitted here...

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:
```
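The join command printed at this point is reused on the workers below. First, though, kubectl needs a kubeconfig: for root the export above is enough, while for a regular user the standard steps from kubeadm's own output apply:

```
# Make kubectl usable for a non-root user by copying the admin kubeconfig
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
```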
```
$ kubectl cluster-info
Kubernetes control plane is running at https://10.31.88.1:6443
CoreDNS is running at https://10.31.88.1:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
```
```
$ kubectl get nodes -o wide
NAME                                     STATUS     ROLES                  AGE     VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
tiny-calico-master-88-1.k8s.tcinternal   NotReady   control-plane,master   4m15s   v1.23.6   10.31.88.1    <none>        CentOS Linux 7 (Core)   3.10.0-1160.62.1.el7.x86_64   docker://20.10.14
```
```
$ kubeadm join 10.31.88.1:6443 --token abcdef.0123456789abcdef \
>         --discovery-token-ca-cert-hash sha256:a4189d36d164a865be540d48fcd10ff13e2f90ed6e901201b6ea2baf96dae0ae
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
```
```
$ kubectl get nodes
NAME                                      STATUS     ROLES                  AGE    VERSION
tiny-calico-master-88-1.k8s.tcinternal    NotReady   control-plane,master   20m    v1.23.6
tiny-calico-worker-88-11.k8s.tcinternal   NotReady   <none>                 105s   v1.23.6
tiny-calico-worker-88-12.k8s.tcinternal   NotReady   <none>                 35s    v1.23.6
```
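All three nodes report NotReady at this stage because no CNI plugin is installed yet, and the node conditions say so. A quick way to confirm, using the master's node name from the listing above:

```
# The Ready condition should mention the missing network plugin, e.g.
# "container runtime network not ready ... NetworkPluginNotReady"
# (exact wording varies across kubelet versions)
$ kubectl describe node tiny-calico-master-88-1.k8s.tcinternal | grep -A 6 'Conditions:'
```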
```
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
  value: "10.88.64.0/18"
- name: CALICO_IPV4POOL_BLOCK_SIZE
  value: "26"
```
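These two values determine how many pod IPs the pool holds and how they are carved into per-node blocks. The arithmetic, which matches the `calicoctl ipam show` output later in this section:

```
# A /18 pool holds 2^(32-18) addresses, carved into /26 blocks of 2^(32-26) = 64 IPs each
$ echo "total IPs in pool: $(( 2 ** (32 - 18) ))"    # 16384
$ echo "available /26 blocks: $(( 2 ** (26 - 18) ))" # 256
```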
```
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  # etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
  # If you're using TLS enabled etcd uncomment the following.
  # You must also populate the Secret below with these files.
  # etcd_ca: ""   # "/calico-secrets/etcd-ca"
  # etcd_cert: "" # "/calico-secrets/etcd-cert"
  # etcd_key: ""  # "/calico-secrets/etcd-key"
  etcd_endpoints: "https://10.31.88.1:2379"
  etcd_ca: "/etc/kubernetes/pki/etcd/ca.crt"
  etcd_cert: "/etc/kubernetes/pki/etcd/server.crt"
  etcd_key: "/etc/kubernetes/pki/etcd/server.key"
```
```
---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # Populate the following with etcd TLS configuration if desired, but leave blank if
  # not using TLS for etcd.
  # The keys below should be uncommented and the values populated with the base64
  # encoded contents of each file that would be associated with the TLS data.
  # Example command for encoding a file contents: cat <file> | base64 -w 0
  etcd-key: LS0tLS1CRUdJTi......tLS0tCg==
  etcd-cert: LS0tLS1CRUdJT......tLS0tLQo=
  etcd-ca: LS0tLS1CRUdJTiB......FLS0tLS0K
```
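The truncated base64 strings above come from the same etcd certificate files referenced in the ConfigMap, and can be regenerated with the command hinted at in the manifest comment:

```
# Encode each etcd TLS file as a single-line base64 string for the Secret
$ base64 -w 0 /etc/kubernetes/pki/etcd/ca.crt       # -> etcd-ca
$ base64 -w 0 /etc/kubernetes/pki/etcd/server.crt   # -> etcd-cert
$ base64 -w 0 /etc/kubernetes/pki/etcd/server.key   # -> etcd-key
```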
```
$ kubectl apply -f calico-etcd.yaml
secret/calico-etcd-secrets created
configmap/calico-config created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
Warning: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget
poddisruptionbudget.policy/calico-kube-controllers created
```
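Before moving on, it is worth waiting for the Calico pods to become Ready; a quick check using the `k8s-app` labels the manifest applies:

```
# Watch the calico-node DaemonSet pods and the kube-controllers Deployment come up
$ kubectl get pods -n kube-system -l k8s-app=calico-node -o wide
$ kubectl get pods -n kube-system -l k8s-app=calico-kube-controllers
```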
```
$ cat calicoctl-etcd.yaml
# Calico Version v3.22.2
# https://projectcalico.docs.tigera.io/releases#v3.22.2
# This manifest includes the following component versions:
#   calico/ctl:v3.22.2

apiVersion: v1
kind: Pod
metadata:
  name: calicoctl
  namespace: kube-system
spec:
  nodeSelector:
    kubernetes.io/os: linux
  hostNetwork: true
  containers:
  - name: calicoctl
    image: calico/ctl:v3.22.2
    command:
    - /calicoctl
    args:
    - version
    - --poll=1m
    env:
    - name: ETCD_ENDPOINTS
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_endpoints
    # If you're using TLS enabled etcd uncomment the following.
    # Location of the CA certificate for etcd.
    - name: ETCD_CA_CERT_FILE
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_ca
    # Location of the client key for etcd.
    - name: ETCD_KEY_FILE
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_key
    # Location of the client certificate for etcd.
    - name: ETCD_CERT_FILE
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_cert
    volumeMounts:
    - mountPath: /calico-secrets
      name: etcd-certs
  volumes:
  # If you're using TLS enabled etcd uncomment the following.
  - name: etcd-certs
    secret:
      secretName: calico-etcd-secrets
```
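The pod still has to be created, and `calicoctl` is most convenient behind a shell alias; the pattern below follows Calico's install docs (adjust the alias to your shell if needed):

```
# Deploy the calicoctl pod and wrap it in an alias for local use
$ kubectl apply -f calicoctl-etcd.yaml
$ alias calicoctl="kubectl exec -i -n kube-system calicoctl -- /calicoctl"
```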
```
$ calicoctl node status
Calico process is running.

IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 10.31.88.11  | node-to-node mesh | up    | 08:26:30 | Established |
| 10.31.88.12  | node-to-node mesh | up    | 08:26:30 | Established |
+--------------+-------------------+-------+----------+-------------+

IPv6 BGP status
No IPv6 peers found.
```
```
$ calicoctl get nodes
NAME
tiny-calico-master-88-1.k8s.tcinternal
tiny-calico-worker-88-11.k8s.tcinternal
tiny-calico-worker-88-12.k8s.tcinternal
```
```
$ calicoctl ipam show
+----------+---------------+-----------+------------+--------------+
| GROUPING |     CIDR      | IPS TOTAL | IPS IN USE |   IPS FREE   |
+----------+---------------+-----------+------------+--------------+
| IP Pool  | 10.88.64.0/18 |     16384 | 2 (0%)     | 16382 (100%) |
+----------+---------------+-----------+------------+--------------+
```
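The pool total of 16384 matches the /18 arithmetic worked out earlier. For a per-node breakdown of which /26 blocks have been handed out, `calicoctl` accepts an extra flag:

```
# Break the pool down into the /26 blocks assigned to each node
$ calicoctl ipam show --show-blocks
```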
```
apiVersion: v1
kind: Service
metadata:
  name: nginx-quic-service
  namespace: nginx-quic
spec:
  selector:
    app: nginx-quic
  ports:
  - protocol: TCP
    port: 8080        # port the Service listens on inside the cluster
    targetPort: 80    # port the pods listen on
    nodePort: 30088   # port exposed on every node for external access
  type: NodePort
```
```
# Deploy everything in one step
$ kubectl apply -f nginx-quic.yaml
namespace/nginx-quic created
deployment.apps/nginx-quic-deployment created
service/nginx-quic-service created
```
```
# Check the deployment status
$ kubectl get deployment -o wide -n nginx-quic
NAME                    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES                          SELECTOR
nginx-quic-deployment   4/4     4            4           55s   nginx-quic   tinychen777/nginx-quic:latest   app=nginx-quic
```
```
# Check the service status
$ kubectl get service -o wide -n nginx-quic
NAME                 TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE   SELECTOR
nginx-quic-service   NodePort   10.88.52.168   <none>        8080:30088/TCP   66s   app=nginx-quic
```
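As a final check, the service should answer on the NodePort from outside the cluster. A minimal probe against the control-plane node's address; the response body depends on what the nginx-quic image serves:

```
# Hit the NodePort on any node's IP; nginx should respond over plain HTTP
$ curl http://10.31.88.1:30088
```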