{"id":92,"date":"2023-04-26T14:01:26","date_gmt":"2023-04-26T06:01:26","guid":{"rendered":"https:\/\/zhaocunwei.co.uk\/?p=92"},"modified":"2023-05-09T12:54:11","modified_gmt":"2023-05-09T04:54:11","slug":"k8s-2","status":"publish","type":"post","link":"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/","title":{"rendered":"\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51"},"content":{"rendered":"<h1>\u524d\u8a00<\/h1>\n<p>\u6839\u636e\u516c\u53f8\u9879\u76ee\u7684\u8981\u6c42\uff0c\u9879\u76ee\u7ec4\u8981\u79bb\u7ebf\u90e8\u7f72\u5230\u5ba2\u6237\u7684\u673a\u623f\uff0c\u9700\u8981\u5728\u6709\u7f51\u7684\u670d\u52a1\u5668\u5b89\u88c5k8s\u96c6\u7fa4\uff0c\u7136\u540e\u518d<br \/>\n\u6253\u6210\u7cfb\u7edf\u955c\u50cf<\/p>\n<h2>1. \u5b89\u88c5Docker\u548c\u79bb\u7ebf\u955c\u50cf<\/h2>\n<p>\u4e0b\u8f7dDocker\u79bb\u7ebf\u5b89\u88c5\u5305\u548c\u955c\u50cf\u6587\u4ef6\uff0c\u5982docker-ce-18.09.0-3.el7.x86_64.rpm\u548ckubernetes-images-v1.17.0.tar.gz<br \/>\n1.\u4e0b\u8f7ddocker-ce-18.09.0-3.el7.x86_64.rpm<br \/>\n\u9700\u8981\u4e00\u4e2a\u8054\u7f51\u7684\u7535\u8111\u4e0a\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a\u8fd9\u6837\u5c31\u53ef\u4ee5\u4e0b\u8f7d\u5230\u672c\u5730\u4e86<\/p>\n<pre><code>https:\/\/download.docker.com\/linux\/centos\/7\/x86_64\/stable\/Packages\/<\/code><\/pre>\n<p>\u901a\u8fc7\u6d4f\u89c8\u5668\u91cc\u9762\u627e\u5230\u6211\u4eec\u8981\u4e0b\u8f7d\u7684\u6587\u4ef6\u3002<\/p>\n<p>\u7136\u540e\u5f00\u59cb\u4e0b\u8f7d\uff1a<br \/>\nkubernetes-images-v1.17.0.tar.gz<br 
\/>\n\u9700\u8981\u5728\u53ef\u4ee5\u4e0a\u7f51\u7684\u6d4f\u89c8\u5668\u4e0a\u9762\u76f4\u63a5\u6267\u884cURL\uff1a<\/p>\n<pre><code>https:\/\/dl.k8s.io\/v1.17.0\/kubernetes-server-linux-amd64.tar.gz<\/code><\/pre>\n<p>\u5c06\u4e0b\u8f7d\u597d\u7684\u5b89\u88c5\u5305\u548c\u955c\u50cf\u6587\u4ef6\u62f7\u8d1d\u5230\u4e09\u53f0\u670d\u52a1\u5668\u4e0a\uff0c\u5e76\u5b89\u88c5Docker\uff1a<\/p>\n<p>\u6211\u5728\u670d\u52a1\u5668\u4e0a\u9762\u521b\u5efa\u4e86\u5982\u4e0b\u76ee\u5f55\uff1a<\/p>\n<pre><code>\/home\/docker<\/code><\/pre>\n<p>\u8fdb\u884c\u5b89\u88c5\uff1a<\/p>\n<pre><code> rpm -ivh docker-ce-18.09.0-3.el7.x86_64.rpm<\/code><\/pre>\n<p>\u5f53\u8fdb\u884c\u5b89\u88c5\u65f6\u62a5\u5982\u4e0b\u9519\u8bef\uff1a<\/p>\n<pre><code>  [root@zzmuap6zwdoqhqxb docker]# rpm -ivh docker-ce-18.09.0-3.el7.x86_64.rpm \nwarning: docker-ce-18.09.0-3.el7.x86_64.rpm: Header V4 RSA\/SHA512 Signature, key ID 621e9f35: NOKEY\nerror: Failed dependencies:\n        container-selinux >= 2.9 is needed by docker-ce-3:18.09.0-3.el7.x86_64\n        containerd.io is needed by docker-ce-3:18.09.0-3.el7.x86_64\n        docker-ce-cli is needed by docker-ce-3:18.09.0-3.el7.x86_64<\/code><\/pre>\n<p><font color=red>\u89e3\u51b3\u529e\u6cd5<\/font><\/p>\n<p>\u901a\u8fc7\u4e0a\u9762\u7684\u62a5\u9519\uff0c\u6211\u4eec\u77e5\u9053\u8fd9\u662f\u5b89\u88c5 Docker CE \u5e76\u4e14\u51fa\u73b0\u4e86\u4e00\u4e9b\u4f9d\u8d56\u95ee\u9898\uff0c\u89e3\u51b3\u6b64\u95ee\u9898\uff1a<\/p>\n<p>Mac \u7535\u8111\u4e0a\uff0c\u5728\u6d4f\u89c8\u5668\u4e2d\u6253\u5f00\u4ee5\u4e0b\u94fe\u63a5\u6765\u83b7\u53d6\u6240\u9700\u7684 rpm \u5305\uff1a<\/p>\n<ol>\n<li>\u5728\u6d4f\u89c8\u5668\u4e2d\u6253\u5f00\u4ee5\u4e0b\u94fe\u63a5\uff1a\n<ul>\n<li><code>container-selinux<\/code>: <a href=\"http:\/\/mirror.centos.org\/centos\/7\/extras\/x86_64\/Packages\/\">http:\/\/mirror.centos.org\/centos\/7\/extras\/x86_64\/Packages\/<\/a><\/li>\n<li><code>containerd.io<\/code>: <a 
href=\"https:\/\/download.docker.com\/linux\/centos\/7\/x86_64\/stable\/Packages\/\">https:\/\/download.docker.com\/linux\/centos\/7\/x86_64\/stable\/Packages\/<\/a><\/li>\n<li><code>docker-ce-cli<\/code> \u548c <code>docker-ce<\/code>: <a href=\"https:\/\/download.docker.com\/linux\/centos\/7\/x86_64\/stable\/Packages\/\">https:\/\/download.docker.com\/linux\/centos\/7\/x86_64\/stable\/Packages\/<\/a><\/li>\n<\/ul>\n<\/li>\n<li>\u5728\u4e0a\u8ff0\u6bcf\u4e2a\u94fe\u63a5\u4e2d\uff0c\u627e\u5230\u6240\u9700\u7684 rpm \u5305\u3002\u4f8b\u5982\uff0c\u5728 <code>container-selinux<\/code> \u7684\u94fe\u63a5\u4e2d\uff0c\u627e\u5230\u5305\u542b <code>container-selinux-2.x.x-x.el7.noarch.rpm<\/code> \u7684\u6587\u4ef6\uff0c\u7136\u540e\u5355\u51fb\u8be5\u6587\u4ef6\u540d\uff0c\u5728\u51fa\u73b0\u7684\u5bf9\u8bdd\u6846\u4e2d\u5355\u51fb\u201c\u4e0b\u8f7d\u201d\u6309\u94ae\u5373\u53ef\u4e0b\u8f7d\u8be5\u6587\u4ef6\u3002<\/li>\n<\/ol>\n<p>\u4ee5\u4e0b\u662f\u4e3a CentOS 7 \u51c6\u5907 Docker CE 18.09.0 \u79bb\u7ebf\u5b89\u88c5\u5305\u6240\u9700\u7684\u5305\u53ca\u5176\u7248\u672c\uff1a<\/p>\n<ul>\n<li><code>container-selinux<\/code>: container-selinux-2.107-3.el7.noarch.rpm<\/li>\n<li><code>containerd.io<\/code>: containerd.io-1.2.13-3.1.el7.x86_64.rpm<\/li>\n<li><code>docker-ce-cli<\/code>: docker-ce-cli-18.09.0-3.el7.x86_64.rpm<\/li>\n<li><code>docker-ce<\/code>: docker-ce-18.09.0-3.el7.x86_64.rpm<\/li>\n<\/ul>\n<pre><code>sudo rpm -ivh containerd.io-1.2.13-3.1.el7.x86_64.rpm container-selinux-2.119.1-1.c57a6f9.el7.noarch.rpm docker-ce-18.09.0-3.el7.x86_64.rpm docker-ce-cli-18.09.3-3.el7.x86_64.rpm <\/code><\/pre>\n<p>\u6210\u529f\u4e86\uff0c\u7136\u540e\u5f00\u59cb\u7ee7\u7eed\u6267\u884c\u5176\u4ed6\u6b65\u9aa4\uff1a<\/p>\n<ul>\n<li>\u5bfc\u5165Kubernetes\u79bb\u7ebf\u955c\u50cf<\/li>\n<\/ul>\n<pre><code>  docker load -i 
kubernetes-images-v1.17.0.tar.gz<\/code><\/pre>\n<p>\u6267\u884c\u4e0a\u9762\u7684\u547d\u4ee4\u62a5\u9519\u4e86\ud83d\ude00\uff1a<\/p>\n<pre><code>[root@zzmuap6zwdoqhqxb docker]# docker load -i kubernetes-server-linux-amd64.tar.gz \nCannot connect to the Docker daemon at unix:\/\/\/var\/run\/docker.sock. Is the docker daemon running?<\/code><\/pre>\n<p>\u89e3\u51b3\uff1a<\/p>\n<p>\u8be5\u9519\u8bef\u8868\u660e\u60a8\u5728\u8fd0\u884c <code>docker load<\/code> \u547d\u4ee4\u65f6\uff0cDocker \u5ba2\u6237\u7aef\u65e0\u6cd5\u8fde\u63a5\u5230 Docker \u5f15\u64ce\u3002\u56e0\u6b64\uff0c\u6211\u63a8\u6d4b\u51fa\u73b0\u8be5\u9519\u8bef\u7684\u539f\u56e0\u662f\u56e0\u4e3a Docker \u5f15\u64ce\u672a\u542f\u52a8\u6216\u5df2\u505c\u6b62\u3002<\/p>\n<p>\u60a8\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u68c0\u67e5 Docker \u5f15\u64ce\u662f\u5426\u542f\u52a8\uff0c\u5e76\u91cd\u542f Docker \u5f15\u64ce\uff1a<\/p>\n<ol>\n<li>\u5728\u7ec8\u7aef\u4e2d\u8f93\u5165\u4ee5\u4e0b\u547d\u4ee4\uff0c\u68c0\u67e5 Docker \u5f15\u64ce\u662f\u5426\u6b63\u5728\u8fd0<\/li>\n<\/ol>\n<pre><code>sudo systemctl start docker<\/code><\/pre>\n<p>\u5982\u679cDocker\u5df2\u7ecf\u5728\u8fd0\u884c\u4e2d\uff0c\u60a8\u53ef\u4ee5\u5c1d\u8bd5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u91cd\u65b0\u542f\u52a8\u5b83\uff1a<\/p>\n<pre><code>sudo systemctl restart docker<\/code><\/pre>\n<p>\u5982\u679c\u4ecd\u7136\u65e0\u6cd5\u8fde\u63a5\u5230Docker\u5b88\u62a4\u8fdb\u7a0b\uff0c\u8bf7\u68c0\u67e5\u7f51\u7edc\u8bbe\u7f6e\u5e76\u5c1d\u8bd5\u91cd\u65b0\u5b89\u88c5Docker\u3002<\/p>\n<p>\u7136\u540e\u7ee7\u7eed\u6267\u884c\u6211\u4eec\u7684\u547d\u4ee4\uff1a<\/p>\n<pre><code>docker load -i kubernetes-images-v1.17.0.tar.gz<\/code><\/pre>\n<p>\u9519\u8bef\u53d8\u4e86O(\u2229_\u2229)O\u54c8\u54c8~<\/p>\n<pre><code>open \/var\/lib\/docker\/tmp\/docker-import-481500431\/kubernetes\/json: no such file or 
directory<\/code><\/pre>\n<p>\u8fd9\u4e2a\u9519\u8bef\u53ef\u80fd\u662f\u56e0\u4e3a\u4f60\u7684\u8def\u5f84\u6709\u8bef\uff0c\u6216\u8005\u662f\u7531\u4e8etar\u5305\u4e2d\u7684\u5185\u5bb9\u4e0d\u6b63\u786e\u5bfc\u81f4\u7684\u3002\u8bf7\u786e\u8ba4\u4ee5\u4e0b\u51e0\u70b9\uff1a<\/p>\n<ol>\n<li>\n\u786e\u4fdd tar \u5305\u8def\u5f84\u662f\u6b63\u786e\u7684\uff0c\u5e76\u4e14\u6587\u4ef6\u5b58\u5728\u3002(\u6211\u7684\u6587\u4ef6\u5b58\u5728\uff0c\u53ef\u4ee5\u8bf4\u660e\u8fd9\u4e2a\u9519\u8bef\u4e0d\u5b58\u5728)\n<\/li>\n<li>\n\u4f7f\u7528 tar \u547d\u4ee4\u6765\u68c0\u67e5 tar \u5305\u662f\u5426\u5b8c\u6574\u5e76\u4e14\u5305\u542b\u4e86\u6b63\u786e\u7684\u6587\u4ef6\uff1a<\/p>\n<pre><code>tar tf kubernetes-server-linux-amd64.tar.gz<\/code><\/pre>\n<p>\u8fd9\u4e2a\u547d\u4ee4\u4f1a\u5217\u51fatar\u5305\u4e2d\u6240\u6709\u5305\u542b\u7684\u6587\u4ef6\u548c\u6587\u4ef6\u5939\u3002<\/p>\n<pre><code>kubernetes\/\nkubernetes\/server\/\nkubernetes\/server\/bin\/\nkubernetes\/server\/bin\/apiextensions-apiserver\nkubernetes\/server\/bin\/kube-controller-manager.tar\nkubernetes\/server\/bin\/mounter\nkubernetes\/server\/bin\/kube-proxy.docker_tag\nkubernetes\/server\/bin\/kube-controller-manager.docker_tag\nkubernetes\/server\/bin\/kube-proxy.tar\nkubernetes\/server\/bin\/kubectl\nkubernetes\/server\/bin\/kube-scheduler.tar\nkubernetes\/server\/bin\/kube-apiserver.docker_tag\nkubernetes\/server\/bin\/kube-scheduler\nkubernetes\/server\/bin\/kubeadm\nkubernetes\/server\/bin\/kube-controller-manager\nkubernetes\/server\/bin\/kube-scheduler.docker_tag\nkubernetes\/server\/bin\/kubelet\nkubernetes\/server\/bin\/kube-proxy\nkubernetes\/server\/bin\/kube-apiserver.tar\nkubernetes\/server\/bin\/kube-apiserver\nkubernetes\/LICENSES\nkubernetes\/kubernetes-src.tar.gz\nkubernetes\/addons\/<\/code><\/pre>\n<\/li>\n<li>\n\u786e\u8ba4\u4f60\u7684 Docker \u7248\u672c\u652f\u6301 <code>docker load<\/code> 
\u547d\u4ee4\u3002\u4f60\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u6765\u67e5\u770b Docker \u7248\u672c\uff1a<\/p>\n<pre><code>docker version<\/code><\/pre>\n<pre><code>Client:\nVersion:           18.09.3\nAPI version:       1.39\nGo version:        go1.10.8\nGit commit:        774a1f4\nBuilt:             Thu Feb 28 06:33:21 2019\nOS\/Arch:           linux\/amd64\nExperimental:      false\n\nServer: Docker Engine - Community\nEngine:\n Version:          18.09.0\n API version:      1.39 (minimum version 1.12)\n Go version:       go1.10.4\n Git commit:       4d60db4\n Built:            Wed Nov  7 00:19:08 2018\n OS\/Arch:          linux\/amd64\n Experimental:     false<\/code><\/pre>\n<p>Docker \u5ba2\u6237\u7aef\u7684\u7248\u672c\u4e3a 18.09.3\uff0c\u800c Docker \u670d\u52a1\u5668\u7248\u672c\u4e3a 18.09.0\u3002<\/p>\n<p>\u8fd9\u4e24\u4e2a\u7248\u672c\u4e4b\u95f4\u7684\u5dee\u5f02\u53ef\u80fd\u5bfc\u81f4\u4e00\u4e9b\u547d\u4ee4\u5728\u670d\u52a1\u5668\u4e0a\u4e0d\u8d77\u4f5c\u7528\u3002<font color=red>\u4f46\u662f\u5c0f\u7248\u672c\uff0c\u6211\u611f\u89c9\u5e94\u8be5\u4e0d\u4f1a\u6709\u592a\u5927\u7684\u5dee\u5f02<\/font>\n<\/li>\n<\/ol>\n<p>\u5982\u679c\u4ee5\u4e0a\u6b65\u9aa4\u90fd\u6ca1\u6709\u95ee\u9898\uff0c\u8bf7\u786e\u8ba4\u4f60\u7684 Docker \u5b88\u62a4\u8fdb\u7a0b\u6b63\u5728\u8fd0\u884c\uff0c\u4f60\u53ef\u4ee5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u542f\u52a8\u5b83\uff1a<\/p>\n<pre><code>sudo systemctl start docker<\/code><\/pre>\n<pre><code>sudo systemctl status docker<\/code><\/pre>\n<pre><code>\u25cf docker.service - Docker Application Container Engine\n   Loaded: loaded (\/usr\/lib\/systemd\/system\/docker.service; disabled; vendor preset: disabled)\n   Active: active (running) since Tue 2023-04-25 13:48:58 CST; 16min ago\n     Docs: https:\/\/docs.docker.com\n Main PID: 4440 (dockerd)\n   Memory: 66.3M\n   CGroup: \/system.slice\/docker.service\n           \u251c\u25004440 \/usr\/bin\/dockerd -H unix:\/\/\n           \u2514\u25004459 containerd 
--config \/var\/run\/docker\/containerd\/containerd.toml --log-level info\n\nApr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.383158185+08:00\" level=info msg=\"Graph migration to c...conds\"Apr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.383404121+08:00\" level=warning msg=\"mountpoint for pi...found\"Apr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.383647067+08:00\" level=info msg=\"Loading containers: start.\"\nApr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.392385683+08:00\" level=warning msg=\"Running modprobe bridge...Apr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.481289323+08:00\" level=info msg=\"Default bridge (dock...dress\"Apr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.520565932+08:00\" level=info msg=\"Loading containers: done.\"\nApr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.532050119+08:00\" level=info msg=\"Docker daemon\" commi...8.09.0Apr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.532162365+08:00\" level=info msg=\"Daemon has completed...ation\"Apr 25 13:48:58 zzmuap6zwdoqhqxb dockerd[4440]: time=\"2023-04-25T13:48:58.553613862+08:00\" level=info msg=\"API listen on \/var\/r....sock\"Apr 25 13:48:58 zzmuap6zwdoqhqxb systemd[1]: Started Docker Application Container Engine.\nHint: Some lines were ellipsized, use -l to show in full.<\/code><\/pre>\n<p>\u5982\u679cDocker\u5df2\u7ecf\u5728\u8fd0\u884c\u4e2d\uff0c\u4f60\u53ef\u4ee5\u5c1d\u8bd5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u91cd\u65b0\u542f\u52a8\u5b83\uff1a<\/p>\n<pre><code>sudo systemctl restart 
docker<\/code><\/pre>\n<p>\u901a\u8fc7\u4e0a\u9762\u7684\u4e00\u987f\u9a9a\u64cd\u4f5c\u8fd8\u662f\u4e0d\u884c\uff0c\u662f\u4e0d\u662f\uff1a\u252d\u252e\ufe4f\u252d\u252e<\/p>\n<p>Docker\u6ca1\u6709\u8db3\u591f\u7684\u7a7a\u95f4\u6765\u521b\u5efa\u4e34\u65f6\u6587\u4ef6\u3002\u53ef\u4ee5\u5c1d\u8bd5\u589e\u52a0Docker\u5b88\u62a4\u8fdb\u7a0b\u7684\u5b58\u50a8\u7a7a\u95f4\u9650\u5236\uff0c\u4ee5\u4fbf\u53ef\u4ee5\u521b\u5efa\u4e34\u65f6\u6587\u4ef6\u3002<\/p>\n<p>\u4ee5\u4e0b\u662f\u5982\u4f55\u589e\u52a0Docker\u5b58\u50a8\u7a7a\u95f4\u9650\u5236\u7684\u6b65\u9aa4\uff1a<\/p>\n<ol>\n<li>\n\u7f16\u8f91 <code>\/etc\/docker\/daemon.json<\/code> \u6587\u4ef6<\/p>\n<pre><code>sudo vim \/etc\/docker\/daemon.json<\/code><\/pre>\n<\/li>\n<li>\n\u5c06\u4ee5\u4e0b\u5185\u5bb9\u590d\u5236\u5230\u6587\u4ef6\u4e2d\uff1a<\/p>\n<pre><code>{\n   \"storage-driver\": \"devicemapper\",\n   \"storage-opts\": [\n       \"dm.basesize=20G\",\n       \"dm.thinpooldev=\/dev\/mapper\/docker-thinpool\",\n       \"dm.use_deferred_deletion=true\",\n       \"dm.use_deferred_removal=true\"\n   ]\n}<\/code><\/pre>\n<\/li>\n<li>\n\u4fdd\u5b58\u8be5\u6587\u4ef6\u5e76\u9000\u51fa\u7f16\u8f91\u5668\u3002\n<\/li>\n<li>\n\u91cd\u542f Docker \u670d\u52a1<\/p>\n<pre><code>sudo systemctl restart docker<\/code><\/pre>\n<\/li>\n<\/ol>\n<p>\u6267\u884c\u547d\u4ee4\u540e\u62a5\u9519\uff1a<\/p>\n<pre><code>sudo systemctl restart docker\nJob for docker.service failed because the control process exited with error code. 
See \"systemctl status docker.service\" and \"journalctl -xe\" for details.<\/code><\/pre>\n<p>\u5982\u679c <code>systemctl restart docker<\/code> \u547d\u4ee4\u8fd0\u884c\u5931\u8d25, \u6709\u51e0\u79cd\u53ef\u80fd\u60c5\u51b5:<\/p>\n<ol>\n<li>\nDocker \u914d\u7f6e\u6587\u4ef6\u6709\u8bef\uff1a\u4f60\u53ef\u4ee5\u5c1d\u8bd5\u7f16\u8f91 <code>\/etc\/docker\/daemon.json<\/code> \u6587\u4ef6\u5e76\u68c0\u67e5\u5176\u4e2d\u7684\u914d\u7f6e\u662f\u5426\u6b63\u786e\u3002\n<\/li>\n<li>\nDocker \u5df2\u7ecf\u5728\u8fd0\u884c\u4e2d\uff1a\u5728\u6709\u4e9b\u60c5\u51b5\u4e0b\uff0c\u5982\u679c Docker \u5df2\u7ecf\u5728\u8fd0\u884c\u4e2d\uff0c\u4f60\u53ef\u80fd\u9700\u8981\u5148\u505c\u6b62 Docker \u670d\u52a1\uff0c\u7136\u540e\u518d\u91cd\u542f\u5b83\u3002<\/p>\n<pre><code>sudo systemctl stop docker\nsudo systemctl start docker<\/code><\/pre>\n<\/li>\n<li>\nDocker \u670d\u52a1\u9519\u8bef\uff1a\u5982\u679cDocker\u670d\u52a1\u6b63\u5728\u4f7f\u7528\u8fc7\u591a\u5185\u5b58\u6216CPU\u8d44\u6e90\uff0c\u5219\u53ef\u80fd\u65e0\u6cd5\u91cd\u65b0\u542f\u52a8\u3002\u4f7f\u7528 <code>systemctl status<\/code> \u547d\u4ee4\u6765\u67e5\u770bDocker\u670d\u52a1\u7684\u72b6\u6001\uff0c\u67e5\u770bDocker\u65e5\u5fd7\uff0c\u4ee5\u4fbf\u627e\u5230\u76f8\u5173\u9519\u8bef\u5e76\u89e3\u51b3\u5b83\u4eec\u3002<\/p>\n<pre><code>systemctl status docker\njournalctl -xe<\/code><\/pre>\n<\/li>\n<li>\n\u7cfb\u7edf\u542f\u52a8\u68c0\u67e5\u70b9\u95ee\u9898\uff1a\u5982\u679c\u4f60\u6b63\u5728\u8fdb\u884c\u7cfb\u7edf\u7684\u5f15\u5bfc\u548c\u91cd\u542f\u64cd\u4f5c\uff0c\u53ef\u80fd\u9700\u8981\u68c0\u67e5\u5f15\u5bfc\u68c0\u67e5\u70b9\u548c\u542f\u52a8\u5e8f\u5217\u914d\u7f6e\uff0c\u4ee5\u786e\u4fddDocker\u670d\u52a1\u5728\u542f\u52a8\u671f\u95f4\u6b63\u786e\u5730\u542f\u52a8\u3002\n<\/li>\n<\/ol>\n<p><font color=red>\u6211\u611f\u89c9\u662f\u914d\u7f6e\u6587\u4ef6\u5199\u9519\u4e86\uff1a<\/font> <\/p>\n<pre><code>vim \/etc\/docker\/daemon.json<\/code><\/pre>\n<pre><code>{\n    \"storage-driver\": 
\"devicemapper\",\n    \"storage-opts\": [\n        \"dm.basesize=20G\",\n        \"dm.thinpooldev=\/dev\/mapper\/docker-thinpool\",\n        \"dm.use_deferred_deletion=true\",\n        \"dm.use_deferred_removal=true\"\n    ]\n}<\/code><\/pre>\n<p>\u67e5\u770b\u4e0a\u9762\u6211\u7684\u914d\u7f6e\u662f\u6b63\u786e\uff0c\u73b0\u5728\u68c0\u67e5\u6211\u7684\u5f53\u524d\u5b89\u88c5\u76ee\u5f55\u662f\u5426\u78c1\u76d8\u6ee1\u4e86\uff1a<\/p>\n<p>\u68c0\u67e5 <code>\/var\/lib\/docker<\/code> \u7684\u78c1\u76d8\u7a7a\u95f4\u4f7f\u7528\u60c5\u51b5\u662f\u975e\u5e38\u91cd\u8981\u7684\uff0c\u56e0\u4e3a Docker \u7684\u6240\u6709\u6570\u636e\uff08\u5bb9\u5668\u3001\u955c\u50cf\u3001\u5377\u7b49\uff09\u90fd\u4f1a\u4fdd\u5b58\u5728\u8fd9\u4e2a\u76ee\u5f55\u4e0b\u3002<\/p>\n<pre><code>df -h \/var\/lib\/docker<\/code><\/pre>\n<pre><code>Filesystem      Size  Used Avail Use% Mounted on\n\/dev\/vda2        36G  3.1G   31G  10% \/<\/code><\/pre>\n<p>\u6839\u636e\u63d0\u4f9b\u7684\u8f93\u51fa\uff0c<code>\/var\/lib\/docker<\/code> \u76ee\u5f55\u4f3c\u4e4e\u5e76\u6ca1\u6709\u4f7f\u7528\u5f88\u591a\u78c1\u76d8\u7a7a\u95f4\uff0c\u56e0\u4e3a\u5b83\u6ca1\u6709\u663e\u793a\u5728 <code>df<\/code> \u547d\u4ee4\u7684\u8f93\u51fa\u4e2d\u3002\u56e0\u6b64\uff0c\u78c1\u76d8\u7a7a\u95f4\u9650\u5236\u5e76\u4e0d\u662f\u5bfc\u81f4 Docker \u542f\u52a8\u5931\u8d25\u7684\u539f\u56e0\uff0c\u4f60\u9700\u8981\u68c0\u67e5\u5176\u4ed6\u53ef\u80fd\u7684\u95ee\u9898\u3002<\/p>\n<p>\u53ef\u4ee5\u8fd0\u884c\u4ee5\u4e0b\u4e24\u4e2a\u547d\u4ee4\u6765\u68c0\u67e5 Docker \u670d\u52a1\u7684\u8be6\u7ec6\u4fe1\u606f:<\/p>\n<pre><code>systemctl status docker.service\njournalctl -xe<\/code><\/pre>\n<pre><code>\u25cf docker.service - Docker Application Container Engine\n   Loaded: loaded (\/usr\/lib\/systemd\/system\/docker.service; disabled; vendor preset: disabled)\n   Active: failed (Result: start-limit) since Tue 2023-04-25 14:16:04 CST; 7min ago\n     Docs: https:\/\/docs.docker.com\n  Process: 5069 
ExecStart=\/usr\/bin\/dockerd -H unix:\/\/ (code=exited, status=1\/FAILURE)\n Main PID: 5069 (code=exited, status=1\/FAILURE)\n\nApr 25 14:16:02 zzmuap6zwdoqhqxb systemd[1]: Failed to start Docker Application Container Engine.\nApr 25 14:16:02 zzmuap6zwdoqhqxb systemd[1]: Unit docker.service entered failed state.\nApr 25 14:16:02 zzmuap6zwdoqhqxb systemd[1]: docker.service failed.\nApr 25 14:16:04 zzmuap6zwdoqhqxb systemd[1]: docker.service holdoff time over, scheduling restart.\nApr 25 14:16:04 zzmuap6zwdoqhqxb systemd[1]: Stopped Docker Application Container Engine.\nApr 25 14:16:04 zzmuap6zwdoqhqxb systemd[1]: start request repeated too quickly for docker.service\nApr 25 14:16:04 zzmuap6zwdoqhqxb systemd[1]: Failed to start Docker Application Container Engine.\nApr 25 14:16:04 zzmuap6zwdoqhqxb systemd[1]: Unit docker.service entered failed state.\nApr 25 14:16:04 zzmuap6zwdoqhqxb systemd[1]: docker.service failed.<\/code><\/pre>\n<pre><code>Apr 25 14:16:56 zzmuap6zwdoqhqxb sshd[5083]: Failed password for root from 164.92.157.12 port 40670 ssh2\nApr 25 14:16:56 zzmuap6zwdoqhqxb sshd[5083]: Connection closed by 164.92.157.12 port 40670 [preauth]\nApr 25 14:17:35 zzmuap6zwdoqhqxb sshd[5086]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=164Apr 25 14:17:35 zzmuap6zwdoqhqxb sshd[5086]: pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"\nApr 25 14:17:37 zzmuap6zwdoqhqxb sshd[5086]: Failed password for root from 164.92.157.12 port 58892 ssh2\nApr 25 14:17:37 zzmuap6zwdoqhqxb sshd[5086]: Connection closed by 164.92.157.12 port 58892 [preauth]\nApr 25 14:18:10 zzmuap6zwdoqhqxb sshd[5105]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=218Apr 25 14:18:10 zzmuap6zwdoqhqxb sshd[5105]: pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"\nApr 25 14:18:12 zzmuap6zwdoqhqxb sshd[5105]: Failed password for root from 218.94.53.250 port 56682 
ssh2\nApr 25 14:18:12 zzmuap6zwdoqhqxb sshd[5105]: Connection closed by 218.94.53.250 port 56682 [preauth]\nApr 25 14:18:17 zzmuap6zwdoqhqxb sshd[5107]: Invalid user oracle from 164.92.157.12 port 45276\nApr 25 14:18:17 zzmuap6zwdoqhqxb sshd[5107]: input_userauth_request: invalid user oracle [preauth]\nApr 25 14:18:17 zzmuap6zwdoqhqxb sshd[5107]: pam_unix(sshd:auth): check pass; user unknown\nApr 25 14:18:17 zzmuap6zwdoqhqxb sshd[5107]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=164Apr 25 14:18:19 zzmuap6zwdoqhqxb sshd[5107]: Failed password for invalid user oracle from 164.92.157.12 port 45276 ssh2\nApr 25 14:18:19 zzmuap6zwdoqhqxb sshd[5107]: Connection closed by 164.92.157.12 port 45276 [preauth]\nApr 25 14:18:58 zzmuap6zwdoqhqxb sshd[5109]: Invalid user oracle from 164.92.157.12 port 39130\nApr 25 14:18:58 zzmuap6zwdoqhqxb sshd[5109]: input_userauth_request: invalid user oracle [preauth]\nApr 25 14:18:58 zzmuap6zwdoqhqxb sshd[5109]: pam_unix(sshd:auth): check pass; user unknown\nApr 25 14:18:58 zzmuap6zwdoqhqxb sshd[5109]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=164Apr 25 14:18:59 zzmuap6zwdoqhqxb sshd[5109]: Failed password for invalid user oracle from 164.92.157.12 port 39130 ssh2\nApr 25 14:19:00 zzmuap6zwdoqhqxb sshd[5109]: Connection closed by 164.92.157.12 port 39130 [preauth]\nApr 25 14:19:37 zzmuap6zwdoqhqxb sshd[5111]: refused connect from 164.92.157.12 (164.92.157.12)\nApr 25 14:20:01 zzmuap6zwdoqhqxb CROND[5113]: (root) CMD (\/usr\/lib64\/sa\/sa1 1 1)\nApr 25 14:20:01 zzmuap6zwdoqhqxb postfix\/pickup[3637]: 8B7C6E0010: uid=0 from=<root>\nApr 25 14:20:01 zzmuap6zwdoqhqxb postfix\/cleanup[5118]: 8B7C6E0010: message-id=<20230425062001.8B7C6E0010@zzmuap6zwdoqhqxb.localdomain>\nApr 25 14:20:01 zzmuap6zwdoqhqxb postfix\/qmgr[1167]: 8B7C6E0010: from=<root@zzmuap6zwdoqhqxb.localdomain>, size=721, nrcpt=1 (queue actiApr 25 14:20:01 zzmuap6zwdoqhqxb 
postfix\/local[5120]: 8B7C6E0010: to=<root@zzmuap6zwdoqhqxb.localdomain>, orig_to=<root>, relay=local, dApr 25 14:20:01 zzmuap6zwdoqhqxb postfix\/qmgr[1167]: 8B7C6E0010: removed\nApr 25 14:20:19 zzmuap6zwdoqhqxb sshd[5121]: refused connect from 164.92.157.12 (164.92.157.12)\nApr 25 14:20:59 zzmuap6zwdoqhqxb sshd[5123]: refused connect from 164.92.157.12 (164.92.157.12)\nApr 25 14:21:41 zzmuap6zwdoqhqxb sshd[5124]: refused connect from 164.92.157.12 (164.92.157.12)\nApr 25 14:22:22 zzmuap6zwdoqhqxb sshd[5125]: refused connect from 164.92.157.12 (164.92.157.12)\nApr 25 14:23:02 zzmuap6zwdoqhqxb sshd[5126]: refused connect from 164.92.157.12 (164.92.157.12)<\/code><\/pre>\n<pre><code>sudo dockerd<\/code><\/pre>\n<pre><code>INFO[2023-04-25T14:28:27.769068507+08:00] parsed scheme: \"unix\"                         module=grpc\nINFO[2023-04-25T14:28:27.769125008+08:00] scheme \"unix\" not registered, fallback to default scheme  module=grpc\nINFO[2023-04-25T14:28:27.769162226+08:00] parsed scheme: \"unix\"                         module=grpc\nINFO[2023-04-25T14:28:27.769169182+08:00] scheme \"unix\" not registered, fallback to default scheme  module=grpc\nINFO[2023-04-25T14:28:27.769197511+08:00] ccResolverWrapper: sending new addresses to cc: [{unix:\/\/\/run\/containerd\/containerd.sock 0  <nil>}]  module=grpc\nINFO[2023-04-25T14:28:27.769239388+08:00] ClientConn switching balancer to \"pick_first\"  module=grpc\nINFO[2023-04-25T14:28:27.769302514+08:00] pickfirstBalancer: HandleSubConnStateChange: 0xc420793780, CONNECTING  module=grpc\nINFO[2023-04-25T14:28:27.769299151+08:00] ccResolverWrapper: sending new addresses to cc: [{unix:\/\/\/run\/containerd\/containerd.sock 0  <nil>}]  module=grpc\nINFO[2023-04-25T14:28:27.769327661+08:00] ClientConn switching balancer to \"pick_first\"  module=grpc\nINFO[2023-04-25T14:28:27.770884453+08:00] pickfirstBalancer: HandleSubConnStateChange: 0xc420793780, READY  module=grpc\nINFO[2023-04-25T14:28:27.770933748+08:00] 
pickfirstBalancer: HandleSubConnStateChange: 0xc4201d0070, CONNECTING  module=grpc\nINFO[2023-04-25T14:28:27.771102764+08:00] pickfirstBalancer: HandleSubConnStateChange: 0xc4201d0070, READY  module=grpc\nError starting daemon: error initializing graphdriver: devicemapper: Non existing device docker-thinpool<\/code><\/pre>\n<p>\u901a\u8fc7\u547d\u4ee4\u884c\u67e5\u770b\u4e00\u4e0b\u6211\u4eec\u5199\u7684\u914d\u7f6e\u6587\u4ef6\u662f\u5426\u6b63\u786e\uff1a<\/p>\n<pre><code>sudo docker daemon --config-file \/etc\/docker\/daemon.json --test<\/code><\/pre>\n<h1>O(\u2229_\u2229)O\u54c8\u54c8~<\/h1>\n<p>\u4f7f\u7528\u9ed8\u8ba4\u7684\u914d\u7f6e\u6587\u4ef6\uff0c\u7136\u540e\u628a\u6211\u7684\/etc\/docker\/daemon.json\u914d\u7f6e\u6587\u4ef6\u5220\u9664\u4e86\uff0c<\/p>\n<pre><code>systemctl start docker<\/code><\/pre>\n<p>\u7136\u540e\u5206\u522b\u5411\u53e6\u5916\u4e24\u53f0\u670d\u52a1\u5668\uff0c\u4e0a\u4f20\u914d\u7f6e\u6587\u4ef6\uff0c\u5b89\u88c5\uff0c\u542f\u52a8docker\u5bb9\u5668\uff0c\u8bf7\u67e5\u770b\u4e0a\u9762\u7684\u5b89\u88c5\u6b65\u9aa4\uff1a<\/p>\n<p>\u89e3\u538b\u4e0b\u8f7d\u7684\u538b\u7f29\u5305:<\/p>\n<pre><code>tar -zxvf kubernetes-server-linux-amd64.tar.gz<\/code><\/pre>\n<h2>\u5b89\u88c5 Kubernetes \u7ec4\u4ef6<\/h2>\n<p>\u5728\u89e3\u538b kubernetes-server-linux-amd64.tar.gz \u6587\u4ef6\u540e\uff0c\u4e0b\u9762\u662f\u4e00\u4e9b\u53ef\u80fd\u6709\u7528\u7684\u64cd\u4f5c\uff1a<\/p>\n<ol>\n<li>\n\u5c06 kubernetes \u4e8c\u8fdb\u5236\u6587\u4ef6\u79fb\u52a8\u5230 \/usr\/local\/bin \u76ee\u5f55\uff0c\u53ef\u7528\u4ee5\u4e0b\u547d\u4ee4\u5b9e\u73b0\uff1a<\/p>\n<pre><code>sudo mv kubernetes\/server\/bin\/* \/usr\/local\/bin\/<\/code><\/pre>\n<\/li>\n<li>\n\u8bbe\u7f6e kubectl \u5ba2\u6237\u7aef\u7684\u81ea\u52a8\u8865\u5168\u529f\u80fd\uff1a<\/p>\n<pre><code>echo \"source <(kubectl completion bash)\" >> 
~\/.bashrc<\/code><\/pre>\n<p>\u7136\u540e\u5728\u7ec8\u7aef\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff0c\u4f7f\u53d8\u66f4\u7acb\u5373\u751f\u6548\uff1a<\/p>\n<pre><code>source ~\/.bashrc<\/code><\/pre>\n<\/li>\n<li>\n\u542f\u52a8 Kubernetes \u670d\u52a1\uff1a<\/p>\n<pre><code>sudo systemctl start kubelet<\/code><\/pre>\n<pre><code>[root@zzmuap6zwdoqhqxb docker]# sudo systemctl start kubelet\nFailed to start kubelet.service: Unit not found.<\/code><\/pre>\n<pre><code>\u8fd9\u4e2a\u9519\u8bef\u53ef\u80fd\u662f\u7531\u4e8e\u7f3a\u5931kubelet\u8fd9\u4e2asystemd\u670d\u52a1\u5bfc\u81f4\u7684\u3002\u5728\u786e\u4fddkubelet\u88ab\u6b63\u786e\u5b89\u88c5\u7684\u524d\u63d0\u4e0b\uff0c\u53ef\u4ee5\u5148\u5c1d\u8bd5\u8fd0\u884c\u6307\u4ee4sudo systemctl daemon-reload\uff0c\u6765\u91cd\u65b0\u52a0\u8f7dsystemd\u7ba1\u7406\u7684\u914d\u7f6e\u6587\u4ef6\u3002\u5982\u679c\u6709\u66f4\u65b0\u7684systemd\u914d\u7f6e\u6587\u4ef6\uff0c\u5b83\u4eec\u4f1a\u88ab\u91cd\u65b0\u52a0\u8f7d\uff0c\u5e76\u4f7f\u5f97systemd\u611f\u77e5\u5230\u65b0\u7684kubelet\u670d\u52a1\u7684\u5b58\u5728\u3002\u7136\u540e\u518d\u5c1d\u8bd5\u542f\u52a8kubelet\uff1asudo systemctl start kubelet\u3002<\/code><\/pre>\n<p>\u5982\u679c\u5373\u4f7f\u91cd\u65b0\u52a0\u8f7d\u4e86systemd\u7ba1\u7406\u7684\u914d\u7f6e\u6587\u4ef6\u540e\uff0c\u4ecd\u7136\u65e0\u6cd5\u542f\u52a8kubelet\u670d\u52a1\uff0c\u5e76\u4e14\u63d0\u793a\u201cUnit not found\u201d\u9519\u8bef\uff0c\u53ef\u80fd\u662f\u56e0\u4e3akubelet\u670d\u52a1\u5728\u7cfb\u7edf\u4e2d\u6ca1\u6709\u88ab\u6b63\u786e\u5b89\u88c5\u5bfc\u81f4\u7684\u3002\u4f60\u53ef\u4ee5\u901a\u8fc7\u91cd\u65b0\u5b89\u88c5kubelet\u670d\u52a1\u6765\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898\uff0c<\/p>\n<p>\u9700\u8981\u79bb\u7ebf\u8fdb\u884c\u5b89\u88c5\uff1a<\/p>\n<p>\u4f46\u662f\u6211\u5df2\u7ecf\u5b89\u88c5\u4e86\uff0c\u4e0a\u9762\u7684\u6587\u4ef6\uff0c\u6211\u8bd5\u7740\u7ed9\u6587\u4ef6\u8d4b\u4e88\u6743\u9650\u8bd5\u4e00\u8bd5\uff1a<\/p>\n<pre><code>sudo chmod +x 
\/usr\/local\/bin\/kubelet<\/code><\/pre>\n<p>\u8fd8\u662f\u4e0d\u884c\ud83d\ude00<\/p>\n<p>\u5982\u679c\u5728\u5b89\u88c5\u5b8c\u6210kubelet\u670d\u52a1\u4e4b\u540e\uff0c\u901a\u8fc7<code>sudo systemctl start kubelet<\/code>\u542f\u52a8\u670d\u52a1\u65f6\uff0c\u4ecd\u7136\u63d0\u793a\u201cUnit not found\u201d\u9519\u8bef\uff0c\u90a3\u4e48\u53ef\u80fd\u662f\u7531\u4e8ekubelet\u670d\u52a1\u7684systemd\u5355\u5143\u6587\u4ef6\u6ca1\u6709\u88ab\u6b63\u786e\u5b89\u88c5\u3002\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u4f60\u9700\u8981\u624b\u52a8\u5b89\u88c5kubelet\u7684systemd\u5355\u5143\u6587\u4ef6\u3002\u5177\u4f53\u6765\u8bf4\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a<\/p>\n<ol>\n<li>\u6253\u5f00<code>\/lib\/systemd\/system\/kubelet.service<\/code>\u6587\u4ef6\uff0c\u5982\u679c\u8be5\u6587\u4ef6\u4e0d\u5b58\u5728\u5219\u65b0\u5efa\u8be5\u6587\u4ef6\u3002\u53ef\u4ee5\u4f7f\u7528\u4f60\u559c\u6b22\u7684\u7f16\u8f91\u5668\uff0c\u4f8b\u5982vi\u6216nano\u6765\u6253\u5f00\u8be5\u6587\u4ef6\uff1a<\/li>\n<\/ol>\n<pre><code>sudo vim \/lib\/systemd\/system\/kubelet.service<\/code><\/pre>\n<ol start=\"2\">\n<li>\u5728kubelet.service\u6587\u4ef6\u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u5185\u5bb9\uff1a<\/li>\n<\/ol>\n<pre><code>[Unit]\nDescription=kubelet: The Kubernetes Node Agent\nDocumentation=https:\/\/kubernetes.io\/docs\/home\/\n\n[Service]\nExecStart=\/usr\/local\/bin\/kubelet --config=\/etc\/kubernetes\/kubelet.conf --pod-manifest-path=\/etc\/kubernetes\/manifests 
--cgroup-driver=systemd\nRestart=always\nStartLimitInterval=0s\nRestartSec=10s\n\n[Install]\nWantedBy=multi-user.target<\/code><\/pre>\n<p>\u4e0a\u8ff0\u5185\u5bb9\u5305\u62ec\u4e86kubelet\u670d\u52a1\u7684systemd\u5355\u5143\u6587\u4ef6\u7684\u4e00\u4e9b\u57fa\u672c\u4fe1\u606f\u3002\u5176\u4e2d\uff0cExecStart\u9009\u9879\u6307\u5b9a\u4e86kubelet\u4e8c\u8fdb\u5236\u6587\u4ef6\u7684\u8def\u5f84\u548c\u914d\u7f6e\u6587\u4ef6\u7684\u8def\u5f84\uff0c\u8fd9\u91cc\u7684\u8def\u5f84\u5e94\u8be5\u6839\u636e\u4f60\u5b9e\u9645\u4f7f\u7528\u7684\u8def\u5f84\u8fdb\u884c\u4fee\u6539\uff1bRestart\u9009\u9879\u8bbe\u7f6e\u4e3aalways\uff0c\u8868\u793a\u5728kubelet\u670d\u52a1\u51fa\u73b0\u95ee\u9898\u65f6\u81ea\u52a8\u91cd\u542f\uff1bWantedBy\u9009\u9879\u5c06kubelet\u670d\u52a1\u6dfb\u52a0\u5230multi-user.target\u5355\u5143\uff0c\u4ee5\u4fbf\u5728\u7cfb\u7edf\u542f\u52a8\u65f6\u81ea\u52a8\u542f\u52a8kubelet\u670d\u52a1\u3002<\/p>\n<ol start=\"3\">\n<li>\u4fdd\u5b58kubelet.service\u6587\u4ef6\u5e76\u9000\u51fa\u7f16\u8f91\u5668\u3002\u7136\u540e\uff0c\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u91cd\u65b0\u52a0\u8f7dsystemd\u7684\u914d\u7f6e\u6587\u4ef6\uff1a<\/li>\n<\/ol>\n<pre><code>sudo systemctl daemon-reload<\/code><\/pre>\n<ol start=\"4\">\n<li>\u63a5\u4e0b\u6765\uff0c\u5c31\u53ef\u4ee5\u542f\u52a8kubelet\u670d\u52a1\u4e86\u3002\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u542f\u52a8kubelet\u670d\u52a1\uff1a<\/li>\n<\/ol>\n<pre><code>sudo systemctl start kubelet<\/code><\/pre>\n<p>\u73b0\u5728\u5e94\u8be5\u53ef\u4ee5\u6210\u529f\u542f\u52a8kubelet\u670d\u52a1\u4e86\u3002\u5982\u679c\u8fd8\u6709\u95ee\u9898\uff0c\u53ef\u4ee5\u67e5\u770bsystemd\u670d\u52a1\u7684\u65e5\u5fd7\u6765\u68c0\u67e5\u8be6\u7ec6\u7684\u9519\u8bef\u4fe1\u606f\uff0c\u4f7f\u7528\u547d\u4ee4<code>sudo journalctl -u kubelet<\/code>\u3002\n<\/li>\n<li>\n\u68c0\u67e5 Kubernetes \u670d\u52a1\u7684\u8fd0\u884c\u72b6\u6001\uff1a<\/p>\n<pre><code>systemctl status 
kubelet<\/code><\/pre>\n<p>\u5982\u679c\u670d\u52a1\u6b63\u5728\u8fd0\u884c\uff0c\u5219\u4f1a\u663e\u793a active (running)\u3002<\/p>\n<pre><code>[root@zzmuap6zwdoqhqxb systemd]# systemctl status kubelet \u25cf kubelet.service - kubelet: The Kubernetes Node Agent Loaded: loaded (\/usr\/lib\/systemd\/system\/kubelet.service; disabled; vendor preset: disabled) Active: activating (auto-restart) (Result: exit-code) since Tue 2023-04-25 17:38:09 CST; 6s ago Docs: https:\/\/kubernetes.io\/docs\/home\/ Process: 7545 ExecStart=\/usr\/local\/bin\/kubelet --config=\/etc\/kubernetes\/kubelet.conf --pod-manifest-path=\/etc\/kubernetes\/manifests --cgroup-driver=systemd (code=exited, status=255) Main PID: 7545 (code=exited, status=255) Apr 25 17:38:09 zzmuap6zwdoqhqxb systemd[1]: Unit kubelet.service entered failed state. Apr 25 17:38:09 zzmuap6zwdoqhqxb syst<\/code><\/pre>\n<p>\u63d0\u4f9b\u7684\u4fe1\u606f\uff0ckubelet\u670d\u52a1\u6ca1\u6709\u4ee5\u6b63\u786e\u7684\u65b9\u5f0f\u542f\u52a8\uff0c\u5e76\u4e14\u5728\u542f\u52a8\u5931\u8d25\u540e\u81ea\u52a8\u91cd\u65b0\u542f\u52a8\u3002\u8fd9\u901a\u5e38\u662f\u7531\u4e8e\u914d\u7f6e\u6587\u4ef6\u7684\u9519\u8bef\u6216\u8005\u7f3a\u5931\u800c\u5bfc\u81f4\u7684\u3002\u4f60\u53ef\u4ee5\u68c0\u67e5kubelet\u7684\u914d\u7f6e\u6587\u4ef6<code>\/etc\/kubernetes\/kubelet.conf<\/code>\u662f\u5426\u5b58\u5728\uff0c\u5e76\u786e\u4fdd\u5176\u4e2d\u7684\u4fe1\u606f\u662f\u6b63\u786e\u7684\u3002\u53e6\u5916\uff0c\u4e5f\u53ef\u4ee5\u67e5\u770bkubelet\u7684\u65e5\u5fd7\u6587\u4ef6\uff0c\u4ee5\u83b7\u53d6\u66f4\u591a\u5173\u4e8e\u670d\u52a1\u542f\u52a8\u5931\u8d25\u7684\u8be6\u7ec6\u4fe1\u606f\u3002<\/p>\n<p>\u901a\u8fc7\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u53ef\u4ee5\u83b7\u53d6kubelet\u7684\u65e5\u5fd7\u4fe1\u606f\uff1a<\/p>\n<pre><code>sudo journalctl -u kubelet<\/code><\/pre>\n<pre><code>-- Logs begin at Tue 2023-04-25 11:21:07 CST, end at Tue 2023-04-25 17:40:27 CST. 
--\nApr 25 17:37:59 zzmuap6zwdoqhqxb systemd[1]: Started kubelet: The Kubernetes Node Agent.\nApr 25 17:37:59 zzmuap6zwdoqhqxb kubelet[7534]: Flag --pod-manifest-path has been deprecated, This parameter should be set via the confiApr 25 17:37:59 zzmuap6zwdoqhqxb kubelet[7534]: Flag --cgroup-driver has been deprecated, This parameter should be set via the config fiApr 25 17:37:59 zzmuap6zwdoqhqxb kubelet[7534]: F0425 17:37:59.171440    7534 server.go:198] failed to load Kubelet config file \/etc\/kubApr 25 17:37:59 zzmuap6zwdoqhqxb systemd[1]: kubelet.service: main process exited, code=exited, status=255\/n\/a\nApr 25 17:37:59 zzmuap6zwdoqhqxb systemd[1]: Unit kubelet.service entered failed state.\nApr 25 17:37:59 zzmuap6zwdoqhqxb systemd[1]: kubelet.service failed.\nApr 25 17:38:09 zzmuap6zwdoqhqxb systemd[1]: kubelet.service holdoff time over, scheduling restart.\nApr 25 17:38:09 zzmuap6zwdoqhqxb systemd[1]: Stopped kubelet: The Kubernetes Node Agent.\nApr 25 17:38:09 zzmuap6zwdoqhqxb systemd[1]: Started kubelet: The Kubernetes Node Agent.\nApr 25 17:38:09 zzmuap6zwdoqhqxb kubelet[7545]: Flag --pod-manifest-path has been deprecated, This parameter should be set via the confiApr 25 17:38:09 zzmuap6zwdoqhqxb kubelet[7545]: Flag --cgroup-driver has been deprecated, This parameter should be set via the config fiApr 25 17:38:09 zzmuap6zwdoqhqxb kubelet[7545]: F0425 17:38:09.364173    7545 server.go:198] failed to load Kubelet config file \/etc\/kubApr 25 17:38:09 zzmuap6zwdoqhqxb systemd[1]: kubelet.service: main process exited, code=exited, status=255\/n\/a\nApr 25 17:38:09 zzmuap6zwdoqhqxb systemd[1]: Unit kubelet.service entered failed state.\nApr 25 17:38:09 zzmuap6zwdoqhqxb systemd[1]: kubelet.service failed.\nApr 25 17:38:19 zzmuap6zwdoqhqxb systemd[1]: kubelet.service holdoff time over, scheduling restart.\nApr 25 17:38:19 zzmuap6zwdoqhqxb systemd[1]: Stopped kubelet: The Kubernetes Node Agent.\nApr 25 17:38:19 zzmuap6zwdoqhqxb systemd[1]: Started kubelet: The 
Kubernetes Node Agent.\nApr 25 17:38:19 zzmuap6zwdoqhqxb kubelet[7558]: Flag --pod-manifest-path has been deprecated, This parameter should be set via the confiApr 25 17:38:19 zzmuap6zwdoqhqxb kubelet[7558]: Flag --cgroup-driver has been deprecated, This parameter should be set via the config fiApr 25 17:38:19 zzmuap6zwdoqhqxb kubelet[7558]: F0425 17:38:19.607625    7558 server.go:198] failed to load Kubelet config file \/etc\/kubApr 25 17:38:19 zzmuap6zwdoqhqxb systemd[1]: kubelet.service: main process exited, code=exited, status=255\/n\/a\nApr 25 17:38:19 zzmuap6zwdoqhqxb systemd[1]: Unit kubelet.service entered failed state.\nApr 25 17:38:19 zzmuap6zwdoqhqxb systemd[1]: kubelet.service failed.\nApr 25 17:38:29 zzmuap6zwdoqhqxb systemd[1]: kubelet.service holdoff time over, scheduling restart.\nApr 25 17:38:29 zzmuap6zwdoqhqxb systemd[1]: Stopped kubelet: The Kubernetes Node Agent.\nApr 25 17:38:29 zzmuap6zwdoqhqxb systemd[1]: Started kubelet: The Kubernetes Node Agent.\nApr 25 17:38:29 zzmuap6zwdoqhqxb kubelet[7569]: Flag --pod-manifest-path has been deprecated, This parameter should be set via the confiApr 25 17:38:29 zzmuap6zwdoqhqxb kubelet[7569]: Flag --cgroup-driver has been deprecated, This parameter should be set via the config fiApr 25 17:38:29 zzmuap6zwdoqhqxb kubelet[7569]: F0425 17:38:29.868205    7569 server.go:198] failed to load Kubelet config file \/etc\/kubApr 25 17:38:29 zzmuap6zwdoqhqxb systemd[1]: kubelet.service: main process exited, code=exited, status=255\/n\/a\nApr 25 17:38:29 zzmuap6zwdoqhqxb systemd[1]: Unit kubelet.service entered failed 
state.<\/code><\/pre>\n<p>\u5728\u65e5\u5fd7\u4fe1\u606f\u4e2d\uff0c\u53ef\u4ee5\u67e5\u627e\u5173\u4e8ekubelet\u670d\u52a1\u542f\u52a8\u5931\u8d25\u6216\u9047\u5230\u95ee\u9898\u7684\u63d0\u793a\u6216\u9519\u8bef\u4fe1\u606f\u3002<\/p>\n<p>\u6839\u636e\u63d0\u4f9b\u7684\u65e5\u5fd7\u4fe1\u606f\uff0ckubelet\u670d\u52a1\u542f\u52a8\u5931\u8d25\uff0c\u5e76\u4e14\u5728\u542f\u52a8\u5931\u8d25\u540e\u81ea\u52a8\u9000\u51fa\u3002\u53ef\u80fd\u7684\u539f\u56e0\u662fkubelet\u7684\u914d\u7f6e\u6587\u4ef6\u4e2d\u6709\u9519\u8bef\u5bfc\u81f4kubelet\u670d\u52a1\u65e0\u6cd5\u6b63\u5e38\u542f\u52a8\u3002<\/p>\n<p>\u4f60\u53ef\u4ee5\u68c0\u67e5kubelet\u914d\u7f6e\u6587\u4ef6<code>\/etc\/kubernetes\/kubelet.conf<\/code>\u7684\u5185\u5bb9\uff0c\u5e76\u786e\u4fdd\u5176\u4e2d\u7684\u4fe1\u606f\u914d\u7f6e\u6b63\u786e\uff0c\u5982kubeconfig\u6587\u4ef6\u3001kubelet \u76d1\u542c\u7684 ip \u548chostname\u7b49\u3002\u5982\u679c\u4f60\u4e4b\u524d\u901a\u8fc7kubeadm\u5de5\u5177\u6765\u5b89\u88c5Kubernetes\u96c6\u7fa4\u7684\u8bdd\uff0ckubelet\u7684\u914d\u7f6e\u6587\u4ef6\uff08kubelet.conf\uff09\u5e94\u8be5\u7531kubeadm\u81ea\u52a8\u751f\u6210\u5e76\u653e\u7f6e\u5728<code>\/etc\/kubernetes\/<\/code>\u76ee\u5f55\u4e0b\u3002\u56e0\u6b64\uff0c\u4f60\u53ef\u4ee5\u5c1d\u8bd5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u68c0\u67e5\u8be5\u914d\u7f6e\u6587\u4ef6\u662f\u5426\u5b58\u5728\uff1a<\/p>\n<pre><code>ls -l 
\/etc\/kubernetes\/kubelet.conf<\/code><\/pre>\n<p>\u5982\u679ckubelet\u914d\u7f6e\u6587\u4ef6\u4e0d\u5b58\u5728\uff0c\u5219\u9700\u8981\u624b\u52a8\u521b\u5efa\u8be5\u6587\u4ef6\uff0c\u5e76\u786e\u4fdd\u5176\u4e2d\u5305\u542b\u6b63\u786e\u7684\u914d\u7f6e\u4fe1\u606f\u3002\u53ef\u4ee5\u53c2\u8003Kubernetes\u5b98\u65b9\u6587\u6863\u4e2d\u7684\u8bf4\u660e\uff0c\u6765\u786e\u5b9akubelet\u914d\u7f6e\u6587\u4ef6\u5e94\u8be5\u5305\u542b\u54ea\u4e9b\u4fe1\u606f\u4ee5\u53ca\u5982\u4f55\u8fdb\u884c\u914d\u7f6e\u3002\u5728\u7f16\u8f91\u597dkubelet\u914d\u7f6e\u6587\u4ef6\u540e\uff0c\u91cd\u65b0\u542f\u52a8kubelet\u670d\u52a1\u5373\u53ef\u3002<\/p>\n<p>\u5982\u679c\u95ee\u9898\u4ecd\u7136\u5b58\u5728\uff0c\u53ef\u4ee5\u8fdb\u4e00\u6b65\u67e5\u770bkubelet\u7684\u65e5\u5fd7\u4fe1\u606f\uff0c\u4ee5\u4fbf\u6e05\u695a\u5730\u4e86\u89e3kubelet\u670d\u52a1\u542f\u52a8\u65f6\u53d1\u751f\u4e86\u4ec0\u4e48\u9519\u8bef\uff0c\u4ece\u800c\u66f4\u597d\u5730\u89e3\u51b3\u95ee\u9898\u3002<\/p>\n<p>\u5b89\u88c5kubelet\u65f6\u7f3a\u5931\u4e86kubelet\u914d\u7f6e\u6587\u4ef6\uff0c\u53ef\u4ee5\u91cd\u65b0\u751f\u6210kubelet\u914d\u7f6e\u6587\u4ef6\uff0c\u53ef\u4ee5\u4f7f\u7528\u4e0b\u9762\u7684\u547d\u4ee4\u751f\u6210\uff5e<\/p>\n<pre><code>sudo kubeadm config print init-defaults --component-configs KubeletConfiguration > kubelet.conf<\/code><\/pre>\n<p>\u751f\u6210\u7684kubelet.conf\u6587\u4ef6\u4e2d\u5e94\u5305\u542b\u4e86kubelet\u7684\u9ed8\u8ba4\u914d\u7f6e\u4fe1\u606f\u3002\u63a5\u4e0b\u6765\uff0c\u5c06\u8be5\u6587\u4ef6\u79fb\u52a8\u5230<code>\/etc\/kubernetes\/<\/code>\u76ee\u5f55\u4e0b\uff1a<\/p>\n<pre><code>sudo mv kubelet.conf \/etc\/kubernetes\/<\/code><\/pre>\n<pre><code>sudo mv kubelet.conf \/etc\/kubernetes\/ mv: cannot move \u2018kubelet.conf\u2019 to \u2018\/etc\/kubernetes\/\u2019: No such file or directory<\/code><\/pre>\n<p>\u5982\u679c\u5728\u5c1d\u8bd5\u5c06kubelet.conf\u6587\u4ef6\u79fb\u52a8\u5230\/etc\/kubernetes\/\u76ee\u5f55\u65f6\uff0c\u7cfb\u7edf\u63d0\u793a\u201cno 
such file or directory\u201d\u9519\u8bef\uff0c\u90a3\u4e48\u53ef\u80fd\u662f\u7531\u4e8e\/etc\/kubernetes\u76ee\u5f55\u4e0d\u5b58\u5728\u5bfc\u81f4\u7684\u3002\u4f60\u5e94\u8be5\u5148\u521b\u5efa\/etc\/kubernetes\u76ee\u5f55\uff0c\u5e76\u786e\u4fdd\u65b0\u76ee\u5f55\u5177\u6709\u6b63\u786e\u7684\u6743\u9650\u3002\u5177\u4f53\u6765\u8bf4\uff0c\u53ef\u4ee5\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a<\/p>\n<pre><code>sudo mkdir \/etc\/kubernetes\nsudo chmod 755 \/etc\/kubernetes<\/code><\/pre>\n<p>\u7136\u540e\u518d\u5c06kubelet.conf\u6587\u4ef6\u79fb\u52a8\u5230\/etc\/kubernetes\u76ee\u5f55\u4e0b\u5373\u53ef\uff1a<\/p>\n<pre><code>sudo mv kubelet.conf \/etc\/kubernetes\/<\/code><\/pre>\n<p>\u8bf7\u6ce8\u610f\uff0c\u5982\u679c\u4f60\u4f7f\u7528\u7684kubeadm\u751f\u6210\u7684kubelet.conf\u6587\u4ef6\u5305\u542bCA\u8bc1\u4e66\u7684\u76f8\u5173\u4fe1\u606f\uff0c\u90a3\u4e48\u786e\u4fdd\u6743\u9650\u6b63\u786e\uff0c\u53ef\u4ee5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\uff1a<\/p>\n<pre><code>sudo chown root:root \/etc\/kubernetes\/kubelet.conf\nsudo chmod 644 \/etc\/kubernetes\/kubelet.conf<\/code><\/pre>\n<\/li>\n<li>\n\u914d\u7f6e Kubernetes \u670d\u52a1\u7684\u5f00\u673a\u81ea\u542f\u52a8\uff1a<\/p>\n<pre><code>sudo systemctl enable kubelet<\/code><\/pre>\n<\/li>\n<\/ol>\n<pre><code>sudo systemctl enable kubelet\nCreated symlink from \/etc\/systemd\/system\/multi-user.target.wants\/kubelet.service to \/usr\/lib\/systemd\/system\/kubelet.service.<\/code><\/pre>\n<h1>======= \u91cd\u65b0\u5f00\u59cb==========<\/h1>\n<p>\u51c6\u5907\u4e09\u53f0CentOS 7.6\u4ee5\u4e0a\u673a\u5668,\u914d\u7f6e\u4e3b\u673a\u540d\u548chosts,\u5173\u95ed\u9632\u706b\u5899\u548cSelinux<\/p>\n<ol>\n<li>\n\u914d\u7f6e\u4e3b\u673a\u540d<\/p>\n<pre><code># \u4fee\u6539\u4e3b\u673a\u540d\nhostnamectl set-hostname node1  # node1\u4f5c\u4e3a\u4e3b\u8282\u70b9\nhostnamectl set-hostname node2  # node2\u4f5c\u4e3a\u5de5\u4f5c\u8282\u70b9\nhostnamectl set-hostname node3  # 
node3\u4f5c\u4e3a\u5de5\u4f5c\u8282\u70b9<\/code><\/pre>\n<\/li>\n<\/ol>\n<pre><code># \u786e\u8ba4\u4e3b\u673a\u540d\u5df2\u53d8\u66f4\nhostnamectl status <\/code><\/pre>\n<ol start=\"2\">\n<li>\u914d\u7f6ehosts\u6587\u4ef6<\/li>\n<\/ol>\n<pre><code># \u5728\u4e09\u53f0\u670d\u52a1\u5668\u4e0a\u6dfb\u52a0hosts\u8bb0\u5f55\ncat &gt;&gt; \/etc\/hosts &lt;&lt;EOF\n192.168.1.127 node1 \n192.168.1.115 node2\n192.168.1.147 node3  \nEOF<\/code><\/pre>\n<ol start=\"3\">\n<li>\u5173\u95ed\u9632\u706b\u5899<\/li>\n<\/ol>\n<pre><code># \u5728\u4e09\u53f0\u670d\u52a1\u5668\u4e0a\u5173\u95ed\u9632\u706b\u5899\nsystemctl stop firewalld\nsystemctl disable firewalld<\/code><\/pre>\n<ol start=\"4\">\n<li>\u5173\u95edSelinux<\/li>\n<\/ol>\n<pre><code># \u5728\u4e09\u53f0\u670d\u52a1\u5668\u4e0a\u5173\u95edSelinux\nsed -i &#039;s\/enforcing\/disabled\/&#039; \/etc\/selinux\/config  \nsetenforce 0<\/code><\/pre>\n<ol start=\"5\">\n<li>\u91cd\u542f\u7f51\u7edc\u670d\u52a1<\/li>\n<\/ol>\n<pre><code># \u5728\u4e09\u53f0\u670d\u52a1\u5668\u4e0a\u91cd\u542f\u7f51\u7edc\u670d\u52a1\nsystemctl restart network<\/code><\/pre>\n<ol start=\"6\">\n<li>\u914d\u7f6essh\u514d\u5bc6\u7801\u767b\u5f55- \u5728node1\u4e0a\u751f\u6210ssh key:<\/li>\n<\/ol>\n<pre><code>ssh-keygen -t rsa<\/code><\/pre>\n<p>\u4e00\u76f4\u6309\u56de\u8f66,\u6700\u7ec8\u4f1a\u751f\u6210\u4e24\u4e2a\u6587\u4ef6:id_rsa(\u79c1\u94a5)\u548cid_rsa.pub(\u516c\u94a5)<\/p>\n<ol start=\"2\">\n<li>\u5c06id_rsa.pub(\u516c\u94a5)\u5185\u5bb9\u590d\u5236\u5230\u526a\u5207\u677f:<\/li>\n<\/ol>\n<pre><code>cat ~\/.ssh\/id_rsa.pub<\/code><\/pre>\n<p>\u590d\u5236\u663e\u793a\u7684\u5185\u5bb9<\/p>\n<ol start=\"3\">\n<li>\u767b\u5f55node2,\u5728authorized_keys\u6587\u4ef6\u672b\u5c3e\u6dfb\u52a0\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9:<\/li>\n<\/ol>\n<pre><code>ssh node2\necho \u628a\u6b65\u9aa42\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u7c98\u8d34\u5e76\u56de\u8f66 &gt; &gt;&gt; ~\/.ssh\/authorized_keys <\/code><\/pre>\n<pre><code>echo 
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6k8Av2wDFXOr7avTM4ScrYla4Sb+\/p3TSuN+YrQPORo4JL9RlCxUyGTeEPoRahzipZ4DxRauL\/mghtc1huDRjkZJQJUS1Ebg96M3+L3BQ4RPqmp3g7Lv46XSIHGOPKDvX16o0kTsYiTlEFc9BZc1LyJMDzIaMSfmKnEYIwb4lzPb\/VOWpq7SGLNK\/WDmxIGkZNrfDmclE3S68YrE4iE06cKKZxgiDFkmyM7SeeiqFVtLaScSIOA1Hftc+M2r+yOLfUtMvJofs6wwG3dv+L7CN0uPVuPwd\/uIES5bHR5qPDdk09hsxQCfqN2wj2SgTb2sQDLZQA3FjKYF43zgTxK\/r root@node1 \\>\\> ~\/.ssh\/authorized_keys  \n<\/code><\/pre>\n<pre><code>-bash: syntax error near unexpected token `>>'<\/code><\/pre>\n<p>\u89e3\u51b3\u529e\u6cd5\uff1a<\/p>\n<pre><code>echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6k8Av2wDFXOr7avTM4ScrYla4Sb+\/p3TSuN+YrQPORo4JL9RlCxUyGTeEPoRahzipZ4DxRauL\/mghtc1huDRjkZJQJUS1Ebg96M3+L3BQ4RPqmp3g7Lv46XSIHGOPKDvX16o0kTsYiTlEFc9BZc1LyJMDzIaMSfmKnEYIwb4lzPb\/VOWpq7SGLNK\/WDmxIGkZNrfDmclE3S68YrE4iE06cKKZxgiDFkmyM7SeeiqFVtLaScSIOA1Hftc+M2r+yOLfUtMvJofs6wwG3dv+L7CN0uPVuPwd\/uIES5bHR5qPDdk09hsxQCfqN2wj2SgTb2sQDLZQA3FjKYF43zgTxK\/r root@node1 \\>\\> ~\/.ssh\/authorized_keys  <\/code><\/pre>\n<p>\u5728\u6b65\u9aa43\u548c\u6b65\u9aa44\u4e2d,<code>&gt; &gt;&gt; ~\/.ssh\/authorized_keys<\/code>\u8fd9\u4e00\u884c\u6267\u884c\u65f6\u51fa\u73b0<code>syntax error near unexpected token &#039;&gt;&#039;<\/code>\u9519\u8bef,\u8fd9\u662f\u56e0\u4e3a '&gt;' \u7b26\u53f7\u5728bash\u4e2d\u6709\u7279\u6b8a\u542b\u4e49,\u9700\u8981\u8f6c\u4e49\u3002\u6b63\u786e\u7684\u6b65\u9aa4\u5e94\u4e3a:3. 
\u767b\u5f55node2,\u5728authorized_keys\u6587\u4ef6\u672b\u5c3e\u6dfb\u52a0\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9:<\/p>\n<pre><code>bash\nssh node2  \necho \u628a\u6b65\u9aa42\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u7c98\u8d34\u5e76\u56de\u8f66 \\&gt;\\&gt; ~\/.ssh\/authorized_keys  <\/code><\/pre>\n<p>\u6ce8\u610f,<code>&gt; &gt;&gt;<\/code> \u53d8\u4e3a <code>\\&gt;\\&gt;<\/code><\/p>\n<ol start=\"4\">\n<li>\u767b\u5f55node3,\u5728authorized_keys\u6587\u4ef6\u672b\u5c3e\u6dfb\u52a0\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9: <\/li>\n<\/ol>\n<pre><code>bash \nssh node3\necho \u628a\u6b65\u9aa42\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u7c98\u8d34\u5e76\u56de\u8f66 \\&gt;\\&gt; ~\/.ssh\/authorized_keys  <\/code><\/pre>\n<p>\u540c\u6837,<code>&gt; &gt;&gt;<\/code> \u53d8\u4e3a <code>\\&gt;\\&gt;<\/code>'&gt;' \u7b26\u53f7\u4f5c\u4e3a\u8f93\u51fa\u91cd\u5b9a\u5411\u7b26,\u5728bash\u4e2d\u6709\u7279\u6b8a\u610f\u4e49,\u6240\u4ee5\u76f4\u63a5\u4f7f\u7528\u4f1a\u4ea7\u751f\u8bed\u6cd5\u9519\u8bef\u3002\u4f7f\u7528\u8f6c\u4e49\u7b26'\\'\u53ef\u4ee5\u907f\u514d\u8fd9\u4e2a\u9519\u8bef\u3002\u4fee\u6b63\u540e\u7684\u8fd9\u4e24\u4e2a\u6b65\u9aa4,\u5e94\u8be5\u53ef\u4ee5\u6b63\u5e38\u6267\u884c,\u5c06\u516c\u94a5\u5185\u5bb9\u8ffd\u52a0\u5230authorized_keys\u6587\u4ef6,\u5b9e\u73b0\u514d\u5bc6\u767b\u5f55\u914d\u7f6e\u3002<\/p>\n<ol start=\"4\">\n<li>\u767b\u5f55node3,\u5728authorized_keys\u6587\u4ef6\u672b\u5c3e\u6dfb\u52a0\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9:<\/li>\n<\/ol>\n<pre><code>bash\nssh node3\necho \u628a\u6b65\u9aa42\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u7c98\u8d34\u5e76\u56de\u8f66 &gt; &gt;&gt; ~\/.ssh\/authorized_keys<\/code><\/pre>\n<ul>\n<li>\u5c06\u516c\u94a5\u590d\u5236\u5230\u5176\u4ed6\u4e24\u53f0\u673a\u5668:\n<pre><code>ssh-copy-id node2 \nssh-copy-id node3<\/code><\/pre>\n<\/li>\n<li>\u6d4b\u8bd5ssh\u514d\u5bc6\u7801\u767b\u5f55:\n<pre><code>bash \nssh node2 \nssh 
node3<\/code><\/pre>\n<p>\u767b\u5f55node2\u65f6\u4ecd\u7136\u9700\u8981\u8f93\u5165\u5bc6\u7801,\u8fd9\u8868\u660enode2\u5e76\u672a\u6210\u529f\u914d\u7f6e\u514d\u5bc6\u767b\u5f55\u3002\u8fd9\u53ef\u80fd\u7531\u51e0\u4e2a\u539f\u56e0\u5bfc\u81f4:1. node2\u7684authorized_keys\u6587\u4ef6\u5185\u5bb9\u683c\u5f0f\u9519\u8bef\u8bf7\u767b\u5f55node2,\u68c0\u67e5~\/.ssh\/authorized_keys\u6587\u4ef6\u5185\u5bb9\u662f\u5426\u4e3a\u6b65\u9aa43\u8f93\u51fa\u7684\u516c\u94a5\u5b57\u7b26\u4e32,\u662f\u5426\u6709\u7a7a\u884c\u6216\u7a7a\u683c\u7b49\u5f71\u54cd\u5185\u5bb9\u683c\u5f0f\u7684\u5b57\u7b26\u3002<br \/>\n\u5982\u679c\u683c\u5f0f\u4e0d\u6b63\u786e,\u8bf7\u5220\u9664authorized_keys\u6587\u4ef6\u5185\u5bb9,\u91cd\u65b0\u6267\u884c\u6b65\u9aa43,\u786e\u4fdd\u516c\u94a5\u5185\u5bb9\u5b8c\u5168\u6b63\u786e\u5730\u8ffd\u52a0\u5230\u8be5\u6587\u4ef6\u30022. node2\u7684authorized_keys\u6587\u4ef6\u6743\u9650\u4e0d\u6b63\u786e\u6b63\u786e\u7684\u6743\u9650\u4e3a644,\u8bf7\u6267\u884c:<\/p>\n<pre><code>bash\nchmod 644 ~\/.ssh\/authorized_keys<\/code><\/pre>\n<ol start=\"3\">\n<li>SSH\u670d\u52a1\u672a\u542f\u52a8\u6216\u914d\u7f6e\u9519\u8bef\u8bf7\u767b\u5f55node2,\u6267\u884c:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl status sshd<\/code><\/pre>\n<p>\u68c0\u67e5SSH\u670d\u52a1\u662f\u5426\u5904\u4e8eactive\u72b6\u6001,\u5982\u679c\u672a\u542f\u52a8\u8bf7\u6267\u884c:<\/p>\n<pre><code>bash\nsystemctl start sshd <\/code><\/pre>\n<p>\u5982\u679c\u914d\u7f6e\u6587\u4ef6\u6709\u8bef,\u8bf7\u68c0\u67e5\/etc\/ssh\/sshd_config,\u53c2\u8003\u5176\u4ed6\u8282\u70b9\u7684\u914d\u7f6e\u8fdb\u884c\u4fee\u6b63\u3002<\/p>\n<ol start=\"4\">\n<li>\u8282\u70b9\u65f6\u95f4\u540c\u6b65\u95ee\u9898\u5982\u679c\u8282\u70b9\u65f6\u95f4\u4e0d\u540c\u6b65,\u4e5f\u4f1a\u5bfc\u81f4SSH\u767b\u5f55\u9700\u8981\u8f93\u5165\u5bc6\u7801\u3002\u8bf7\u5728\u4e09\u4e2a\u8282\u70b9\u4e0a\u540c\u6b65\u65f6\u95f4,\u6267\u884c:<\/li>\n<\/ol>\n<pre><code>bash\nntpdate 
0.centos.pool.ntp.org<\/code><\/pre>\n<p>\u4ee5\u4e0a\u51e0\u4e2a\u56e0\u7d20\u662f\u5bfc\u81f4\u514d\u5bc6\u767b\u5f55\u4e0d\u6210\u529f,\u9700\u8981\u8f93\u5165\u5bc6\u7801\u7684\u5e38\u89c1\u539f\u56e0\u3002\u8bf7\u6309\u7167\u4e0a\u8ff0\u6b65\u9aa4\u8fdb\u884c\u68c0\u67e5\u3001\u4fee\u6b63,\u6700\u7ec8\u786e\u4fdd:- node2\u7684authorized_keys\u6587\u4ef6\u683c\u5f0f\u6b63\u786e,\u6743\u9650\u4e3a644<\/p>\n<\/li>\n<li>SSH\u670d\u52a1\u542f\u52a8\u6b63\u5e38,\u914d\u7f6e\u6b63\u786e<\/li>\n<li>\u4e09\u4e2a\u8282\u70b9\u7cfb\u7edf\u65f6\u95f4\u540c\u6b65\u5b8c\u6210\u4e0a\u8ff0\u8981\u6c42\u540e,\u91cd\u65b0\u6267\u884c<code>ssh node2<\/code>\u5e94\u8be5\u53ef\u4ee5\u76f4\u63a5\u767b\u5f55,\u4e0d\u9700\u8981\u8f93\u5165\u5bc6\u7801\u3002\n<ol>\n<li>\u767b\u5f55node2:<\/li>\n<\/ol>\n<pre><code>bash\nssh node2   # \u6b64\u65f6\u8fd8\u9700\u8981\u8f93\u5165\u5bc6\u7801<\/code><\/pre>\n<ol start=\"2\">\n<li>\u68c0\u67e5authorized_keys\u6587\u4ef6\u5185\u5bb9:<\/li>\n<\/ol>\n<pre><code>bash\ncat ~\/.ssh\/authorized_keys<\/code><\/pre>\n<ol start=\"3\">\n<li>\u68c0\u67e5\u6587\u4ef6\u5185\u5bb9\u662f\u5426\u6b63\u786e,\u5e94\u8be5\u662f\u60a8\u5728node1\u4e0a\u751f\u6210\u7684\u516c\u94a5\u5b57\u7b26\u4e32,\u5e76\u4e14\u6bcf\u4e00\u884c\u53ea\u5305\u542b\u4e00\u4e2a\u516c\u94a5,\u6ca1\u6709\u7a7a\u884c\u6216\u7a7a\u683c\u3002\u5982\u679c\u5185\u5bb9\u683c\u5f0f\u6b63\u786e,\u6587\u4ef6\u5185\u5bb9\u5e94\u8be5\u7c7b\u4f3c\u8fd9\u6837:<\/li>\n<\/ol>\n<pre><code>ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAzFhfgaLx41Wmh2vO3h2rjvkRUtdflFcDSUaZyREeVig3FKSTTiFmORBZWTuSHOzgYsuIwOBm852994WkmI9VREtdem1MxiSMDb+WrheMTBxnWiwje0W+QbvEfWkhT+5h+TJp0IzSySbcMnVovVXLCL8z15MKoxf94CUz+GfRlWZ+RR5i5QVdWtHtc464fWXk8VYM1G13LKnVey66qjoqarUrKo1fMMidZXr9acWeGw\/+B+TTdhiZPzInhVbQbXozki+Tt6cxvDmCw== root@node1<\/code><\/pre>\n<p>\u5982\u679c\u5185\u5bb9\u683c\u5f0f\u4e0d\u6b63\u786e,\u6216\u4e0d\u5b58\u5728authorized_keys\u6587\u4ef6,\u8bf7\u6267\u884c\u4e0b\u8ff0\u6b65\u9aa4\u4fee\u6b63:1) 
\u5220\u9664\u5df2\u6709\u7684authorized_keys\u6587\u4ef6:<\/p>\n<pre><code>bash\nrm -f ~\/.ssh\/authorized_keys<\/code><\/pre>\n<p>2) \u5728node1\u4e0a\u5c06\u516c\u94a5\u5185\u5bb9\u91cd\u65b0\u590d\u5236\u5230\u526a\u5207\u677f:<\/p>\n<pre><code>bash\ncat ~\/.ssh\/id_rsa.pub<\/code><\/pre>\n<p>3) \u767b\u5f55node2,\u5c06\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u8ffd\u52a0\u5230authorized_keys\u6587\u4ef6:<\/p>\n<pre><code>bash \necho \u628a\u6b65\u9aa42\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u7c98\u8d34\u5e76\u56de\u8f66 \\&gt;\\&gt; ~\/.ssh\/authorized_keys<\/code><\/pre>\n<p>4) \u8bbe\u7f6eauthorized_keys\u6587\u4ef6\u6743\u9650\u4e3a644:<\/p>\n<pre><code>bash \nchmod 644 ~\/.ssh\/authorized_keys <\/code><\/pre>\n<p>5) \u91cd\u65b0\u6d4b\u8bd5\u4ecenode1\u767b\u5f55node2\u662f\u5426\u9700\u8981\u8f93\u5165\u5bc6\u7801\u3002\u8bf7\u6309\u4ee5\u4e0a\u6b65\u9aa4\u68c0\u67e5authorized_keys\u6587\u4ef6\u5185\u5bb9,\u786e\u4fdd\u683c\u5f0f\u548c\u6743\u9650\u6b63\u786e\u65e0\u8bef,\u7136\u540e\u91cd\u65b0\u6d4b\u8bd5ssh\u767b\u5f55\u3002\u5982\u679c\u8fd8\u9700\u8981\u8f93\u5165\u5bc6\u7801,\u8bf7\u53c2\u8003\u524d\u4e00\u7b54\u6848\u4e2d\u7684\u5176\u4ed6\u4fee\u6b63\u6b65\u9aa4\u8fdb\u884c\u68c0\u67e5\u3002<br \/>\n6) SSH\u5bc6\u94a5\u5bf9\u6587\u4ef6\u6743\u9650\u5728node1\u4e0a\u68c0\u67e5id_rsa(\u79c1\u94a5)\u548cid_rsa.pub(\u516c\u94a5)\u6587\u4ef6\u7684\u6743\u9650,\u5e94\u4e3a600\u548c644\u3002\u5982\u679c\u6743\u9650\u8fc7\u5927,\u4f1a\u5bfc\u81f4\u514d\u5bc6\u767b\u5f55\u5931\u8d25,\u8bf7\u4fee\u6b63:<\/p>\n<pre><code>bash\nchmod 600 ~\/.ssh\/id_rsa\nchmod 644 ~\/.ssh\/id_rsa.pub<\/code><\/pre>\n<ol start=\"5\">\n<li>node1\u548cnode2\u4e0aSSH\u670d\u52a1\u72b6\u6001\u6b63\u5e38,\u5e76\u542f\u7528PublicKeyAuthentication\u68c0\u67e5<code>systemctl status sshd<\/code><\/li>\n<\/ol>\n<p>\u68c0\u67e5\/etc\/ssh\/sshd_config\u6587\u4ef6\u4e2dPublicKeyAuthentication\u662f\u5426\u4e3ayes<\/p>\n<ol 
start=\"6\">\n<li>node1\u548cnode2\u7cfb\u7edf\u65f6\u95f4\u540c\u6b65<\/li>\n<\/ol>\n<pre><code>bash\nntpdate 0.centos.pool.ntp.org <\/code><\/pre>\n<pre><code>[root@node2 ~]# echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxHBwc4AoL6\/qLGoCUXV0oR7di9PKIxem9CH5JgtwGZroqhUR+WjwPbbuft7pmh4h7o6Gig52VltuxzZajj6rGxA88IsDzDKL\/Rzh\/lbiK6OtNQSh4Uop4Ni8t40aFKF8\/JXefgDp9SJ12APDMdiUgwEFsLx\/CAAoYlAxoUVbVVyUkYAdmPR\/yqPA4kQhHis9lkJJqZZdsk6AxjXBHhQLGJgZxylamLeLxfqyEvVv8BeSVlG0a0IR82kLveKBRb8i5TUbH\/85zVCns8O75lr0E39AsY8Uez3P\/zr352Ido56cfM2GO+7+6asf7q0XhBpQ2AwOGAFKzv1QX810PZ5ED root@node1 \\&gt;\\&gt; ~\/.ssh\/authorized_keys\nssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxHBwc4AoL6\/qLGoCUXV0oR7di9PKIxem9CH5JgtwGZroqhUR+WjwPbbuft7pmh4h7o6Gig52VltuxzZajj6rGxA88IsDzDKL\/Rzh\/lbiK6OtNQSh4Uop4Ni8t40aFKF8\/JXefgDp9SJ12APDMdiUgwEFsLx\/CAAoYlAxoUVbVVyUkYAdmPR\/yqPA4kQhHis9lkJJqZZdsk6AxjXBHhQLGJgZxylamLeLxfqyEvVv8BeSVlG0a0IR82kLveKBRb8i5TUbH\/85zVCns8O75lr0E39AsY8Uez3P\/zr352Ido56cfM2GO+7+6asf7q0XhBpQ2AwOGAFKzv1QX810PZ5ED root@node1 &gt;&gt; \/root\/.ssh\/authorized_keys<\/code><\/pre>\n<p>\u6839\u636e\u60a8\u63d0\u4f9b\u7684\u8f93\u51fa,\u6211\u770b\u5230\u60a8\u662f\u76f4\u63a5\u5c06node1\u7684\u516c\u94a5\u5185\u5bb9\u8ffd\u52a0\u5230node2\u7684\/root\/.ssh\/authorized_keys\u6587\u4ef6,\u8fd9\u5c31\u5bfc\u81f4\u4e86\u514d\u5bc6\u767b\u5f55\u5931\u8d25\u3002\u6b63\u786e\u7684\u6b65\u9aa4\u5e94\u8be5\u662f:\u5c06\u516c\u94a5\u8ffd\u52a0\u5230\u666e\u901a\u7528\u6237\u7684~\/.ssh\/authorized_keys\u6587\u4ef6,\u800c\u4e0d\u662froot\u7528\u6237\u7684\u6587\u4ef6\u3002\u6240\u4ee5,\u8bf7\u6267\u884c\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6b63:1.\u5728node2\u4e0a\u521b\u5efa\u4e00\u4e2a\u666e\u901a\u7528\u6237,\u5982node2:<\/p>\n<pre><code>bash\nadduser node2<\/code><\/pre>\n<p>2.\u5207\u6362\u5230node2\u7528\u6237 <\/p>\n<pre><code>bash\nsu - node2<\/code><\/pre>\n<p>3.\u5c06node1\u4e0a\u7684id_rsa.pub\u516c\u94a5\u5185\u5bb9\u590d\u5236\u5230\u526a\u5207\u677f <\/p>\n<pre><code>bash\ncat 
~\/.ssh\/id_rsa.pub <\/code><\/pre>\n<p>4.\u5c06\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u8ffd\u52a0\u5230\u5f53\u524d\u666e\u901a\u7528\u6237test\u7684~\/.ssh\/authorized_keys\u6587\u4ef6<\/p>\n<pre><code>bash\necho \u4ece\u6b65\u9aa43\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u7c98\u8d34\u5e76\u56de\u8f66 &gt;&gt; ~\/.ssh\/authorized_keys <\/code><\/pre>\n<p>5.\u8bbe\u7f6eauthorized_keys\u6587\u4ef6\u6743\u9650\u4e3a644:<\/p>\n<pre><code>bash\nchmod 644 ~\/.ssh\/authorized_keys  <\/code><\/pre>\n<p>6.\u4f7f\u7528node1\u7684\u666e\u901a\u7528\u6237node1\u8d26\u53f7\u767b\u5f55node2\u7684\u666e\u901a\u7528\u6237node2\u8d26\u53f7<\/p>\n<pre><code>bash\nssh node2@node2  \n# \u5982\u679c\u4ecd\u9700\u5bc6\u7801,\u68c0\u67e5\u6b65\u9aa43-5<\/code><\/pre>\n<p>7.\u4f7f\u7528node1\u7684root\u8d26\u53f7\u767b\u5f55node2\u7684test\u8d26\u53f7<\/p>\n<pre><code>bash \nssh node2@node2<\/code><\/pre>\n<p>\u73b0\u5728,\u5e94\u8be5\u53ef\u4ee5\u76f4\u63a5\u767b\u5f55,\u4e0d\u9700\u8981\u5bc6\u7801\u3002\u8fd9\u8868\u660e\u6211\u4eec\u901a\u8fc7\u4e3a\u666e\u901a\u7528\u6237\u914d\u7f6e\u514d\u5bc6\u767b\u5f55,\u5b9e\u73b0\u4e86\u4ecenode1\u7684root\u548c\u666e\u901a\u7528\u6237\u767b\u5f55node2\u7684\u666e\u901a\u7528\u6237\u3002\u5c06\u514d\u5bc6\u767b\u5f55\u914d\u7f6e\u5728\u666e\u901a\u7528\u6237\u4e0a,\u800c\u4e0d\u662f\u76f4\u63a5\u5728root\u7528\u6237\u4e0a,\u8fd9\u662f\u51fa\u4e8e\u5b89\u5168\u8003\u8651\u3002\u6211\u4eec\u64cd\u4f5c\u7cfb\u7edf\u7684\u5404\u79cd\u8d44\u6e90\u4e5f\u5e94\u9075\u5faa\u6700\u4f4e\u6743\u9650\u539f\u5219\u3002<\/p>\n<ol>\n<li>\u5728node2\u4e0a\u521b\u5efa\u666e\u901a\u7528\u6237node2,\u5e76\u4e3anode2\u7528\u6237\u751f\u6210\u516c\u94a5\u5bf9:<\/li>\n<\/ol>\n<pre><code>bash\nadduser node2 \nsu - node2\nssh-keygen -t rsa  # \u4e00\u76f4\u6309\u56de\u8f66,\u751f\u6210\u5bc6\u94a5\u5bf9<\/code><\/pre>\n<ol 
start=\"2\">\n<li>\u5c06node2\u7684node2\u7528\u6237\u7684\u516c\u94a5\u5185\u5bb9\u590d\u5236\u5230\u526a\u5207\u677f:<\/li>\n<\/ol>\n<pre><code>bash \ncat ~\/.ssh\/id_rsa.pub<\/code><\/pre>\n<ol start=\"3\">\n<li>\u767b\u5f55node1,\u5c06\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u8ffd\u52a0\u5230node2\u7528\u6237\u7684~\/.ssh\/authorized_keys\u6587\u4ef6:<\/li>\n<\/ol>\n<pre><code>bash\necho \u4ece\u6b65\u9aa42\u590d\u5236\u7684\u516c\u94a5\u5185\u5bb9\u7c98\u8d34\u81f3\u56de\u8f66 &gt;&gt; ~\/.ssh\/authorized_keys <\/code><\/pre>\n<ol start=\"4\">\n<li>\u8bbe\u7f6enode1\u4e0a\u7684~\/.ssh\/authorized_keys\u6587\u4ef6\u6743\u9650\u4e3a644:<\/li>\n<\/ol>\n<pre><code>bash\nchmod 644 ~\/.ssh\/authorized_keys<\/code><\/pre>\n<ol start=\"5\">\n<li>\u4f7f\u7528node2\u7684node2\u7528\u6237\u767b\u5f55node1:<\/li>\n<\/ol>\n<pre><code>bash\nssh node1  \n# \u76f4\u63a5\u767b\u5f55,\u4e0d\u9700\u8f93\u5165\u5bc6\u7801<\/code><\/pre>\n<ol start=\"6\">\n<li>\u4f7f\u7528node1\u7684root\u7528\u6237\u767b\u5f55node2\u7684node2\u7528\u6237:<\/li>\n<\/ol>\n<pre><code>bash \nssh node2@node2\n# \u76f4\u63a5\u767b\u5f55,\u4e0d\u9700\u8f93\u5165\u5bc6\u7801<\/code><\/pre>\n<p>\u8fd9\u4e2a\u53c2\u6570\u7684\u5168\u79f0\u662fPublicKeyAuthentication,\u7528\u4e8e\u542f\u7528\u6216\u7981\u7528SSH\u516c\u94a5\u8ba4\u8bc1\u3002\u8981\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u6dfb\u52a0\u8fd9\u4e2a\u53c2\u6570,\u53ef\u4ee5\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4:1. \u7f16\u8f91\/etc\/ssh\/sshd_config\u914d\u7f6e\u6587\u4ef6:<\/p>\n<pre><code>bash\nvim \/etc\/ssh\/sshd_config<\/code><\/pre>\n<ol start=\"2\">\n<li>\u6309\u4e0bShift+G\u5feb\u6377\u952e\u8df3\u8f6c\u5230\u914d\u7f6e\u6587\u4ef6\u672b\u5c3e\u30023. \u6dfb\u52a0PubkeyAuthentication\u53c2\u6570,\u5e76\u8bbe\u7f6e\u4e3ayes\u542f\u7528\u516c\u94a5\u8ba4\u8bc1:<\/li>\n<\/ol>\n<pre><code>PubkeyAuthentication yes<\/code><\/pre>\n<ol start=\"4\">\n<li>\u4fdd\u5b58\u5e76\u9000\u51fa\u7f16\u8f91\u5668\u30025. 
\u91cd\u542fSSH\u670d\u52a1\u4ee5\u4f7f\u914d\u7f6e\u751f\u6548: <\/li>\n<\/ol>\n<pre><code>bash \nsystemctl restart sshd<\/code><\/pre>\n<p>.ssh\u76ee\u5f55\u8981700,authorized_keys\u6587\u4ef6\u8981600\u3002\u4e5f\u6709\u8bf4\u662f755\u548c655\u70b9\uff0c\u53cd\u6b63\u540e\u9762\u4e24\u4e2a\u4e0d\u80fd\u6709\u5199\u6743\u9650<\/p>\n<\/li>\n<\/ul>\n<p>\u5728\u914d\u7f6ek8s\u9ad8\u53ef\u7528\u96c6\u7fa4\u65f6,\u8282\u70b9\u4e4b\u95f4\u901a\u5e38\u9700\u8981\u5efa\u7acbSSH\u514d\u5bc6\u767b\u5f55\u8fde\u63a5,\u7528\u4e8e\u8282\u70b9\u4e4b\u95f4\u7684\u901a\u4fe1\u548c\u63a7\u5236\u3002<\/p>\n<p>\u5bf9\u4e8e3\u8282\u70b9\u7684k8s\u96c6\u7fa4,\u914d\u7f6eSSH\u514d\u5bc6\u767b\u5f55\u7684\u63a8\u8350\u65b9\u5f0f\u6709\u4e24\u79cd:<\/p>\n<ol>\n<li>\n\u8282\u70b9\u5bf9\u8282\u70b9\u914d\u7f6e:\u5373node1\u514d\u5bc6\u8bbf\u95eenode2,node2\u514d\u5bc6\u8bbf\u95eenode3,node3\u514d\u5bc6\u8bbf\u95eenode1\u3002\u8fd9\u79cd\u914d\u7f6e\u7b80\u5355,\u4f46\u662f\u5b58\u5728\u95ee\u9898\u662f\u4e2d\u95f4\u8282\u70b9\u5931\u6548\u4f1a\u5f71\u54cd\u5230\u5176\u4ed6\u8282\u70b9\u7684\u8fde\u63a5\u3002\n<\/li>\n<li>\n\u5168\u90e8\u8282\u70b9\u914d\u7f6e:\u5373\u6240\u6709\u8282\u70b9(node1\u3001node2\u3001node3)\u76f8\u4e92\u4e4b\u95f4\u90fd\u914d\u7f6eSSH\u514d\u5bc6\u767b\u5f55\u3002\u8fd9\u79cd\u914d\u7f6e\u6bd4\u8f83\u590d\u6742,\u4f46\u662f\u9ad8\u53ef\u7528\u6027\u66f4\u597d,\u5373\u4f7f\u90e8\u5206\u8282\u70b9\u5931\u6548\u4e5f\u4e0d\u4f1a\u5f71\u54cd\u5176\u4ed6\u8282\u70b9\u8fde\u63a5\u3002\u6240\u4ee5,\u5bf9\u4e8e3\u8282\u70b9\u9ad8\u53ef\u7528\u7684k8s\u96c6\u7fa4,\u6211\u63a8\u8350\u9009\u7528\u7b2c\u4e8c\u79cd\u65b9\u6848:\u5728\u6240\u6709\u8282\u70b9\u4e4b\u95f4\u90fd\u914d\u7f6eSSH\u514d\u5bc6\u767b\u5f55\u8fde\u63a5\u3002\u5177\u4f53\u914d\u7f6e\u6b65\u9aa4\u5982\u4e0b:<\/p>\n<ol>\n<li>\n\u5728node1\u4e0a\u751f\u6210SSH\u5bc6\u94a5\u5bf9,\u7136\u540e\u5c06\u516c\u94a5\u8ffd\u52a0\u5230node2\u548cnode3\u7684~\/.ssh\/authorized_keys\u6587\u4ef6<\/p>\n<ol 
start=\"2\">\n<li>\n\u5728node2\u4e0a\u751f\u6210SSH\u5bc6\u94a5\u5bf9,\u7136\u540e\u5c06\u516c\u94a5\u8ffd\u52a0\u5230node1\u548cnode3\u7684~\/.ssh\/authorized_keys\u6587\u4ef6<\/p>\n<ol start=\"3\">\n<li>\u5728node3\u4e0a\u751f\u6210SSH\u5bc6\u94a5\u5bf9,\u7136\u540e\u5c06\u516c\u94a5\u8ffd\u52a0\u5230node1\u548cnode2\u7684~\/.ssh\/authorized_keys\u6587\u4ef64. \u68c0\u67e5\u6240\u6709\u8282\u70b9\u7684~\/.ssh\/authorized_keys\u6587\u4ef6\u6743\u9650\u5747\u4e3a6005. \u5728\u6240\u6709\u8282\u70b9\u4e0a\u68c0\u67e5\/etc\/ssh\/sshd_config,\u786e\u4fdd\u6709PubkeyAuthentication yes\u914d\u7f6e6. \u5728\u6240\u6709\u8282\u70b9\u4e0a\u91cd\u542fSSH\u670d\u52a1:systemctl restart sshd7. \u4ece\u6bcf\u4e2a\u8282\u70b9\u6d4b\u8bd5SSH\u514d\u5bc6\u767b\u5f55\u5176\u4ed6\u6240\u6709\u8282\u70b9,\u786e\u4fdd\u8fde\u63a5\u6b63\u5e388. (\u53ef\u9009)\u5728\u6240\u6709\u8282\u70b9\u7981\u7528SSH\u5bc6\u7801\u9a8c\u8bc1\u4e0eKeyboard-interactive\u9a8c\u8bc1,\u63d0\u9ad8\u5b89\u5168\u6027\u3002\u5b8c\u6210\u4e0a\u8ff0\u6b65\u9aa4\u540e,3\u4e2ak8s\u8282\u70b9\u4e4b\u95f4\u5c31\u914d\u7f6e\u4e86\u5b8c\u6574\u7684SSH\u514d\u5bc6\u767b\u5f55\u8fde\u63a5,\u8fd9\u4e3a\u540e\u7eed\u7684k8s\u9ad8\u53ef\u7528\u96c6\u7fa4\u90e8\u7f72\u6253\u4e0b\u4e86\u57fa\u7840<\/li>\n<\/ol>\n<p><font color=red>\u603b\u7ed3\uff1a\u4e3b\u8981\u662f\u914d\u7f6e\u597d\u516c\u94a5\uff0cnode1\uff0cnode2, node3\uff0c\u7136\u540e\u8bbe\u7f6e\u597d\u6743\u9650<\/font>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<h1>K8S\u7ec4\u4ef6\u7248\u672c\u53f7\uff1a<\/h1>\n<p>\u90e8\u7f72\u6700\u65b0\u7248\u672c\u7684k8s\u96c6\u7fa4,\u6211\u7684\u63a8\u8350\u914d\u7f6e\u5982\u4e0b:<\/p>\n<ul>\n<li>\nkubelet &amp; kubeadm:1.22.4<br \/>\n- kubectl:1.22.4<br \/>\n- kubernetes-cni:0.8.7<br \/>\n- chrony:4.0+<br \/>\n- coredns:1.8.0<br \/>\n- etcd:3.4.13-0<br \/>\n- pause:3.4.1\n<\/li>\n<li>\nkubelet &amp; 
kubeadm:kubelet\u662f\u96c6\u7fa4\u8282\u70b9\u4e0a\u7684\u4e3b\u8981\u8282\u70b9\u4ee3\u7406,\u5b83\u76d1\u89c6\u8282\u70b9\u4e0a\u7684pod\u548c\u5bb9\u5668,\u5e76\u786e\u4fdd\u5b83\u4eec\u6309\u9884\u671f\u8fd0\u884c\u3002kubeadm\u7528\u4e8e\u5feb\u901f\u8f7b\u677e\u5730\u5728\u673a\u5668\u4e0a\u542f\u52a8\u4e00\u4e2aKubernetes\u96c6\u7fa4\u3002\n<\/li>\n<li>\nkubectl:Kubernetes\u547d\u4ee4\u884c\u5de5\u5177,\u7528\u4e8e\u8fd0\u884c\u547d\u4ee4\u884c\u547d\u4ee4\u4e0eKubernetes\u96c6\u7fa4\u8fdb\u884c\u4ea4\u4e92\u3002\n<\/li>\n<li>\nkubernetes-cni:Kubernetes CNI\u7f51\u7edc\u63d2\u4ef6,\u4e3apod\u63d0\u4f9b\u7f51\u7edc\u8fde\u63a5,\u5b9e\u73b0pod\u7f51\u7edc\u901a\u4fe1\u3002<\/p>\n<ul>\n<li>chrony:\u65f6\u95f4\u540c\u6b65\u670d\u52a1,\u4e3ak8s\u96c6\u7fa4\u5185\u5404\u8282\u70b9\u540c\u6b65\u65f6\u95f4\u4ee5\u6ee1\u8db3k8s\u7684\u8981\u6c42\u3002<\/li>\n<li>coredns: Kubernetes DNS\u670d\u52a1\u5668,\u4e3aKubernetes\u96c6\u7fa4\u63d0\u4f9bDNS\u670d\u52a1\u3002<\/li>\n<li>etcd:Kubernetes\u6570\u636e\u6301\u4e45\u5316\u7684\u6570\u636e\u5e93,\u5b58\u50a8\u96c6\u7fa4\u72b6\u6001\u3002\n<ul>\n<li>pause:\u4e00\u4e2a\u7a7a\u7684\u5bb9\u5668\u955c\u50cf,\u4e3b\u8981\u7528\u4e8e\u5360\u4f4d\u3002\u67d0\u4e9bpod\u4e2d\u7684\u4e00\u4e2a\u5bb9\u5668\u542f\u52a8\u5931\u8d25\u540e\u5c06\u88ab\u66ff\u6362\u4e3apause\u5bb9\u5668\u4ee5\u4fdd\u8bc1pod\u5904\u4e8erunning\u72b6\u6001\u3002<\/li>\n<li>kubelet\u3001kubeadm\u3001kubectl\u7528\u4e8e\u90e8\u7f72\u548c\u7ba1\u7406k8s\u96c6\u7fa4\u3002\n<ul>\n<li>coredns\u3001etcd\u3001pause\u7528\u4e8ek8s\u96c6\u7fa4\u7684\u57fa\u7840\u8bbe\u65bd\u4e0e\u7ef4\u6301pod 
running\u72b6\u6001\u3002<\/li>\n<li>kubernetes-cni\u7528\u4e8ek8s\u7f51\u7edc\u7f16\u6392,chrony\u7528\u4e8e\u65f6\u95f4\u540c\u6b65\u3002\n<ul>\n<li>Docker\u7528\u4e8e\u8fd0\u884c\u5bb9\u5668\u4e0e\u5bb9\u5668\u955c\u50cf\u3002<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<h1>\u4e09\u53f0\u673a\u5668\u540c\u6b65\u65f6\u95f4\uff1a<\/h1>\n<p>node1\u4e0a\u79bb\u7ebf\u5b89\u88c5\uff1achrony<\/p>\n<p>\u90a3\u4e48\u5177\u4f53\u7684\u79bb\u7ebf\u5b89\u88c5chrony\u6b65\u9aa4\u5982\u4e0b:1. \u5728node1\u4e0a\u68c0\u67e5\u5f53\u524d\u7cfb\u7edf\u65f6\u95f4\u540c\u6b65\u670d\u52a1,\u5982\u679c\u5df2\u7ecf\u5b89\u88c5\u8bf7\u5148\u5378\u8f7d:<\/p>\n<pre><code>bash\nyum remove chrony ntp  # \u5378\u8f7dchrony\u4e0eNTP<\/code><\/pre>\n<ol start=\"2\">\n<li>\u67e5\u627echrony\u4e0ek8s\u7248\u672c\u7684\u5bf9\u5e94\u5173\u7cfb,\u53ef\u4ee5\u5728\u5b98\u7f51\u6216Release note\u4e2d\u627e\u5230\u3002\u5047\u5b9ak8s\u7248\u672c\u4e3a1.20,\u9700\u8981chrony\u7248\u672c\u4e3a4.0\u30023. 
\u5728\u5176\u4ed6\u8054\u7f51\u8282\u70b9\u4e0a\u4e0b\u8f7d\u5bf9\u5e94chrony\u7248\u672c\u7684RPM\u5305:<\/li>\n<\/ol>\n<pre><code>wget https:\/\/repo.chrony.org\/chrony-4.0-1.x86_64.rpm       <\/code><\/pre>\n<p>\u4f7f\u7528\u6d4f\u89c8\u5668\uff1a<\/p>\n<p>- \u6253\u5f00https:\/\/chrony.tuxfamily.org\/download.html<\/p>\n<p>chrony-4.3.tar.gz<\/p>\n<ol>\n<li>\u5728node1\u670d\u52a1\u5668\u4e0a\u89e3\u538b\u6e90\u7801\u5305:<\/li>\n<\/ol>\n<pre><code>bash\ntar -xzf chrony-4.3.tar.gz<\/code><\/pre>\n<ol start=\"2\">\n<li>\u8fdb\u5165\u89e3\u538b\u540e\u7684\u76ee\u5f55:<\/li>\n<\/ol>\n<pre><code>bash \ncd chrony-4.3<\/code><\/pre>\n<ol start=\"3\">\n<li>\u8fd0\u884c\u914d\u7f6e\u811a\u672c:<\/li>\n<\/ol>\n<pre><code>bash\n.\/configure<\/code><\/pre>\n<pre><code>[root@node1 chrony-4.3]# .\/configure\nConfiguring for  Linux-x86_64\nChecking for gcc : No\nChecking for clang : No\nChecking for cc : No\nerror: no C compiler found<\/code><\/pre>\n<p><font color=red>\u7531\u4e8e\u6211\u4eec\u7684\u670d\u52a1\u5668\u5904\u4e8e\u5c40\u57df\u7f51\uff0c\u6240\u4ee5\u53ea\u80fd\u901a\u8fc7\u5176\u4ed6\u7535\u8111\u6d4f\u89c8\u5668\u4e0b\u8f7d\uff0c\u8fdb\u884c\u4f20\u9012<\/font><\/p>\n<ol>\n<li>\n\u5728\u4e00\u53f0\u53ef\u4ee5\u8bbf\u95ee\u5916\u7f51\u7684\u7535\u8111\u4e0a,\u6253\u5f00\u6d4f\u89c8\u5668,\u5206\u522b\u8bbf\u95eegcc\u3001gcc-c++\u548cmake\u7684\u4e0b\u8f7d\u9875\u9762\u3002<\/p>\n<p>gcc\u4e0b\u8f7d\u9875\u9762:<a href=\"https:\/\/app.slack.com\/client\/T054JM7292S\/D054CP9NHF1\">https:\/\/app.slack.com\/client\/T054JM7292S\/D054CP9NHF1<\/a>\n<\/li>\n<\/ol>\n<p>\u200b   gcc-c++\u4e0b\u8f7d\u9875\u9762: <a href=\"https:\/\/vault.centos.org\/centos\/8\/AppStream\/x86_64\/os\/Packages\/gcc-8.5.0-4.el8_5.x86_64.rpm\">https:\/\/vault.centos.org\/centos\/8\/AppStream\/x86_64\/os\/Packages\/gcc-8.5.0-4.el8_5.x86_64.rpm<\/a><br \/>\n\u200b   make\u4e0b\u8f7d\u9875\u9762:<a 
href=\"http:\/\/ftp.gnu.org\/gnu\/make\/\u5728\u8fd9\u4e9b\u9875\u9762\u4e0a\u60a8\u53ef\u4ee5\u627e\u5230\u5bf9\u5e94\u7cfb\u7edf\u4e0e\u67b6\u6784\u7684\u4e8c\u8fdb\u5236\u5305\u4e0b\u8f7d\u94fe\u63a5,\u5982:gcc-8.3.0.tar.xz\">http:\/\/ftp.gnu.org\/gnu\/make\/\u5728\u8fd9\u4e9b\u9875\u9762\u4e0a\u60a8\u53ef\u4ee5\u627e\u5230\u5bf9\u5e94\u7cfb\u7edf\u4e0e\u67b6\u6784\u7684\u4e8c\u8fdb\u5236\u5305\u4e0b\u8f7d\u94fe\u63a5,\u5982:gcc-8.3.0.tar.xz<\/a><br \/>\ngcc-8.3.0-2.el7.x86_64.rpm<br \/>\nmake-4.2.1.tar.gz<\/p>\n<ol start=\"2\">\n<li>\u70b9\u51fb\u5404\u4e2a\u94fe\u63a5,\u6d4f\u89c8\u5668\u4f1a\u5f00\u59cb\u4e0b\u8f7d\u5bf9\u5e94\u7684\u4e8c\u8fdb\u5236\u5305\u3002\u8bf7\u6ce8\u610f\u67e5\u770b\u6587\u4ef6\u540d\u662f\u5426\u6b63\u786e\u3002<\/li>\n<\/ol>\n<pre><code>bash\n# \u5bf9tar\u5305\u89e3\u538b\u3001\u914d\u7f6e\u4e0e\u7f16\u8bd1\ntar -xvf gcc-8.3.0.tar.xz \ncd gcc-8.3.0\n\n.\/configure --prefix=\/home\/k8s\/gcc-8.3.0 --enable-languages=c,c++\n\nmake \n\nmake install\n\n# \u4f7f\u7528rpm\u76f4\u63a5\u5b89\u88c5\nrpm -ivh gcc-8.3.0-2.el7.x86_64.rpm \nrpm -ivh make-4.2.1-1.el7.x86_64.rpm<\/code><\/pre>\n<pre><code>configure: error: in `\/home\/k8s\/gcc-8.3.0':\nconfigure: error: no acceptable C compiler found in $PATH\nSee `config.log' for more details.<\/code><\/pre>\n<p>\u4e0a\u9762\u7684\u4f9d\u8d56\u5305\uff0c\u6211\u5728\u5c40\u57df\u7f51\u60c5\u51b5\u4e0b\u65e0\u6cd5\u5b89\u88c5\uff0c\u6211\u9700\u8981\u66f4\u65b0\u4e00\u4e0b\uff0c\u8fdb\u884c\u5b89\u88c5\uff0c\u4e0b\u8f7d7.3\u7684\u7248\u672c\u8bd5\u4e00\u8bd5\uff1a<\/p>\n<p><a href=\"https:\/\/ftp.gnu.org\/gnu\/gcc\/gcc-7.3.0\/gcc-7.3.0.tar.gz\">https:\/\/ftp.gnu.org\/gnu\/gcc\/gcc-7.3.0\/gcc-7.3.0.tar.gz<\/a><\/p>\n<pre><code>.\/configure --prefix=\/home\/k8s\/gcc-7.3.0 --enable-languages=c,c++ <\/code><\/pre>\n<p>\u914d\u7f6e\u73af\u5883\u53d8\u91cf\uff1a<\/p>\n<pre><code>export PATH=\/home\/k8s\/gcc-7.3.0\/bin:$PATH <\/code><\/pre>\n<ol 
start=\"3\">\n<li>\n\u6267\u884csource\u547d\u4ee4,\u91cd\u65b0\u52a0\u8f7d\u73af\u5883\u53d8\u91cf:<\/p>\n<pre><code>source \/etc\/profile <\/code><\/pre>\n<pre><code>[root@node1 gcc-7.3.0]# source \/etc\/profile \n-bash: TMOUT: readonly variable<\/code><\/pre>\n<p>\u624b\u52a8\u6dfb\u52a0\u73af\u5883\u53d8\u91cf\uff1a<\/p>\n<pre><code>vim \/etc\/profile<\/code><\/pre>\n<pre><code>source \/etc\/profile <\/code><\/pre>\n<\/li>\n<\/ol>\n<pre><code>.\/configure --prefix=\/home\/k8s\/gcc-7.3.0 --enable-languages=c,c++<\/code><\/pre>\n<p>\u8fd9\u4e2a\u811a\u672c\u4f1a\u81ea\u52a8\u68c0\u67e5\u73af\u5883\u4f9d\u8d56,\u5e76\u751f\u6210Makefile\u6587\u4ef6\u3002<\/p>\n<ol start=\"4\">\n<li>\u7f16\u8bd1\u6e90\u7801:<\/li>\n<\/ol>\n<pre><code>bash\nmake<\/code><\/pre>\n<ol start=\"5\">\n<li>\u5b89\u88c5:<\/li>\n<\/ol>\n<pre><code>bash\nmake install <\/code><\/pre>\n<p>chrony\u4f1a\u88ab\u9ed8\u8ba4\u5b89\u88c5\u5230\/usr\/local\/bin\u76ee\u5f55\u30026. \u521b\u5efachrony\u914d\u7f6e\u6587\u4ef6\/etc\/chrony.conf(\u793a\u4f8b\u5185\u5bb9\u5982\u4e0b),\u5e76\u542f\u52a8\u670d\u52a1\u3002<\/p>\n<pre><code>server 0.centos.pool.ntp.org iburst \nserver 1.centos.pool.ntp.org iburst  \nserver 2.centos.pool.ntp.org iburst \nserver 3.centos.pool.ntp.org iburst<\/code><\/pre>\n<p>\u7136\u540e\u542f\u52a8chrony:<\/p>\n<pre><code>bash\nsystemctl start chronyd.service<\/code><\/pre>\n<ol start=\"7\">\n<li>\n\u68c0\u67e5chrony\u7248\u672c\u4e0e\u72b6\u6001,\u786e\u4fdd\u5b89\u88c5\u4e0e\u914d\u7f6e\u6210\u529f\u3002\n<\/li>\n<li>\n\u9a8c\u8bc1chrony\u7248\u672c\u4e0e\u529f\u80fd\u662f\u5426\u6b63\u5e38:\n<\/li>\n<\/ol>\n<pre><code>bash\nchronyc -v   # \u67e5\u770bchrony\u7248\u672c,\u5e94\u4e3a4.0 \nsystemctl start chronyd   # \u542f\u52a8chrony\nchronyc sources  # \u67e5\u770b\u65f6\u95f4\u540c\u6b65\u72b6\u6001<\/code><\/pre>\n<ol 
start=\"7\">\n<li>\u7f16\u8f91<code>\/etc\/chrony.conf<\/code>\u914d\u7f6e\u6587\u4ef6,\u786e\u4fdd\u5176\u5b83\u8282\u70b9\u53ef\u4ee5\u8bbf\u95ee:<\/li>\n<\/ol>\n<pre><code>allow 192.168.0.0\/16   # \u8282\u70b9\u7f51\u6bb5 \nbindcmdaddress 0.0.0.0  # \u5141\u8bb8\u6240\u6709\u5730\u5740\u8bbf\u95ee<\/code><\/pre>\n<ol start=\"8\">\n<li>\u542f\u52a8chrony\u670d\u52a1\u5e76\u8bbe\u7f6e\u5f00\u673a\u542f\u52a8: <\/li>\n<\/ol>\n<pre><code>bash\nsystemctl enable chronyd  &amp;&amp; systemctl start chronyd <\/code><\/pre>\n<h2>\u5176\u4ed6\u8282\u70b9\u5f00\u59cb\u540c\u6b65\u65f6\u95f4\uff1a<\/h2>\n<p>\u4e0d\u8054\u7f51\u771f\u7684TMD\uff0c\u6211\u73b0\u5728\u4f7f\u7528\u4e92\u8054\u7f51\u5148\u628aK8S\u5b89\u88c5\u6210\u529f\uff1a<\/p>\n<p>chrony\u662f\u4e00\u4e2a\u5b9e\u73b0NTP\u534f\u8bae\u7684\u670d\u52a1,\u7528\u4e8e\u540c\u6b65\u4e3b\u673a\u7cfb\u7edf\u65f6\u95f4\u3002\u5b83\u53ef\u4ee5\u5b9a\u671f\u4eceNTP\u670d\u52a1\u5668\u540c\u6b65\u65f6\u95f4,\u786e\u4fdd\u4e3b\u673a\u7ef4\u6301\u51c6\u786e\u7684\u7cfb\u7edf\u65f6\u95f4\u3002\u5728CentOS\u4e0a\u5b89\u88c5chrony,\u53ef\u4ee5\u6309\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c:<\/p>\n<p>\u200b     \u786e\u4fdd\u7cfb\u7edf\u5df2\u5b89\u88c5epel-release\u6e90<\/p>\n<p>\u200b   yum install epel-release<\/p>\n<ol start=\"2\">\n<li>\n\u5b89\u88c5chrony\u5305:<\/p>\n<p>yum install chrony<\/p>\n<ol start=\"3\">\n<li>\n\u542f\u52a8chrony\u670d\u52a1\u5e76\u8bbe\u7f6e\u5f00\u673a\u542f\u52a8:<\/p>\n<p>systemctl start chronyd<br \/>\nsystemctl enable chronyd<\/p>\n<ol start=\"4\">\n<li>\n\u68c0\u67e5chrony\u662f\u5426\u6b63\u5e38\u8fd0\u884c:<\/p>\n<p>systemctl status chronyd<\/p>\n<ol start=\"5\">\n<li>\u68c0\u67e5chrony\u662f\u5426\u6b63\u5e38\u540c\u6b65\u65f6\u95f4:<\/li>\n<\/ol>\n<p>chronyc sources<\/p>\n<p>\u8fd9\u4f1a\u5217\u51fachrony\u6b63\u5728\u540c\u6b65\u7684NTP\u670d\u52a1\u5668\u4fe1\u606f\u3002<\/p>\n<ol 
start=\"6\">\n<li>\n\u914d\u7f6eNTP\u670d\u52a1\u5668\u3002\u9ed8\u8ba4\u914d\u7f6e\u6587\u4ef6\u4e3a\/etc\/chrony.conf\u3002<\/p>\n<p>\u6307\u5b9a\u81ea\u5df1\u7684NTP\u670d\u52a1\u5668,\u4e5f\u53ef\u4ee5\u4f7f\u7528CentOS\u63d0\u4f9b\u7684\u5982:<\/p>\n<p>server <a href=\"http:\/\/0.centos.pool.ntp.org\/\">0.centos.pool.ntp.org<\/a> iburst<br \/>\nserver <a href=\"http:\/\/1.centos.pool.ntp.org\/\">1.centos.pool.ntp.org<\/a> iburst<br \/>\nserver <a href=\"http:\/\/2.centos.pool.ntp.org\/\">2.centos.pool.ntp.org<\/a> iburst<br \/>\nserver <a href=\"http:\/\/3.centos.pool.ntp.org\/\">3.centos.pool.ntp.org<\/a> iburst<\/p>\n<ol start=\"7\">\n<li>\n\u91cd\u542fchrony\u751f\u6548\u914d\u7f6e\u66f4\u6539:<\/p>\n<p>systemctl restart chronyd<\/p>\n<ol start=\"8\">\n<li>\n\u67e5\u770b\u4e3b\u673a\u662f\u5426\u4e0eNTP\u670d\u52a1\u5668\u6b63\u5e38\u540c\u6b65<\/p>\n<p>chronyc tracking\u8fd9\u6837chrony\u670d\u52a1\u5c31\u5b89\u88c5\u5e76\u914d\u7f6e\u5b8c\u6210\u4e86\u3002<\/p>\n<p>\u5b83\u5c06\u5b9a\u671f\u540c\u6b65\u65f6\u95f4,\u786e\u4fdd\u4e3b\u673a\u7ef4\u6301\u51c6\u786e\u7684\u7cfb\u7edf\u65f6\u95f4\u3002\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<p>\u5c06node1\u914d\u7f6e\u4e3aNTP\u670d\u52a1\u5668,\u8ba9node2\u548cnode3\u540c\u6b65node1\u7684\u65f6\u95f4\u662f\u4e00\u4e2a\u5f88\u597d\u7684\u65b9\u6848\u3002\u5177\u4f53\u6b65\u9aa4\u5982\u4e0b:<\/p>\n<ol>\n<li>\n\u5728node1\u4e0a\u5b89\u88c5chrony\u5e76\u914d\u7f6e\u4e3aNTP\u670d\u52a1\u5668:yum install chrony<\/p>\n<p>vim \/etc\/chrony.conf<\/p>\n<p>\u5728\u6587\u4ef6\u672b\u5c3e\u6dfb\u52a0:<\/p>\n<p>allow 192.168.1.*  #\u5141\u8bb8\u5c40\u57df\u7f51\u540c\u6b65\u65f6\u95f4<br \/>\nlocal stratum 10 <\/p>\n<p>\u200b    <\/p>\n<ol start=\"2\">\n<li>\n\u542f\u52a8chrony\u670d\u52a1\u5e76\u8bbe\u7f6e\u5f00\u673a\u542f\u52a8:<\/p>\n<p>systemctl start chronyd<br \/>\nsystemctl enable 
chronyd<\/p>\n<p>3.\u5728node2\u548cnode3\u5b89\u88c5chrony,<\/p>\n<p>\u914d\u7f6e\u540c\u6b65node1\u65f6\u95f4:<\/p>\n<p>yum install chrony  <\/p>\n<h1>\u4e24\u53f0\u670d\u52a1\u5668\u90fd\u8981\u5b89\u88c5<\/h1>\n<p>vim \/etc\/chrony.conf<\/p>\n<p>\u7f16\u8f91\u6587\u4ef6,\u5185\u5bb9\u4e3a:<\/p>\n<p>server node1<\/p>\n<ol start=\"4\">\n<li>\nnode2\u548cnode3\u542f\u52a8chrony\u670d\u52a1:<\/p>\n<p>systemctl start chronyd<br \/>\nsystemctl enable chronyd<\/p>\n<ol start=\"5\">\n<li>\n\u5728\u4e09\u53f0\u670d\u52a1\u5668\u4e0a\u68c0\u67e5\u914d\u7f6e\u548c\u540c\u6b65\u72b6\u6001:# node1<br \/>\nsystemctl status chronyd   #\u68c0\u67e5\u670d\u52a1\u72b6\u6001<br \/>\nchronyc sources       #\u68c0\u67e5\u662f\u5426\u6709\u5ba2\u6237\u7aef\u540c\u6b65# node2,node3<br \/>\nsystemctl status chronyd<br \/>\nchronyc sources<br \/>\nchronyc tracking   #\u68c0\u67e5\u662f\u5426\u6210\u529f\u540c\u6b65node1\n<\/li>\n<li>\n\u968f\u65f6\u68c0\u67e5chrony\u540c\u6b65\u72b6\u6001\u548c\u65f6\u95f4\u51c6\u786e\u6027\u3002\u9700\u8981\u7684\u8bdd\u53ef\u4ee5\u91cd\u542f\u670d\u52a1:<\/p>\n<p>systemctl restart chronyd<\/p>\n<p>\u8fd9\u6837,node1\u4f5c\u4e3aNTP\u670d\u52a1\u5668\u63d0\u4f9b\u65f6\u95f4\u670d\u52a1,node2\u548cnode3\u4f5c\u4e3a\u5ba2\u6237\u7aef\u540c\u6b65node1\u7684\u65f6\u95f4\u3002\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<h1>\u914d\u7f6ekeepalived\u5b9e\u73b0VIP\u6f02\u79fb<\/h1>\n<p>keepalived\u53ef\u4ee5\u63d0\u4f9b\u9ad8\u53ef\u7528\u7684VIP(\u865a\u62dfIP),\u901a\u8fc7VRRP\u534f\u8bae\u5728\u591a\u4e2a\u8282\u70b9\u4e4b\u95f4\u6f02\u79fb,\u5b9e\u73b0\u6d41\u91cf\u5206\u53d1\u548c\u9ad8\u53ef\u7528\u3002\u5177\u4f53\u7684\u914d\u7f6e\u6b65\u9aa4\u5982\u4e0b:1. 
\u5b89\u88c5keepalived\u5728node1,node2,node3\u4e0a\u5b89\u88c5keepalived:<\/p>\n<pre><code>bash\nyum install keepalived -y<\/code><\/pre>\n<ol start=\"2\">\n<li>\u914d\u7f6ekeepalived\u4fee\u6539\/etc\/keepalived\/keepalived.conf,\u5728node1\u4e0a:<\/li>\n<\/ol>\n<pre><code>global_defs {\n   router_id node1  # \u552f\u4e00ID,\u5728\u96c6\u7fa4\u4e2d\u5fc5\u987b\u4e0d\u540c \n}\n\nvrrp_instance VI_1 {\n    state MASTER  # node1 \u4e3aMASTER \n    interface ens33 # VIP\u7ed1\u5b9a\u5728\u6b64\u63a5\u53e3\n    virtual_router_id 51 # VRRP \u8def\u7531ID,\u5728\u96c6\u7fa4\u4e2d\u5fc5\u987b\u76f8\u540c\n    priority 100   # node1 \u7684\u4f18\u5148\u7ea7\u6700\u9ad8,\u6240\u4ee5\u4e3aMASTER\n    advert_int 1    # VRRP \u5fc3\u8df3\u5305\u5e7f\u64ad\u95f4\u9694\n    authentication {\n        auth_type PASS\n        auth_pass 1111\n    }\n    virtual_ipaddress {\n        192.168.200.16  # VIP\u5730\u5740\n    }\n} <\/code><\/pre>\n<p>\u5728node2\u548cnode3\u4e0a,\u914d\u7f6e\u5982\u4e0b:<\/p>\n<pre><code>global_defs {\n   router_id node2  # \u4e0d\u540c\u4e8enode1\n}\n\nvrrp_instance VI_1 {\n    state BACKUP   # node2\/node3 \u4e3aBACKUP\n    interface ens33\n    virtual_router_id 51 \n    priority 90     # \u4f18\u5148\u7ea7\u6bd4node1\u4f4e\n    advert_int 1\n    authentication {\n        auth_type PASS\n        auth_pass 1111\n    }\n    virtual_ipaddress {\n        192.168.200.16  \n    }\n}<\/code><\/pre>\n<ol start=\"3\">\n<li>\u542f\u52a8\u5e76\u542f\u7528keepalived\u5728node1,node2\u548cnode3\u4e0a\u542f\u52a8keepalived:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl start keepalived &amp;&amp; systemctl enable keepalived<\/code><\/pre>\n<ol start=\"4\">\n<li>\u6d4b\u8bd5VIP\u6f02\u79fb\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7ping 
192.168.200.16\u6765\u68c0\u67e5VIP\u6f02\u79fb\u60c5\u51b5\u3002keepalived\u542f\u52a8\u540e,VIP\u4f1a\u5728node1\u4e0a,\u6b64\u65f6ping\u901a\u3002\u5982\u679c\u5173\u95ednode1\u7684keepalived\u6216\u63a5\u53e3,VIP\u4f1a\u81ea\u52a8\u6f02\u79fb\u5230node2,ping\u4ecd\u901a\u3002\u5173\u95ednode2\u7684keepalived\u540e,VIP\u4f1a\u6f02\u79fb\u5230node3,ping\u540c\u6837\u901a\u3002<\/li>\n<\/ol>\n<pre><code>Created symlink from \/etc\/systemd\/system\/multi-user.target.wants\/keepalived.service to \/usr\/lib\/systemd\/system\/keepalived.service.<\/code><\/pre>\n<p>\u8fd9\u6761\u4fe1\u606f\u8868\u793a\u5728node1\u4e0a\u521b\u5efa\u4e86keepalived.service\u7684\u8f6f\u94fe\u63a5\u3002\u5177\u4f53\u6765\u8bf4,\u5b83\u521b\u5efa\u4e86\u4ece\/etc\/systemd\/system\/multi-user.target.wants\/keepalived.service\u5230\/usr\/lib\/systemd\/system\/keepalived.service\u7684\u8f6f\u94fe\u63a5\u3002\u5728Linux\u7cfb\u7edf\u4e2d,keepalived\u670d\u52a1\u7531keepalived.service\u6587\u4ef6\u5b9a\u4e49\u548c\u63a7\u5236\u3002\u9ed8\u8ba4\u60c5\u51b5\u4e0b,\u8fd9\u4e2a\u670d\u52a1\u6587\u4ef6\u4f4d\u4e8e\/usr\/lib\/systemd\/system\/\u76ee\u5f55\u4e0b\u3002\u5982\u679c\u8981\u542f\u52a8keepalived\u670d\u52a1,\u9700\u8981\u521b\u5efa\u4ece\/etc\/systemd\/system\/multi-user.target.wants\/\u5230keepalived.service\u6587\u4ef6\u7684\u8f6f\u94fe\u63a5\u3002\u8fd9\u4e2a\u8f6f\u94fe\u63a5\u544a\u8bc9systemd\u5728\u7cfb\u7edf\u542f\u52a8\u5230multi-user.target\u76ee\u6807\u65f6\u81ea\u52a8\u542f\u52a8keepalived\u670d\u52a1\u3002\u6240\u4ee5,\u8fd9\u6761\u4fe1\u606f\u8868\u660e\u5728node3\u4e0a\u5df2\u7ecf\u521b\u5efa\u4e86\u8fd9\u4e2a\u8f6f\u94fe\u63a5,keepalived\u670d\u52a1\u88ab\u914d\u7f6e\u4e3a\u5728\u7cfb\u7edf\u542f\u52a8\u65f6\u81ea\u52a8\u542f\u52a8\u3002\u63a5\u4e0b\u6765,\u8981\u542f\u52a8keepalived\u670d\u52a1,\u53ea\u9700\u8981\u8fd0\u884c:<\/p>\n<pre><code>bash\nsystemctl start keepalived.service 
<\/code><\/pre>\n<p>\u5e76\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u68c0\u67e5\u5176\u8fd0\u884c\u72b6\u6001:<\/p>\n<pre><code>bash \nsystemctl status keepalived.service<\/code><\/pre>\n<p>\u5982\u679c\u8981\u8bbe\u7f6ekeepalived\u670d\u52a1\u5f00\u673a\u81ea\u542f\u52a8,\u8fd0\u884c:<\/p>\n<pre><code>bash\nsystemctl enable keepalived.service<\/code><\/pre>\n<p>\u7efc\u4e0a,\u8fd9\u6761\u4fe1\u606f\u63d0\u793a\u6211\u4eecnode3\u7684keepalived\u670d\u52a1\u5df2\u7ecf\u88ab\u6b63\u786e\u914d\u7f6e\u4e3a\u5f00\u673a\u81ea\u542f\u52a8\u3002\u63a5\u4e0b\u6765\u53ea\u9700\u8981\u542f\u52a8\u8be5\u670d\u52a1,\u5e76\u68c0\u67e5\u72b6\u6001\u4e0e\u8fd0\u884c\u60c5\u51b5\u5c31\u53ef\u4ee5\u4e86\u3002<\/p>\n<h1>\u5b89\u88c5\uff1akubectl:1.22.4<\/h1>\n<ol>\n<li>\u5728node1,node2\u548cnode3\u4e0a\u5b89\u88c5kubectl\u7684\u5148\u51b3\u6761\u4ef6<\/li>\n<\/ol>\n<pre><code>bash\n# \u66f4\u65b0yum\u5305\u7d22\u5f15 \nyum update -y\n\n# \u5b89\u88c5\u4f9d\u8d56\u5305\nyum install -y yum-utils device-mapper-persistent-data lvm2 curl socat<\/code><\/pre>\n<ol start=\"2\">\n<li>\u6dfb\u52a0\u963f\u91cc\u4e91Kubernetes\u6e90<\/li>\n<\/ol>\n<pre><code>bash\ncat &lt;&lt;EOF &gt; \/etc\/yum.repos.d\/kubernetes.repo\n[kubernetes]\nname=Kubernetes\nbaseurl=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/repos\/kubernetes-el7-x86_64\/\nenabled=1\ngpgcheck=1\nrepo_gpgcheck=1 \ngpgkey=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/yum-key.gpg https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/rpm-package-key.gpg\nEOF<\/code><\/pre>\n<ol start=\"3\">\n<li>\u5b89\u88c5kubectl<\/li>\n<\/ol>\n<pre><code>bash\nyum install -y kubectl-1.22.4<\/code><\/pre>\n<ol start=\"4\">\n<li>\u68c0\u67e5kubectl\u5b89\u88c5\u7248\u672c<\/li>\n<\/ol>\n<pre><code>bash\nkubectl version --client<\/code><\/pre>\n<p>\u8f93\u51fa\u5e94\u4e3a:<\/p>\n<pre><code>Client Version: version.Info{Major:&quot;1&quot;, Minor:&quot;22&quot;, GitVersion:&quot;v1.22.4&quot;, 
GitCommit:&quot;b69590d82caaae87caf6234447777b506ca95fab2&quot;, GitTreeState:&quot;clean&quot;, BuildDate:&quot;2021-11-16T19:29:40Z&quot;, GoVersion:&quot;go1.16.10&quot;, Compiler:&quot;gc&quot;, Platform:&quot;linux\/amd64&quot;}<\/code><\/pre>\n<p>\u786e\u8ba4Client Version\u4e3a1.22.4,\u8868\u793akubectl\u5df2\u6b63\u786e\u5b89\u88c5\u3002<\/p>\n<ol start=\"5\">\n<li>\u5b89\u88c5\u5176\u4ed6node\u8282\u70b9(\u53ef\u9009)\u5982\u679c\u6709\u5176\u4ed6node\u8282\u70b9(node2,node3\u7b49),\u91cd\u590d\u6b65\u9aa41-4,\u5728\u5176\u4ed6\u8282\u70b9\u4e0a\u4e5f\u5b89\u88c5kubectl 1.22.4\u3002\u8fd9\u6837,\u6240\u6709\u7684node\u4e0a\u5c31\u5177\u6709\u76f8\u540c\u7248\u672c\u7684kubectl,\u53ef\u4ee5\u7528\u4e8e\u5728Kubernetes\u96c6\u7fa4\u4e0a\u8fdb\u884c\u5404\u79cd\u7ba1\u7406\u64cd\u4f5c\u3002<\/li>\n<\/ol>\n<h1>\u5b89\u88c5kubernetes-cni:0.8.7<\/h1>\n<p>\u5728Kubernetes\u96c6\u7fa4\u7684\u4e3b\u8282\u70b9node1\u548c\u5de5\u4f5c\u8282\u70b9node2,node3\u4e0a\u5b89\u88c5kubernetes-cni 0.8.7,\u53ef\u4ee5\u6309\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c:<\/p>\n<ol>\n<li>\u5728node1,node2\u548cnode3\u4e0a\u4e0b\u8f7dkubernetes-cni 0.8.7<\/li>\n<\/ol>\n<pre><code>bash\nwget https:\/\/github.com\/containernetworking\/plugins\/releases\/download\/v0.8.7\/cni-plugins-linux-amd64-v0.8.7.tgz<\/code><\/pre>\n<p>\u53ef\u4ee5\u4f7f\u7528\u4e00\u4e9b\u56fd\u5185\u955c\u50cf\u6e90\u6765\u52a0\u901f\u4e0b\u8f7d\u3002\u4f8b\u5982:- \u963f\u91cc\u4e91\u955c\u50cf\u6e90:<\/p>\n<pre><code>bash\nwget https:\/\/kubernetes.oss-cn-hangzhou.aliyuncs.com\/cni-plugins\/v0.8.7\/cni-plugins-linux-amd64-v0.8.7.tgz<\/code><\/pre>\n<p>- \u4e2d\u79d1\u5927\u955c\u50cf\u6e90:<\/p>\n<pre><code>bash\nwget https:\/\/mirrors.ustc.edu.cn\/cni\/cni-plugins-linux-amd64-v0.8.7.tgz<\/code><\/pre>\n<p>- \u534e\u4e3a\u4e91\u955c\u50cf\u6e90:<\/p>\n<pre><code>bash \nwget https:\/\/mirrors.huaweicloud.com\/kubernetes\/cni-plugins\/v0.8.7\/cni-plugins-linux-amd64-v0.8.7.tgz<\/code><\/pre>\n<p>- 
\u7f51\u6613\u955c\u50cf\u6e90:<\/p>\n<pre><code>bash\nwget https:\/\/mirrors.163.com\/cni\/cni-plugins-linux-amd64-v0.8.7.tgz<\/code><\/pre>\n<ol start=\"2\">\n<li>\u5728node1,node2\u548cnode3\u4e0a\u89e3\u538b\u4e0b\u8f7d\u7684\u6587\u4ef6<\/li>\n<\/ol>\n<pre><code>bash\ntar -xzvf cni-plugins-linux-amd64-v0.8.7.tgz<\/code><\/pre>\n<ol start=\"3\">\n<li>\u5728node1,node2\u548cnode3\u4e0a\u79fb\u52a8\u89e3\u538b\u540e\u7684\u76ee\u5f55<\/li>\n<\/ol>\n<pre><code>bash \nsudo mv cni-plugins-linux-amd64-v0.8.7 \/opt\/cni\/bin<\/code><\/pre>\n<ol start=\"4\">\n<li>\u5728node1,node2\u548cnode3\u4e0a\u521b\u5efaCNI\u7f51\u7edc\u914d\u7f6e\u76ee\u5f55\u53ca\u6587\u4ef6<\/li>\n<\/ol>\n<pre><code>bash\nsudo mkdir -p \/etc\/cni\/net.d\ncat &gt;\/etc\/cni\/net.d\/10-calico.conflist &lt;&lt;EOF\n{\n    &quot;name&quot;: &quot;k8s-pod-network&quot;, \n    &quot;cniVersion&quot;: &quot;0.3.0&quot;, \n    &quot;plugins&quot;: [\n        {\n            &quot;type&quot;: &quot;calico&quot;, \n            &quot;etcd_endpoints&quot;: &quot;https:\/\/127.0.0.1:2379&quot;,\n            &quot;log_level&quot;: &quot;info&quot;, \n            &quot;datastore_type&quot;: &quot;kubernetes&quot;,\n            &quot;nodename&quot;: &quot;node1&quot;, \n            &quot;ipam&quot;: {\n                &quot;type&quot;: &quot;calico-ipam&quot;\n            },\n            &quot;policy&quot;: {\n                &quot;type&quot;: &quot;k8s&quot;\n           },\n            &quot;kubernetes&quot;: {\n                &quot;kubeconfig&quot;: &quot;\/etc\/cni\/net.d\/calico-kubeconfig&quot;\n            }\n        }, \n        {\n            &quot;type&quot;: &quot;portmap&quot;, \n            &quot;snat&quot;: true, \n            &quot;capabilities&quot;: {&quot;portMappings&quot;: true}\n        }\n    ]\n}\nEOF<\/code><\/pre>\n<p>\u5176\u4e2d,nodename\u7684\u503c\u5728node2\u548cnode3\u4e0a\u5e94\u4fee\u6539\u4e3a\u5bf9\u5e94\u7684\u8282\u70b9\u540d\u30025. 
\u5728node1\u4e0a\u6388\u6743CNI\u7f51\u7edc\u914d\u7f6e\u6587\u4ef6<\/p>\n<pre><code>bash\nchmod 644 \/etc\/cni\/net.d\/10-calico.conflist <\/code><\/pre>\n<ol start=\"6\">\n<li>\u91cd\u542fKubelet\u670d\u52a1\u4ee5\u52a0\u8f7dCNI\u63d2\u4ef6\u5728node1,node2\u548cnode3\u4e0a\u8fd0\u884c:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl restart kubelet<\/code><\/pre>\n<ol start=\"7\">\n<li>\u68c0\u67e5\u5b89\u88c5\u60c5\u51b5\u5728\u4efb\u610f\u8282\u70b9\u4e0a\u8fd0\u884c:<\/li>\n<\/ol>\n<pre><code>bash \n\/opt\/cni\/bin\/cni-plugins-helper.sh <\/code><\/pre>\n<p>\u8f93\u51fa\u5e94\u663e\u793acni-plugins-helper\u53ca\u5176\u4ed6CNI\u63d2\u4ef6\u7684\u8be6\u7ec6\u4fe1\u606f,\u8868\u660eCNI\u5df2\u6b63\u786e\u5b89\u88c5\u3002\u4ee5\u4e0a\u5c31\u662f\u5728Kubernetes\u96c6\u7fa4\u7684\u4e3b\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u4e0a\u5b89\u88c5CNI\u7f51\u7edc\u63d2\u4ef6kubernetes-cni 0.8.7\u7684\u5b8c\u6574\u6b65\u9aa4\u3002<\/p>\n<h1>kubelet &amp; kubeadm:1.22.4<\/h1>\n<p>\u57283\u53f0\u670d\u52a1\u5668(node1,node2,node3)\u4e0a\u5b89\u88c5kubelet\u3001kubeadm1.22.4\u7248\u672c\u7684\u6b65\u9aa4\u5982\u4e0b:<\/p>\n<ol>\n<li>\u6dfb\u52a0Kubernetes\u8f6f\u4ef6\u5305\u7684yum\u6e90:<\/li>\n<\/ol>\n<pre><code>bash\ncat &lt;&lt;EOF | sudo tee \/etc\/yum.repos.d\/kubernetes.repo\n[kubernetes]\nname=Kubernetes\nbaseurl=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/repos\/kubernetes-el7-x86_64\/\nenabled=1\ngpgcheck=1\nrepo_gpgcheck=1\ngpgkey=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/yum-key.gpg https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/rpm-package-key.gpg\nEOF<\/code><\/pre>\n<ol start=\"2\">\n<li>\u5b89\u88c5kubeadm\u3001kubelet\u548ckubectl:<\/li>\n<\/ol>\n<pre><code>bash \nsudo yum install -y kubelet-1.22.4 kubeadm-1.22.4 <\/code><\/pre>\n<ol start=\"3\">\n<li>\u542f\u52a8kubelet:<\/li>\n<\/ol>\n<pre><code>bash\nsudo systemctl enable --now kubelet<\/code><\/pre>\n<ol start=\"4\">\n<li>\u7f16\u8f91kubelet\u914d\u7f6e\u6587\u4ef6vim 
\/etc\/sysctl.d\/k8s.conf,\u6dfb\u52a0\u4ee5\u4e0b\u5185\u5bb9:<\/li>\n<\/ol>\n<pre><code>net.bridge.bridge-nf-call-iptables  = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.ipv4.ip_forward                 = 1\nnet.ipv4.tcp_keepalive_time        =  600<\/code><\/pre>\n<ol start=\"5\">\n<li>\u91cd\u65b0\u52a0\u8f7dsysctl\u5e76\u4f7f\u914d\u7f6e\u751f\u6548<\/li>\n<\/ol>\n<pre><code>bash \nsudo sysctl --system <\/code><\/pre>\n<ol start=\"6\">\n<li>\u963b\u6b62swap\u5206\u533a,\u56e0\u4e3akubernetes\u4e0d\u652f\u6301swap\u5206\u533a<\/li>\n<\/ol>\n<pre><code>bash\nsudo swapoff -a<\/code><\/pre>\n<ol start=\"7\">\n<li>\u7f16\u8f91vim \/etc\/default\/grub,\u5728GRUB_CMDLINE_LINUX\u53c2\u6570\u4e2d\u6dfb\u52a0&quot;swapoff=yes&quot;<\/li>\n<\/ol>\n<pre><code>bash\nsudo grub2-mkconfig -o \/boot\/grub2\/grub.cfg<\/code><\/pre>\n<ol start=\"8\">\n<li>\u91cd\u542f\u670d\u52a1\u5668\u6fc0\u6d3b\u914d\u7f6e<\/li>\n<\/ol>\n<pre><code>bash\nsudo reboot<\/code><\/pre>\n<ol start=\"9\">\n<li>\n\u5b89\u88c5Docker 19.03\u6216\u66f4\u9ad8\u7248\u672c<\/p>\n<p>10.\u6267\u884c\u4ee5\u4e0a\u6b65\u9aa4\u5728node1\u3001node2\u548cnode3\u670d\u52a1\u5668\u4e0a\u91cd\u590d\u3002\u81f3\u6b64,3\u53f0\u670d\u52a1\u5668\u4e0a\u7684kubelet\u3001kubeadm\u548ckubectl \u5df2\u5b89\u88c5\u5b8c\u6210,\u5e76\u505a\u597d\u4e86\u521d\u59cb\u5316Kubernetes\u96c6\u7fa4\u7684\u51c6\u5907\u5de5\u4f5c\u3002<\/p>\n<p>\u5728 Kubernetes \u96c6\u7fa4\u4e2d,Docker \u88ab\u7528\u4f5c\u9ed8\u8ba4\u7684\u5bb9\u5668\u8fd0\u884c\u65f6\u3002kubelet\u901a\u8fc7Docker\u6765\u7ba1\u7406Pod\u4e2d\u7684\u5bb9\u5668,\u6240\u4ee5 Docker \u662f\u90e8\u7f72 Kubernetes \u96c6\u7fa4\u7684\u5fc5\u8981\u7ec4\u4ef6\u4e4b\u4e00\u3002Docker\u7684\u5b89\u88c5\u65b9\u6cd5\u53ef\u4ee5\u53c2\u8003\u8fd9\u7bc7\u6587\u7ae0:<a 
href=\"https:\/\/yeasy.gitbook.io\/docker_practice\/install\u603b\u4f53\u6765\u8bf4,\u5728CentOS\u7cfb\u7edf\u4e0a\u5b89\u88c5Docker\">https:\/\/yeasy.gitbook.io\/docker_practice\/install\u603b\u4f53\u6765\u8bf4,\u5728CentOS\u7cfb\u7edf\u4e0a\u5b89\u88c5Docker<\/a> CE\u793e\u533a\u7248\u7684\u6b65\u9aa4\u5982\u4e0b:<\/p>\n<ol>\n<li>\u6dfb\u52a0Docker\u7684yum\u6e90<\/li>\n<\/ol>\n<pre><code>bash\nyum-config-manager --add-repo https:\/\/download.docker.com\/linux\/centos\/docker-ce.repo<\/code><\/pre>\n<ol start=\"2\">\n<li>\u5b89\u88c5Docker CE:<\/li>\n<\/ol>\n<pre><code>bash\nyum install docker-ce docker-ce-cli containerd.io<\/code><\/pre>\n<p>\u6216\u8005\u6307\u5b9a\u5b89\u88c5\u7279\u5b9a\u7248\u672c:<\/p>\n<pre><code>bash\nyum install docker-ce-<VERSION_STRING> docker-ce-cli-<VERSION_STRING> containerd.io<\/code><\/pre>\n<ol start=\"3\">\n<li>\u542f\u52a8Docker\u5e76\u8bbe\u7f6e\u5f00\u673a\u81ea\u542f:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl start docker \nsystemctl enable docker<\/code><\/pre>\n<ol start=\"4\">\n<li>\n\u914d\u7f6eDocker\u52a0\u901f\u5668(\u53ef\u9009)\n<\/li>\n<li>\n\u5c06\u60a8\u7684\u7528\u6237\u6dfb\u52a0\u5230docker\u7ec4\u6216\u8bbe\u7f6esudo\u6743\u9650(\u53ef\u9009)<\/p>\n<p>\u5982\u679c\u60a8\u4e0d\u5c06\u7528\u6237\u6dfb\u52a0\u5230docker\u7ec4,\u90a3\u4e48\u6bcf\u6b21\u4f7f\u7528Docker,\u90fd\u9700\u8981sudo\u6765\u83b7\u53d6root\u6743\u9650,\u6bd4\u8f83\u9ebb\u70e6\u3002\u6240\u4ee5,\u5efa\u8bae\u60a8\u5c06\u7528\u6237\u6dfb\u52a0\u5230docker\u7ec4,\u6b65\u9aa4\u5982\u4e0b:1. 
\u521b\u5efadocker\u7ec4:<\/p>\n<pre><code>bash\ngroupadd docker<\/code><\/pre>\n<ol start=\"2\">\n<li>\u5c06\u60a8\u7684\u7528\u6237\u6dfb\u52a0\u5230docker\u7ec4: root<\/li>\n<\/ol>\n<pre><code>bash\nusermod -aG docker root<\/code><\/pre>\n<p>\u5c06username\u66ff\u6362\u4e3a\u60a8\u7684\u5b9e\u9645\u7528\u6237\u540d\u3002<\/p>\n<ol start=\"3\">\n<li>\u9000\u51fa\u5e76\u91cd\u65b0\u767b\u5f55,\u7136\u540e\u8fd0\u884c <code>id username<\/code>\u5e76\u786e\u8ba4\u7528\u6237\u662f\u5426\u5df2\u6dfb\u52a0\u5230docker\u7ec4\u3002<\/li>\n<\/ol>\n<pre><code>[root@zzmuap6zwdoqhqxb ~]# groupadd docker\ngroupadd: group 'docker' already exists<\/code><\/pre>\n<p>\u8fd9\u662f\u56e0\u4e3a\u5728\u5b89\u88c5Docker\u65f6,\u5b89\u88c5\u811a\u672c\u81ea\u52a8\u521b\u5efa\u4e86docker\u7ec4\u3002\u6240\u4ee5,\u60a8\u53ea\u9700\u8981\u5c06\u60a8\u7684\u7528\u6237\u6dfb\u52a0\u5230\u73b0\u6709\u7684docker\u7ec4\u5373\u53ef\u3002\u547d\u4ee4\u5982\u4e0b:<\/p>\n<p>\u5c06\u6b64\u547d\u4ee4\u4e2d\u7684user1\u66ff\u6362\u4e3a\u60a8\u81ea\u5df1\u7684\u5b9e\u9645\u767b\u5f55\u7528\u6237\u540d\u3002<\/p>\n<pre><code>bash\n# node1\u670d\u52a1\u5668\u6267\u884c\nusermod -aG docker root\n# node2\u670d\u52a1\u5668\u6267\u884c\nusermod -aG docker root\n# node3\u670d\u52a1\u5668\u6267\u884c\nusermod -aG docker root<\/code><\/pre>\n<p>\u5c06your_username\u66ff\u6362\u4e3a\u60a8\u7684\u5b9e\u9645\u7528\u6237\u540d\u3002\u6267\u884c\u6b64\u547d\u4ee4\u540e,\u60a8\u7684\u7528\u6237\u5c31\u88ab\u6dfb\u52a0\u5230\u4e86docker\u7ec4\u3002\u7136\u540e\u60a8\u9700\u8981:1. 
\u9000\u51fa\u5f53\u524d\u767b\u5f55\u4f1a\u8bdd:<\/p>\n<pre><code>bash \nexit<\/code><\/pre>\n<ol start=\"2\">\n<li>\n\u518d\u91cd\u65b0\u767b\u5f55\u3002\n<\/li>\n<li>\n\u6d4b\u8bd5\u60a8\u7684\u7528\u6237\u662f\u5426\u53ef\u4ee5\u6b63\u5e38\u4f7f\u7528Docker\u547d\u4ee4,\u8fd0\u884c:\n<\/li>\n<\/ol>\n<pre><code>bash \ndocker run hello-world<\/code><\/pre>\n<pre><code>[root@zzmuap6zwdoqhqxb ~]# docker run hello-world\nUnable to find image 'hello-world:latest' locally\nlatest: Pulling from library\/hello-world\n2db29710123e: Pull complete \nDigest: sha256:4e83453afed1b4fa1a3500525091dbfca6ce1e66903fd4c01ff015dbcb1ba33e\nStatus: Downloaded newer image for hello-world:latest\n\nHello from Docker!\nThis message shows that your installation appears to be working correctly.\n\nTo generate this message, Docker took the following steps:\n1. The Docker client contacted the Docker daemon.\n2. The Docker daemon pulled the \"hello-world\" image from the Docker Hub.\n  (amd64)\n3. The Docker daemon created a new container from that image which runs the\n  executable that produces the output you are currently reading.\n4. 
The Docker daemon streamed that output to the Docker client, which sent it\n  to your terminal.\n\nTo try something more ambitious, you can run an Ubuntu container with:\n$ docker run -it ubuntu bash\n\nShare images, automate workflows, and more with a free Docker ID:\nhttps:\/\/hub.docker.com\/\n\nFor more examples and ideas, visit:\nhttps:\/\/docs.docker.com\/get-started\/\n<\/code><\/pre>\n<p>\u606d\u559c!\u4ece\u8f93\u51fa\u53ef\u4ee5\u770b\u51fa\u60a8\u5df2\u7ecf\u6210\u529f\u5b89\u88c5Docker\u5e76\u53ef\u4ee5\u6b63\u5e38\u8fd0\u884cDocker\u547d\u4ee4\u4e86\u3002&quot;Hello from Docker!&quot;\u7684\u8f93\u51fa\u8868\u660eDocker\u5b89\u88c5\u6b63\u5e38,\u53ef\u4ee5\u62c9\u53d6\u548c\u8fd0\u884c\u955c\u50cf\u3002<\/p>\n<p>\u5982\u679c\u53ef\u4ee5\u6b63\u5e38\u8fd0\u884c,\u8bf4\u660e\u60a8\u7684\u7528\u6237\u5df2\u83b7\u5f97\u4f7f\u7528Docker\u7684\u6743\u9650,\u4e0d\u9700\u8981sudo\u5c31\u53ef\u4ee5\u7ba1\u7406Docker\u3002\n<\/li>\n<\/ol>\n<p>\u4ee5\u4e0a\u6b65\u9aa4\u9700\u8981\u5728node1\u3001node2\u548cnode3\u4e09\u4e2a\u8282\u70b9\u4e0a\u90fd\u6267\u884c\u4e00\u904d,\u5b89\u88c5Docker\u5e76\u6b63\u5e38\u8fd0\u884c\u540e,\u624d\u80fd\u5f00\u59cb\u521d\u59cb\u5316Kubernetes\u96c6\u7fa4\u3002\u6240\u4ee5,\u603b\u7ed3\u6765\u8bf4,\u5728\u5b89\u88c5kubelet\u3001kubeadm\u548ckubectl\u540e,\u60a8\u8fd8\u9700\u8981:<\/p>\n<ol>\n<li>\u5728\u6240\u6709\u8282\u70b9\u5b89\u88c5Docker,\u7248\u672c\u9700\u8981\u572819.03\u6216\u66f4\u9ad8<\/li>\n<li>\u5728\u6240\u6709\u8282\u70b9\u542f\u52a8Docker\u5e76\u8bbe\u7f6e\u5f00\u673a\u81ea\u542f<\/li>\n<li>\u5728\u6240\u6709\u8282\u70b9\u6d4b\u8bd5Docker\u662f\u5426\u6b63\u5e38\u5de5\u4f5c\u5b8c\u6210\u4ee5\u4e0a\u6b65\u9aa4\u540e,\u624d\u80fd\u5f00\u59cb\u901a\u8fc7<\/li>\n<li>kubeadm init \u5728node1\u8282\u70b9\u521d\u59cb\u5316Kubernetes\u96c6\u7fa4\u3002<\/li>\n<\/ol>\n<p>\u63a5\u4e0b\u6765\u53ef\u4ee5\u5728node1\u4e0a\u6267\u884c<code>kubeadm 
init<\/code>\u547d\u4ee4\u521d\u59cb\u5316\u96c6\u7fa4\u63a7\u5236\u5e73\u9762,\u4e4b\u540e\u5728node2\u548cnode3\u4e0a\u6267\u884c<code>kubeadm join<\/code>\u547d\u4ee4\u5c06\u5176\u52a0\u5165\u96c6\u7fa4\u3002\n<\/li>\n<\/ol>\n<ul>\n<li>\nnode1\u4e0a\u6267\u884c<code>kubeadm init<\/code><\/p>\n<p>\u5728node1\u8282\u70b9\u4e0a\u6267\u884c<code>kubeadm init<\/code>\u547d\u4ee4\u6765\u521d\u59cb\u5316Kubernetes\u63a7\u5236\u5e73\u9762(control plane)\u3002\n<\/li>\n<\/ul>\n<p>\u5047\u8bbenode1\u7684IP\u5730\u5740\u662f192.168.0.1,\u90a3\u4e48\u6267\u884c\u547d\u4ee4\u5982\u4e0b:<\/p>\n<pre><code>bash\nkubeadm init --apiserver-advertise-address=192.168.0.1 --pod-network-cidr=10.244.0.0\/16\n# \u6211\u7684node1IP\u5730\u5740\u4e3a\uff1a192.168.1.127\nkubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16<\/code><\/pre>\n<pre><code>[root@node1 ~]# kubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16\nI0427 08:33:33.663710   31403 version.go:255] remote version is much newer: v1.27.1; falling back to: stable-1.22\n[init] Using Kubernetes version: v1.22.17\n[preflight] Running pre-flight checks\n        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 23.0.4. Latest validated version: 20.10\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n        [ERROR Swap]: running with swap on is not supported. 
Please disable swap\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack t<\/code><\/pre>\n<p>\u4ece\u8f93\u51fa\u53ef\u4ee5\u770b\u51fa,\u5728\u6267\u884c<code>kubeadm init<\/code>\u65f6\u9047\u5230\u4e86\u4e00\u4e9b\u9519\u8bef\u3002\u6211\u4eec\u9700\u8981\u89e3\u51b3\u8fd9\u4e9b\u9519\u8bef,\u7136\u540e\u518d\u91cd\u65b0\u6267\u884c\u521d\u59cb\u5316\u547d\u4ee4\u3002\u9519\u8bef\u4e3b\u8981\u6709\u4e24\u4e2a:<\/p>\n<ol>\n<li>\nDocker\u7248\u672c\u4e0d\u5728\u9a8c\u8bc1\u7248\u672c\u5217\u8868\u5185\u3002\u76ee\u524dKubernetes 1.22\u7248\u672c\u652f\u6301\u7684Docker\u7248\u672c\u8303\u56f4\u662f17.03~20.10\u3002\u6240\u4ee5\u9700\u8981\u5347\u7ea7\u6216\u964d\u7ea7Docker\u7248\u672c\u3002<\/p>\n<pre><code>\n[root@node1 ~]# docker version\nClient: Docker Engine - Community\nVersion:           23.0.4\nAPI version:       1.42\nGo version:        go1.19.8\nGit commit:        f480fb1\nBuilt:             Fri Apr 14 10:36:38 2023\nOS\/Arch:           linux\/amd64\nContext:           default<\/code><\/pre>\n<\/li>\n<\/ol>\n<p>Server: Docker Engine - Community<br \/>\nEngine:<br \/>\nVersion:          23.0.4<br \/>\nAPI version:      1.42 (minimum version 1.12)<br \/>\nGo version:       go1.19.8<br \/>\nGit commit:       cbce331<br \/>\nBuilt:            Fri Apr 14 10:34:14 2023<br \/>\nOS\/Arch:          linux\/amd64<br \/>\nExperimental:     false<br \/>\ncontainerd:<br \/>\nVersion:          1.6.20<br \/>\nGitCommit:        2806fc1057397dbaeefbea0e4e17bddfbd388f38<br \/>\nrunc:<br \/>\nVersion:          1.1.5<br \/>\nGitCommit:        v1.1.5-0-gf19387a<br \/>\ndocker-init:<br \/>\nVersion:          0.19.0<br \/>\nGitCommit:        de40ad0<br \/>\n[root@node1 ~]# <\/p>\n<pre><code>\n1. 
\u7cfb\u7edf\u542f\u7528\u4e86swap\u5206\u533a,\u8fd9\u4f1a\u5f71\u54cdKubernetes\u7684\u529f\u80fd\u3002\u9700\u8981\u4e34\u65f6\u6216\u6c38\u4e45\u5173\u95edswap\u5206\u533a\u3002\u6240\u4ee5,\u89e3\u51b3\u6b65\u9aa4\u5982\u4e0b:\n\n   1. \u5347\u7ea7\u6216\u964d\u7ea7Docker\u7248\u672c\u523017.03~20.10\u4e4b\u95f4\u7684\u7248\u672c\u3002\u53ef\u4ee5\u53c2\u8003Docker\u5b98\u7f51\u7684\u5b89\u88c5\u6307\u5357\u3002\n\n      1. \u505c\u6b62Docker\u670d\u52a1:\n<\/code><\/pre>\n<pre><code>  bash\n  systemctl stop docker\n  ```\n\n  ````\n  [root@node1 ~]# systemctl stop docker\n  Warning: Stopping docker.service, but it can still be activated by:\n    docker.socket\n  ````\n\n  \u4ece\u8b66\u544a\u4fe1\u606f\u53ef\u4ee5\u770b\u51fa,\u4ec5\u505c\u6b62<code>docker.service<\/code>\u670d\u52a1\u662f\u4e0d\u591f\u7684\u3002Docker\u670d\u52a1\u5b9e\u9645\u7531\u4e24\u4e2a\u5355\u5143\u7ec4\u6210:- docker.service: Docker\u5f15\u64ce\u5b88\u62a4\u8fdb\u7a0b\u670d\u52a1\n  \\- docker.socket: Docker\u5b88\u62a4\u8fdb\u7a0b\u7684socket\u6587\u4ef6,\u7528\u4e8edocker daemon\u7684socket\u6fc0\u6d3b\u6240\u4ee5,\u8981\u5b8c\u5168\u505c\u6b62Docker\u670d\u52a1,\u9700\u8981\u505c\u6b62\u8fd9\u4e24\u4e2a\u5355\u5143:\n\n  ```\n  bash\n  systemctl stop docker.service docker.socket\n  ```\n\n  \u8fd9\u5c06\u505c\u6b62Docker daemon\u548c\u76f8\u5173socket,\u4ece\u800c\u5b8c\u5168\u505c\u6b62Docker\u670d\u52a1\u3002\n\n  2. 
\u53ef\u9009\u6b65\u9aa4:\u5982\u679c\u8981\u5378\u8f7d\u5f53\u524d\u7248\u672c\u7684Docker,\u5f3a\u70c8\u5efa\u8bae\u505a\u5907\u4efd\u3002\u60a8\u53ef\u4ee5:- \u5907\u4efdDocker\u76f8\u5173\u7684\u6570\u636e:\n\n  ```\n  bash\n  cp -rpf \/var\/lib\/docker \/var\/lib\/docker.bak\n  ```\n\n  \\- \u5907\u4efd\u73b0\u6709Docker\u914d\u7f6e\u6587\u4ef6:\n\n  ```\n  bash \n  cp -rpf \/etc\/docker \/etc\/docker.bak\n  ```\n\n  ````\n  \u5f53\u524d\u5b89\u88c5\u7684Docker\u7248\u672c\u662f23.0.4\u3002\n  \u8fd9\u4e0d\u5728Kubernetes 1.22\u652f\u6301\u7684Docker\u7248\u672c\u8303\u56f4(17.03~20.10)\u5185\u3002\n  \u6240\u4ee5,\u60a8\u9700\u8981\u5378\u8f7d\u5f53\u524d\u7248\u672c\u7684Docker,\u7136\u540e\u5b89\u88c5\u652f\u6301\u7248\u672c\u7684Docker\u3002\n  \u5378\u8f7dDocker\u6b65\u9aa4\u5982\u4e0b:\n  bash\n  # 1. \u5378\u8f7dDocker\n  yum remove docker \\\n                    docker-client \\\n                    docker-client-latest \\\n                    docker-common \\\n                    docker-latest \\\n                    docker-latest-logrotate \\\n                    docker-logrotate \\\n                    docker-engine\n\n  # 2. \u4e3b\u673a\u4e0a\u5220\u9664Docker\u7684\u5176\u4ed6\u7ec4\u4ef6  \n  rm -rf \/var\/lib\/docker\n  rm -rf \/var\/run\/docker.sock\n  \u5b89\u88c5Docker 19.03\u7248\u672c\u7684\u6b65\u9aa4\u5982\u4e0b:\n  bash \n  # 1. \u6dfb\u52a0Docker\u4ed3\u5e93\n  yum-config-manager --add-repo https:\/\/download.docker.com\/linux\/centos\/docker-ce.repo\n\n  # 2. \u5b89\u88c5Docker CE 19.03  \n  yum install docker-ce-19.03.15 docker-ce-cli-19.03.15 containerd.io-1.4.7\n\n  # 3. \u542f\u52a8 Docker CE \n  systemctl start docker\n\n  # 4. 
\u6dfb\u52a0\u5f00\u673a\u542f\u52a8Docker\u670d\u52a1 \n  systemctl enable docker\n  \u5b8c\u6210Docker\u5347\u7ea7\u540e,\u8bf7\u91cd\u65b0\u6267\u884c<code>kubeadm init<\/code>\u521d\u59cb\u5316Kubernetes\u96c6\u7fa4\n  ````<\/code><\/pre>\n<ol start=\"2\">\n<li>\u5173\u95edswap\u5206\u533a:<\/li>\n<\/ol>\n<pre><code>bash\n# \u4e34\u65f6\u5173\u95ed \nswapoff -a \n\n# \u6c38\u4e45\u5173\u95ed,\u6ce8\u91ca\/etc\/fstab\u6587\u4ef6\u4e2d\u7684swap\u5206\u533a\u884c\nsed -i &#039;s\/^\\\/dev\\\/mapper\\\/centos-swap\/#&amp;\/&#039; \/etc\/fstab<\/code><\/pre>\n<ol start=\"3\">\n<li>\u91cd\u65b0\u6267\u884c<code>kubeadm init<\/code>\u547d\u4ee4\u521d\u59cb\u5316Kubernetes\u63a7\u5236\u5e73\u9762:<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16<\/code><\/pre>\n<p>\u5982\u679c\u8f93\u51fa\u663e\u793a&quot;Kubernetes control plane has initialized successfully&quot;,\u8bf4\u660eKubernetes\u63a7\u5236\u5e73\u9762\u521d\u59cb\u5316\u6210\u529f!<\/p>\n<p>\u5176\u4e2d:<\/p>\n<p>--apiserver-advertise-address \u53c2\u6570\u6307\u5b9a\u96c6\u7fa4\u4e2d\u5176\u5b83\u8282\u70b9\u4e0e\u63a7\u5236\u5e73\u9762\u901a\u4fe1\u7684IP\u5730\u5740\u3002<\/p>\n<p>--pod-network-cidr \u53c2\u6570\u6307\u5b9a Pod \u7f51\u7edc\u7684 CIDR \u8303\u56f4,\u5982\u679c\u60a8\u6253\u7b97\u4f7f\u7528 Flannel \u4f5c\u4e3a\u7f51\u7edc\u63d2\u4ef6,\u5efa\u8bae\u4f7f\u7528 10.244.0.0\/16\u3002\u6267\u884c\u8be5\u547d\u4ee4\u540e,\u8f93\u51fa\u4e2d\u4f1a\u663e\u793a\u5982\u4e0b\u4fe1\u606f:<\/p>\n<pre><code>Your Kubernetes control-plane has initialized successfully!\n\nTo start using your cluster, you need to run the following as a regular user:\n\n  mkdir -p $HOME\/.kube\n  sudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\n  sudo chown $(id -u):$(id -g) $HOME\/.kube\/config\n\nYou should now deploy a pod network to the cluster.\nRun &quot;kubectl apply -f [podnetwork].yaml&quot; with one of the options listed at:\n  
https:\/\/kubernetes.io\/docs\/concepts\/cluster-administration\/addons\/\n\nYou can now join any number of worker nodes by running the following on each as root:\n\nkubeadm join 192.168.0.1:6443 --token abcdef.1234567890abcdef \\\n    --discovery-token-ca-cert-hash sha256:1234..cdef <\/code><\/pre>\n<p>\u6839\u636e\u8f93\u51fa,\u63a5\u4e0b\u6765\u60a8\u9700\u8981:<\/p>\n<ol>\n<li>\n\u914d\u7f6ekubectl\u5de5\u5177,\u4f7f\u7528\u5e38\u89c4\u7528\u6237\u6267\u884c\u63d0\u793a\u76843\u6761\u547d\u4ee4<\/p>\n<ol>\n<li>\u4ee5\u5e38\u89c4\u7528\u6237\u767b\u5f55node1\u8282\u70b9,\u4e0d\u662froot\u7528\u6237<\/li>\n<li>\u6267\u884c\u4ee5\u4e0b3\u6761\u547d\u4ee4:<\/li>\n<\/ol>\n<pre><code>bash\n# \u521b\u5efa.kube\u76ee\u5f55\nmkdir -p $HOME\/.kube \n\n# \u62f7\u8d1d\u7fa4\u96c6\u914d\u7f6e\u6587\u4ef6\u5230.kube\u76ee\u5f55\nsudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config  \n\n# \u8bbe\u7f6e\u6587\u4ef6\u6743\u9650 \nsudo chown $(id -u):$(id -g) $HOME\/.kube\/config<\/code><\/pre>\n<ol start=\"3\">\n<li>\u6d4b\u8bd5kubectl\u5de5\u5177,\u6267\u884c<code>kubectl get nodes<\/code>\u67e5\u770b\u8282\u70b9\u793a\u4f8b\u8f93\u51fa:<\/li>\n<\/ol>\n<pre><code>NAME     STATUS     ROLES                  AGE   VERSION\nnode1   NotReady   control-plane,master   11m   v1.23.0<\/code><\/pre>\n<p>node1\u8282\u70b9\u663e\u793a\u4e3aNotReady\u72b6\u6001,\u8fd9\u662f\u56e0\u4e3a\u5c1a\u672a\u90e8\u7f72Pod\u7f51\u7edc\u63d2\u4ef6\u30024. 
\u90e8\u7f72Pod\u7f51\u7edc\u63d2\u4ef6,\u6bd4\u5982Flannel<\/p>\n<pre><code>bash\nkubectl apply -f https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml<\/code><\/pre>\n<ol start=\"5\">\n<li>\u518d\u6b21\u6267\u884c<code>kubectl get nodes<\/code>\u68c0\u67e5\u8282\u70b9\u72b6\u6001,node1\u8282\u70b9\u5e94\u53d8\u4e3aReady\u72b6\u6001\u3002<\/li>\n<\/ol>\n<\/li>\n<li>\n\u90e8\u7f72Pod\u7f51\u7edc\u63d2\u4ef6,\u6267\u884c <code>kubectl apply -f [podnetwork].yaml<\/code>\u547d\u4ee4\u3002\n<\/li>\n<li>\n\u60a8\u53ef\u4ee5\u9009\u62e9Calico\u6216Flannel3. \u5728node2\u548cnode3\u8282\u70b9\u6267\u884c<code>kubeadm join<\/code>\u547d\u4ee4\u5c06\u8282\u70b9\u52a0\u5165\u96c6\u7fa4\n<\/li>\n<li>\n\u5b89\u88c5\u5176\u4ed6\u63d2\u4ef6\u6216\u7ec4\u4ef6(\u53ef\u9009)\n<\/li>\n<\/ol>\n<ul>\n<li>\nnode2\u548cnode3\u4e0a\u6267\u884c<code>kubeadm join<\/code>\u547d\u4ee4\u5c06\u5176\u52a0\u5165\u96c6\u7fa4<\/p>\n<pre><code>[root@node1 ~]# kubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16\nI0427 08:52:28.284600     583 version.go:255] remote version is much newer: v1.27.1; falling back to: stable-1.22\n[init] Using Kubernetes version: v1.22.17\n[preflight] Running pre-flight checks\n      [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 23.0.4. Latest validated version: 20.10\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n      [ERROR Swap]: running with swap on is not supported. 
Please disable swap\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack trace<\/code><\/pre>\n<pre><code>[root@node1 ~]# yum remove docker \\\n>                   docker-client \\\n>                   docker-client-latest \\\n>                   docker-common \\\n>                   docker-latest \\\n>                   docker-latest-logrotate \\\n>                   docker-logrotate \\\n>                   docker-engine\nLoaded plugins: fastestmirror, langpacks\nRepository base is listed more than once in the configuration\nRepository updates is listed more than once in the configuration\nRepository extras is listed more than once in the configuration\nRepository centosplus is listed more than once in the configuration\nNo Match for argument: docker\nNo Match for argument: docker-client\nNo Match for argument: docker-client-latest\nNo Match for argument: docker-common\nNo Match for argument: docker-latest\nNo Match for argument: docker-latest-logrotate\nNo Match for argument: docker-logrotate\nNo Match for argument: docker-engine\nNo Packages marked for removal\n[root@node1 ~]# <\/code><\/pre>\n<p>\u4ece\u8f93\u51fa\u53ef\u4ee5\u770b\u51fa,\u5728\u6267\u884c<code>yum remove<\/code>\u547d\u4ee4\u5378\u8f7dDocker\u65f6\u5e76\u672a\u627e\u5230\u4efb\u4f55\u5339\u914d\u7684Docker\u8f6f\u4ef6\u5305\u8fdb\u884c\u5378\u8f7d\u3002\u8fd9\u8bf4\u660enode1\u8282\u70b9\u4e0a\u53ef\u80fd\u5e76\u672a\u5b89\u88c5Docker,\u6216\u8005Docker\u662f\u901a\u8fc7\u5176\u4ed6\u65b9\u5f0f(\u5982\u811a\u672c)\u5b89\u88c5\u7684\u3002\u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b,\u6211\u4eec\u9700\u8981\u624b\u52a8\u5220\u9664Docker\u76f8\u5173\u7684\u6587\u4ef6\u4e0e\u76ee\u5f55:<\/p>\n<pre><code>bash\n# \u5220\u9664docker\u76ee\u5f55\nrm -rf \/var\/lib\/docker\n\n# \u5220\u9664docker.service \u6587\u4ef6\nrm \/usr\/lib\/systemd\/system\/docker.service  \n\n# \u5220\u9664docker.socket\u6587\u4ef6\nrm 
\/usr\/lib\/systemd\/system\/docker.socket  \n\n# \u5220\u9664containerd\u6570\u636e\u76ee\u5f55\nrm -rf \/var\/lib\/containerd\n\n# \u5220\u9664containerd.service\u6587\u4ef6\nrm \/usr\/lib\/systemd\/system\/containerd.service<\/code><\/pre>\n<h3>\u6700\u7ec8\u5378\u8f7d\u65b9\u6cd5<\/h3>\n<\/li>\n<\/ul>\n<ol>\n<li>\u5378\u8f7dDocker 23.0.4:<\/li>\n<\/ol>\n<pre><code>bash \n# 1. \u5220\u9664\u8f6f\u4ef6\u5305\nyum remove docker-ce docker-ce-cli containerd.io \n\n# 2. \u5220\u9664\u6587\u4ef6\u4e0e\u76ee\u5f55\nrm -rf \/var\/lib\/docker \nrm \/usr\/lib\/systemd\/system\/docker.service \nrm \/usr\/lib\/systemd\/system\/docker.socket  \nrm -rf \/var\/lib\/containerd \nrm \/usr\/lib\/systemd\/system\/containerd.service<\/code><\/pre>\n<pre><code>[root@node1 ~]# yum-config-manager --add-repo https:\/\/download.docker.com\/linux\/centos\/docker-ce.repo\nLoaded plugins: fastestmirror, langpacks\nRepository base is listed more than once in the configuration\nRepository updates is listed more than once in the configuration\nRepository extras is listed more than once in the configuration\nRepository centosplus is listed more than once in the configuration\nadding repo from: https:\/\/download.docker.com\/linux\/centos\/docker-ce.repo\ngrabbing file https:\/\/download.docker.com\/linux\/centos\/docker-ce.repo to \/etc\/yum.repos.d\/docker-ce.repo\nrepo saved to \/etc\/yum.repos.d\/docker-ce.repo\n[root@node1 ~]# yum install docker-ce-19.03.15 docker-ce-cli-19.03.15 containerd.io-1.4.7\nLoaded plugins: fastestmirror, langpacks\nRepository base is listed more than once in the configuration\nRepository updates is listed more than once in the configuration\nRepository extras is listed more than once in the configuration\nRepository centosplus is listed more than once in the configuration\nLoading mirror speeds from cached hostfile\n * base: mirrors.ustc.edu.cn\n * epel: mirrors.tuna.tsinghua.edu.cn\n * extras: mirrors.ustc.edu.cn\n * updates: mirrors.ustc.edu.cn\nNo package 
containerd.io-1.4.7 available.\nResolving Dependencies\n--> Running transaction check\n---> Package docker-ce.x86_64 3:19.03.15-3.el7 will be installed\n--> Processing Dependency: containerd.io >= 1.2.2-3 for package: 3:docker-ce-19.03.15-3.el7.x86_64\n---> Package docker-ce-cli.x86_64 1:19.03.15-3.el7 will be installed\n--> Running transaction check\n---> Package containerd.io.x86_64 0:1.6.20-3.1.el7 will be installed\n--> Finished Dependency Resolution\n\nDependencies Resolved\n\n============================================================================================================================== Package                       Arch                   Version                          Repository                        Size\n==============================================================================================================================Installing:\n docker-ce                     x86_64                 3:19.03.15-3.el7                 docker-ce-stable                  24 M\n docker-ce-cli                 x86_64                 1:19.03.15-3.el7                 docker-ce-stable                  38 M\nInstalling for dependencies:\n containerd.io                 x86_64                 1.6.20-3.1.el7                   docker-ce-stable                  34 M\n\nTransaction Summary\n==============================================================================================================================Install  2 Packages (+1 Dependent package)\n\nTotal download size: 96 M\nInstalled size: 389 M\nIs this ok [y\/d\/N]: y\nDownloading packages:\n(1\/3): docker-ce-19.03.15-3.el7.x86_64.rpm                                                             |  24 MB  00:00:02     \n(2\/3): docker-ce-cli-19.03.15-3.el7.x86_64.rpm                                                         |  38 MB  00:00:02     \n(3\/3): containerd.io-1.6.20-3.1.el7.x86_64.rpm                                                         |  34 MB  00:00:06     
\n------------------------------------------------------------------------------------------------------------------------------Total                                                                                          16 MB\/s |  96 MB  00:00:06     \nRunning transaction check\nRunning transaction test\n\nTransaction check error:\n  file \/usr\/libexec\/docker\/cli-plugins\/docker-buildx from install of docker-ce-cli-1:19.03.15-3.el7.x86_64 conflicts with file from package docker-buildx-plugin-0:0.10.4-1.el7.x86_64\n\nError Summary\n-------------\n\n[root@node1 ~]# docker version\n-bash: \/usr\/bin\/docker: No such file or directory\n[root@node1 ~]# docker version\n-bash: \/usr\/bin\/docker: No such file or directory\n[root@node1 ~]# yum install docker-ce-19.03.15 docker-ce-cli-19.03.15 containerd.io-1.4.7\nLoaded plugins: fastestmirror, langpacks\nRepository base is listed more than once in the configuration\nRepository updates is listed more than once in the configuration\nRepository extras is listed more than once in the configuration\nRepository centosplus is listed more than once in the configuration\nLoading mirror speeds from cached hostfile\n * base: mirrors.aliyun.com\n * epel: mirrors.bfsu.edu.cn\n * extras: mirrors.aliyun.com\n * updates: mirrors.aliyun.com\nNo package containerd.io-1.4.7 available.\nResolving Dependencies\n--> Running transaction check\n---> Package docker-ce.x86_64 3:19.03.15-3.el7 will be installed\n--> Processing Dependency: containerd.io >= 1.2.2-3 for package: 3:docker-ce-19.03.15-3.el7.x86_64\n---> Package docker-ce-cli.x86_64 1:19.03.15-3.el7 will be installed\n--> Running transaction check\n---> Package containerd.io.x86_64 0:1.6.20-3.1.el7 will be installed\n--> Finished Dependency Resolution\n\nDependencies Resolved\n\n============================================================================================================================== Package                       Arch                   Version                    
      Repository                        Size\n==============================================================================================================================Installing:\n docker-ce                     x86_64                 3:19.03.15-3.el7                 docker-ce-stable                  24 M\n docker-ce-cli                 x86_64                 1:19.03.15-3.el7                 docker-ce-stable                  38 M\nInstalling for dependencies:\n containerd.io                 x86_64                 1.6.20-3.1.el7                   docker-ce-stable                  34 M\n\nTransaction Summary\n==============================================================================================================================Install  2 Packages (+1 Dependent package)\n\nTotal size: 96 M\nInstalled size: 389 M\nIs this ok [y\/d\/N]: y\nDownloading packages:\nRunning transaction check\nRunning transaction test\n\nTransaction check error:\n  file \/usr\/libexec\/docker\/cli-plugins\/docker-buildx from install of docker-ce-cli-1:19.03.15-3.el7.x86_64 conflicts with file from package docker-buildx-plugin-0:0.10.4-1.el7.x86_64\n\nError Summary\n-------------<\/code><\/pre>\n<p>\u4ece\u8f93\u51fa\u53ef\u4ee5\u770b\u51fa,\u5728\u5b89\u88c5Docker 19.03\u65f6\u9047\u5230\u4e86\u4e00\u4e9b\u9519\u8bef\u3002\u4e3b\u8981\u9519\u8bef\u662f:<\/p>\n<pre><code>Transaction check error:\n file \/usr\/libexec\/docker\/cli-plugins\/docker-buildx from install of docker-ce-cli-1:19.03.15-3.el7.x86_64 conflicts with file from package 
docker-buildx-plugin-0:0.10.4-1.el7.x86_64<\/code><\/pre>\n<p>\u8fd9\u8bf4\u660e\u60a8node1\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b89\u88c5\u4e86docker-buildx-plugin\u8f6f\u4ef6\u5305,\u8fd9\u4e0edocker-ce-cli\u8f6f\u4ef6\u5305\u4ea7\u751f\u6587\u4ef6\u51b2\u7a81\u3002\u6240\u4ee5,\u9700\u8981\u5148\u5378\u8f7ddocker-buildx-plugin\u8f6f\u4ef6\u5305,\u7136\u540e\u91cd\u65b0\u5b89\u88c5Docker\u3002\u5177\u4f53\u6b65\u9aa4\u5982\u4e0b:<\/p>\n<ol>\n<li>\u5378\u8f7ddocker-buildx-plugin<\/li>\n<\/ol>\n<pre><code>bash\nyum remove docker-buildx-plugin<\/code><\/pre>\n<ol start=\"2\">\n<li>\u6e05\u7406Docker\u65e7\u7248\u672c\u9057\u7559\u7684\u6587\u4ef6\u4e0e\u76ee\u5f55:<\/li>\n<\/ol>\n<pre><code>bash \nrm -rf \/var\/lib\/docker \nrm \/usr\/lib\/systemd\/system\/docker.service  \nrm \/usr\/lib\/systemd\/system\/docker.socket\nrm -rf \/var\/lib\/containerd\nrm \/usr\/lib\/systemd\/system\/containerd.service <\/code><\/pre>\n<ol start=\"3\">\n<li>\u5b89\u88c5Docker 19.03:<\/li>\n<\/ol>\n<pre><code>bash\nyum install -y docker-ce-19.03.15 docker-ce-cli-19.03.15 containerd.io<\/code><\/pre>\n<ol start=\"4\">\n<li>\u542f\u52a8Docker\u5e76\u8bbe\u7f6e\u5f00\u673a\u81ea\u542f:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl start docker \nsystemctl enable docker<\/code><\/pre>\n<ol start=\"5\">\n<li>\u6d4b\u8bd5Docker\u662f\u5426\u6b63\u5e38\u5de5\u4f5c:<\/li>\n<\/ol>\n<pre><code>bash \ndocker version\ndocker run hello-world<\/code><\/pre>\n<ol start=\"3\">\n<li>\u91cd\u65b0\u6267\u884c<code>kubeadm init<\/code>\u547d\u4ee4\u521d\u59cb\u5316Kubernetes\u63a7\u5236\u5e73\u9762:<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16<\/code><\/pre>\n<p>\u5982\u679c\u8f93\u51fa\u663e\u793a&quot;Kubernetes control plane has initialized successfully&quot;,\u8bf4\u660eKubernetes\u63a7\u5236\u5e73\u9762\u521d\u59cb\u5316\u6210\u529f!<\/p>\n<p>\u5728\u91cd\u65b0\u6267\u884c<code>kubeadm 
init<\/code>\u4e4b\u524d,\u9700\u8981\u8fdb\u884c\u51e0\u4e2a\u51c6\u5907\u6b65\u9aa4:1. \u505c\u6b62kubelet\u548ckubeadm\u670d\u52a1:<\/p>\n<pre><code>bash\nsystemctl stop kubelet \nsystemctl stop kubeadm<\/code><\/pre>\n<ol start=\"2\">\n<li>\u6e05\u7406\u8282\u70b9\u4e0a\u5df2\u6709\u7684Kubernetes\u7ec4\u4ef6\u548c\u6570\u636e:<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm reset \nrm -rf \/etc\/cni\/net.d\nrm -rf \/var\/lib\/etcd \nrm -rf \/var\/lib\/kubelet \nrm -rf \/var\/lib\/dockershim<\/code><\/pre>\n<ol start=\"3\">\n<li>\u91cd\u542fDocker\u670d\u52a1:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl restart docker<\/code><\/pre>\n<ol start=\"4\">\n<li>\u91cd\u542fkubelet\u670d\u52a1:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl restart kubelet<\/code><\/pre>\n<ol start=\"5\">\n<li>\n\u4f7f\u7528<code>kubeadm init<\/code>\u91cd\u65b0\u521d\u59cb\u5316Kubernetes\u4e3b\u8282\u70b9:<\/p>\n<p>kubelet &amp; kubeadm:1.22.4<br \/>\n- kubectl:1.22.4<br \/>\n- kubernetes-cni:0.8.7<br \/>\n- chrony:4.0+<br \/>\n- coredns:1.8.0<br \/>\n- etcd:3.4.13-0<br \/>\n- pause:3.4.1\n<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm init \\\n  --apiserver-advertise-address=192.168.1.127 \\\n  --image-repository registry.aliyuncs.com\/google_containers \\\n  --kubernetes-version v1.22.4 \\\n  --service-cidr=10.96.0.0\/12 \\\n  --pod-network-cidr=10.244.0.0\/16<\/code><\/pre>\n<p>\u5176\u4e2d:--apiserver-advertise-address: \u6307\u5b9aAPIServer\u7684ADVERTISE\u5730\u5740,<\/p>\n<p>\u8fd9\u91cc\u662fnode1\u7684\u5185\u7f51IP--image-repository: \u6307\u5b9a\u955c\u50cf\u4ed3\u5e93,\u8fd9\u91cc\u4f7f\u7528\u963f\u91cc\u4e91\u7684\u4ed3\u5e93<\/p>\n<p>--kubernetes-version: \u6307\u5b9aKubernetes\u7248\u672c,<\/p>\n<p>\u8fd9\u91cc\u4f7f\u7528v1.22.4<\/p>\n<p>--service-cidr: \u6307\u5b9aService\u865a\u62df\u7f51\u7edc\u6bb5,\u8fd9\u91cc\u4f7f\u752810.96.0.0\/12<\/p>\n<p>--pod-network-cidr: \u6307\u5b9aPod\u865a\u62df\u7f51\u7edc\u6bb5,\u8fd9\u91cc\u4f7f\u752810.244.0.0\/16\u30026. 
<\/p>\n<p>\u6839\u636ekubeadm init\u7684\u8f93\u51fa,\u8bbe\u7f6ekubeconfig\u6587\u4ef6\u5e76\u542f\u52a8kube-proxy\u3001kube-dns\u7b49Addon\u7ec4\u4ef6:<\/p>\n<pre><code>bash\nmkdir -p $HOME\/.kube\nsudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\nsudo chown $(id -u):$(id -g) $HOME\/.kube\/config\n\nkubectl apply -f https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml<\/code><\/pre>\n<ol start=\"7\">\n<li>\u9a8c\u8bc1Kubernetes\u4e3b\u8282\u70b9\u662f\u5426\u521d\u59cb\u5316\u6210\u529f:<\/li>\n<\/ol>\n<pre><code>bash\nkubectl get nodes\nkubectl get pods --all-namespaces<\/code><\/pre>\n<p>\u5982\u679c\u8282\u70b9\u5168\u90e8\u4e3aReady\u72b6\u6001,Pod\u90fd\u5904\u4e8eRunning\u72b6\u6001,\u8bf4\u660eKubernetes\u96c6\u7fa4\u521d\u59cb\u5316\u6210\u529f!<\/p>\n<p>\u4e8b\u5b9e\u4e0a,\u5bf9\u4e8e\u521d\u6b21\u4f53\u9a8cKubernetes,\u4f7f\u7528<code>kubeadm init<\/code>\u7684\u9ed8\u8ba4\u53c2\u6570\u5c31\u53ef\u4ee5\u8f7b\u677e\u521d\u59cb\u5316\u4e00\u4e2a\u5355\u8282\u70b9\u7684Kubernetes\u96c6\u7fa4\u3002\u4e0b\u9762\u662f\u6700\u7b80\u5355\u7684\u521d\u59cb\u5316\u547d\u4ee4:<\/p>\n<pre><code>bash\nkubeadm init<\/code><\/pre>\n<p>\u8fd9\u6761\u547d\u4ee4\u4f1a:<\/p>\n<ol>\n<li>\u4f7f\u7528kubeadm\u5b89\u88c5\u7684\u6700\u65b0\u7a33\u5b9a\u7248\u672c\u90e8\u7f72Kubernetes\u63a7\u5236\u5e73\u9762<\/li>\n<li>\u4e3a\u96c6\u7fa4\u751f\u6210\u81ea\u7b7e\u540d\u8bc1\u4e66<\/li>\n<li>\u5b89\u88c5\u9ed8\u8ba4\u7684\u7f51\u7edc\u63d2\u4ef6(\u5982\u679c\u6709\u5b89\u88c5\u7684\u8bdd)<\/li>\n<li>\u90e8\u7f72\u6838\u5fc3DNS addon\u3001kube-proxy\u7b49\u7ec4\u4ef6<\/li>\n<li>\u4e3a\u60a8\u672c\u5730\u751f\u6210admin.conf 
kubeconfig\u6587\u4ef6<\/li>\n<li>\u663e\u793a\u96c6\u7fa4\u76f8\u5173\u4fe1\u606f\u53ca\u8fdb\u4e00\u6b65\u6307\u5f15\u6240\u4ee5,\u5982\u679c\u60a8\u53ea\u662f\u60f3\u5feb\u901f\u4f53\u9a8c\u548c\u5b66\u4e60Kubernetes,\u4f7f\u7528\u4e0a\u9762\u7684\u7b80\u5355\u547d\u4ee4\u5373\u53ef\u5b8c\u6210\u521d\u59cb\u5316\u3002\u5f53\u60a8\u66f4\u6df1\u5165\u7406\u89e3\u5e76\u8bd5\u9a8cKubernetes\u5404\u7ec4\u4ef6\u4e4b\u540e,\u53ef\u4ee5\u6839\u636e\u81ea\u8eab\u9700\u6c42\u9009\u62e9\u662f\u5426\u6307\u5b9a\u53c2\u6570:- \u6307\u5b9a\u7248\u672c: --kubernetes-version<br \/>\n- \u6307\u5b9aCIDR: --service-cidr --pod-network-cidr<br \/>\n- \u6307\u5b9a\u955c\u50cf\u6e90: --image-repository<br \/>\n- \u6307\u5b9aADVERTISE\u5730\u5740:--apiserver-advertise-address<br \/>\n- \u7b49\u7b49<\/li>\n<\/ol>\n<pre><code>[root@node1 ~]# kubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16\nI0427 09:30:32.251099    3800 version.go:255] remote version is much newer: v1.27.1; falling back to: stable-1.22\n[init] Using Kubernetes version: v1.22.17\n[preflight] Running pre-flight checks\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n        [ERROR Swap]: running with swap on is not supported. 
Please disable swap\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack trace of this error execute with --v=5 or higher<\/code><\/pre>\n<p>\u4ece\u521d\u59cb\u5316\u8f93\u51fa\u53ef\u4ee5\u770b\u51fa,\u5728\u8fd0\u884cpreflight\u68c0\u67e5\u65f6\u53d1\u73b0node1\u8282\u70b9\u4e0aswap\u5206\u533a\u662f\u542f\u7528\u7684,\u8fd9\u4f1a\u5bfc\u81f4\u521d\u59cb\u5316\u5931\u8d25\u3002Kubernetes\u4e0d\u652f\u6301swap\u5206\u533a,\u6240\u4ee5\u5728\u521d\u59cb\u5316Kubernetes\u8282\u70b9\u4e4b\u524d,\u9700\u8981\u7981\u7528swap\u3002\u8981\u7981\u7528swap\u7684\u65b9\u6cd5\u5982\u4e0b:1.\u4e34\u65f6\u7981\u7528swap\u5206\u533a:<\/p>\n<pre><code>bash\nswapoff -a  # \u4e34\u65f6\u7981\u7528\u6240\u6709\u7684swap\u5206\u533a<\/code><\/pre>\n<p>2.\u6c38\u4e45\u7981\u7528swap\u5206\u533a:- \u7f16\u8f91\/etc\/fstab\u6587\u4ef6,\u6ce8\u91ca\u6389SWAP\u7684\u884c:<\/p>\n<pre><code>bash\nvim \/etc\/fstab\n\n# \/dev\/mapper\/centos-swap swap                    swap    defaults        0 0<\/code><\/pre>\n<p>- \u91cd\u542f\u8282\u70b9\u6765\u751f\u6548\u914d\u7f6e<\/p>\n<pre><code>bash\nreboot<\/code><\/pre>\n<p>3.\u5728Kubernetes\u8282\u70b9\u521d\u59cb\u5316\u4e4b\u524d,\u786e\u8ba4swap\u5df2\u7ecf\u7981\u7528:<\/p>\n<pre><code>bash \nfree -m  # \u68c0\u67e5\u662f\u5426\u8fd8\u6709swap\u5206\u533a\n              total        used        free      shared  buff\/cache   available\nMem:           1878         164         165           0        1548        1641\nSwap:             0           0           0           <\/code><\/pre>\n<p>\u8f93\u51fa\u4e2dSwap: 0 0 0\u8868\u793aswap\u5df2\u7981\u7528\u3002<\/p>\n<p>\u5728\u914d\u7f6e\u6587\u4ef6\/etc\/fstab\u4e2d,\u6ce8\u91ca\u884c\u4ee5#\u5f00\u59cb\u3002\u6240\u4ee5,\u8981\u6ce8\u91ca\u6389swap\u90a3\u4e00\u884c,\u5e94\u4fee\u6539\u4e3a:<\/p>\n<pre><code># \/etc\/fstab\n# Created by anaconda on Mon Sep  4 16:13:12 2017  \n#  \n# Accessible 
filesystems, by reference, are maintained under &#039;\/dev\/disk&#039;  \n# See man pages fstab(5), findfs(8), mount(8) and\/or blkid(8) for more info\n#\nUUID=d3b7747c-23f4-47f5-9b90-eeb6faa412ea \/                       ext4    defaults        1 1  \n#UUID=959ab3ee-5bea-44c2-a908-c577a79e5bb9 swap                    swap    defaults        0 0<\/code><\/pre>\n<p>\u5c06swap\u4e00\u884c\u524d\u9762\u52a0#\u6ce8\u91ca\u6389\u5c31\u53ef\u4ee5\u4e86\u3002\u4fdd\u5b58\/etc\/fstab\u6587\u4ef6\u540e,\u6267\u884c:<\/p>\n<pre><code>bash\nswapoff -a  # \u4e34\u65f6\u7981\u7528swap\nreboot     # \u91cd\u542f\u7cfb\u7edf\u4f7f\u914d\u7f6e\u751f\u6548<\/code><\/pre>\n<p>\u91cd\u542f\u540e,\u4f7f\u7528<code>free -m<\/code>\u547d\u4ee4\u68c0\u67e5,swap\u5e94\u8be5\u663e\u793a\u4e3a0,\u8fd9\u8bf4\u660eswap\u5df2\u88ab\u6c38\u4e45\u7981\u7528\u3002<\/p>\n<p>4.\u91cd\u65b0\u6267\u884c<code>kubeadm init<\/code>\u547d\u4ee4\u521d\u59cb\u5316Kubernetes\u4e3b\u8282\u70b9:<\/p>\n<pre><code>bash\nkubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16<\/code><\/pre>\n<p>\u521d\u59cb\u5316\u8fc7\u7a0b\u4e2d\u5982\u679c\u6ca1\u6709\u63d0\u5230swap\u76f8\u5173\u9519\u8bef,\u8bf4\u660eKubernetes\u4e3b\u8282\u70b9\u521d\u59cb\u5316\u6210\u529f!\u63a5\u4e0b\u6765\u6309\u7167kubeadm init\u8f93\u51fa\u5b8c\u6210\u96c6\u7fa4\u642d\u5efa\u5373\u53ef\u3002<\/p>\n<pre><code>[root@node1 ~]# kubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16\nI0427 09:52:09.941900    8401 version.go:255] remote version is much newer: v1.27.1; falling back to: stable-1.22\n[init] Using Kubernetes version: v1.22.17\n[preflight] Running pre-flight checks\n[preflight] Pulling images required for setting up a Kubernetes cluster\n[preflight] This might take a minute or two, depending on the speed of your internet connection\n[preflight] You can also perform this action in beforehand using 'kubeadm config images 
pull'<\/code><\/pre>\n<p>\u7981\u7528swap\u540e,\u518d\u6b21\u6267\u884c<code>kubeadm init<\/code>\u65f6\u6b63\u786e\u4e0b\u8f7d\u4e86\u9700\u8981\u7684\u955c\u50cf\u5e76\u8fd0\u884c\u4e86preflight\u68c0\u67e5\u3002\u8fd9\u8bf4\u660enode1\u8282\u70b9\u73b0\u5728\u53ef\u4ee5\u6b63\u786e\u521d\u59cb\u5316Kubernetes\u4e86\u3002\u6839\u636e\u8f93\u51fa,kubeadm\u6b63\u5728\u4e0b\u8f7d\u521d\u59cb\u5316Kubernetes\u96c6\u7fa4\u6240\u9700\u7684\u955c\u50cf,\u8fd9\u53ef\u80fd\u9700\u8981\u51e0\u5206\u949f,\u5177\u4f53\u53d6\u51b3\u4e8e\u7f51\u7edc\u8fde\u63a5\u901f\u5ea6\u3002\u4e0b\u8f7d\u5b8c\u6210\u540e,kubeadm\u4f1a\u7ee7\u7eed\u81ea\u52a8\u8fdb\u884c\u8bc1\u4e66\u751f\u6210\u3001Kubernetes\u63a7\u5236\u5e73\u9762\u5b89\u88c5\u3001kube-proxy\u548cDNS\u63d2\u4ef6\u5b89\u88c5\u7b49\u5de5\u4f5c,\u5e76\u8f93\u51fa\u76f8\u5173\u4fe1\u606f\u3002\u60a8\u9700\u8981\u6309\u7167\u8f93\u51fa,\u8fdb\u884c\u4ee5\u4e0b\u64cd\u4f5c:<\/p>\n<ol>\n<li>kubeadm\u4f1a\u751f\u6210\u4e00\u4e2akubeconfig\u6587\u4ef6,\u7528\u4e8e\u672c\u5730\u8bbf\u95ee\u96c6\u7fa4,\u60a8\u9700\u8981\u5982\u4e0b\u79fb\u52a8\u4e0e\u4fee\u6539\u8be5\u6587\u4ef6\u6743\u9650:<\/li>\n<\/ol>\n<pre><code>bash\nmkdir -p $HOME\/.kube\nsudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\nsudo chown $(id -u):$(id -g) $HOME\/.kube\/config<\/code><\/pre>\n<ol start=\"2\">\n<li>\u5e94\u7528\u4e00\u4e2aPod\u7f51\u7edc\u89e3\u51b3\u65b9\u6848,kubeadm\u63a8\u8350\u4f7f\u7528flannel\u3002\u53ef\u4ee5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u5feb\u901f\u5b89\u88c5:<\/li>\n<\/ol>\n<pre><code>kubectl apply -f https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml<\/code><\/pre>\n<ol start=\"3\">\n<li>\u68c0\u67e5\u8282\u70b9\u548cPod\u7f51\u7edc\u662f\u5426\u51c6\u5907\u5c31\u7eea:<\/li>\n<\/ol>\n<pre><code>bash\nkubectl get nodes \nkubectl get pods 
--all-namespaces<\/code><\/pre>\n<p>\u6240\u6709\u8282\u70b9\u72b6\u6001\u5982\u679c\u53d8\u4e3aReady,Pod\u5168\u90e8\u8fd0\u884c,\u8bf4\u660eKubernetes\u96c6\u7fa4\u5b89\u88c5\u6210\u529f!<\/p>\n<ol start=\"4\">\n<li>\u9ed8\u8ba4\u5b89\u88c5\u7684\u7ec4\u4ef6\u6709DNS\u63d2\u4ef6CoreDNS\u3001kube-proxy\u7b49\u3002\u60a8\u53ef\u4ee5\u4f7f\u7528kubectl\u547d\u4ee4\u67e5\u770b\u5404\u4e2a\u8d44\u6e90\u5bf9\u8c61\u3002<\/li>\n<\/ol>\n<pre><code>[root@node1 ~]# kubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16\nI0427 09:52:09.941900    8401 version.go:255] remote version is much newer: v1.27.1; falling back to: stable-1.22\n[init] Using Kubernetes version: v1.22.17\n[preflight] Running pre-flight checks\n[preflight] Pulling images required for setting up a Kubernetes cluster\n[preflight] This might take a minute or two, depending on the speed of your internet connection\n[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'\n\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n        [ERROR ImagePull]: failed to pull image k8s.gcr.io\/kube-apiserver:v1.22.17: output: Error response from daemon: Get https:\/\/k8s.gcr.io\/v2\/: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers), error: exit status 1\n        [ERROR ImagePull]: failed to pull image k8s.gcr.io\/kube-controller-manager:v1.22.17: output: Error response from daemon: Get https:\/\/k8s.gcr.io\/v2\/: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)\n, error: exit status 1\n        [ERROR ImagePull]: failed to pull image k8s.gcr.io\/kube-scheduler:v1.22.17: output: Error response from daemon: Get https:\/\/k8s.gcr.io\/v2\/: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers), error: exit status 1\n        [ERROR ImagePull]: failed to pull image 
k8s.gcr.io\/kube-proxy:v1.22.17: output: Error response from daemon: Get https:\/\/k8s.gcr.io\/v2\/: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)\n, error: exit status 1\n        [ERROR ImagePull]: failed to pull image k8s.gcr.io\/pause:3.5: output: Error response from daemon: Get https:\/\/k8s.gcr.io\/v2\/: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)\n, error: exit status 1\n        [ERROR ImagePull]: failed to pull image k8s.gcr.io\/etcd:3.5.0-0: output: Error response from daemon: Get https:\/\/k8s.gcr.io\/v2\/: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)\n, error: exit status 1\n        [ERROR ImagePull]: failed to pull image k8s.gcr.io\/coredns\/coredns:v1.8.4: output: Error response from daemon: Get https:\/\/k8s.gcr.io\/v2\/: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)\n, error: exit status 1\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack trace of this error execute with --v=5 or higher\n[root@node1 ~]# <\/code><\/pre>\n<p>\u4ece\u8f93\u51fa\u53ef\u4ee5\u770b\u51fa,\u5728\u62c9\u53d6\u521d\u59cb\u5316\u6240\u9700\u955c\u50cf\u65f6\u51fa\u73b0\u9519\u8bef,\u5bfc\u81f4preflight\u68c0\u67e5\u5931\u8d25\u3002k8s.gcr.io,\u8fd9\u662fGoogle Container Registry\u5730\u5740,\u7531\u4e8e\u7f51\u7edc\u95ee\u9898\u65e0\u6cd5\u6b63\u5e38\u8bbf\u95ee\u3002\u8981\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898,\u6709\u4e24\u79cd\u65b9\u5f0f:1. 
\u7b49\u5f85\u7f51\u7edc\u6062\u590d\u6b63\u5e38,\u91cd\u65b0\u6267\u884cinit\u547d\u4ee4\u3002\u8fd9\u6837\u5373\u53ef\u6b63\u5e38\u8bbf\u95eek8s.gcr.io\u30022. \u4f7f\u7528<code>--image-repository<\/code>\u53c2\u6570\u6307\u5b9a\u5176\u4ed6\u955c\u50cf\u4ed3\u5e93\u5730\u5740,\u4f8b\u5982\u963f\u91cc\u4e91\u4ed3\u5e93,\u4ee3\u66ff\u9ed8\u8ba4\u7684gcr.io\u3002\u521d\u59cb\u5316\u547d\u4ee4\u53ef\u4ee5\u4fee\u6539\u4e3a:<\/p>\n<pre><code>bash\nkubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16  --image-repository registry.cn-hangzhou.aliyuncs.com\/google_containers<\/code><\/pre>\n<p>\u8fd9\u5c06\u4f7f\u7528\u963f\u91cc\u4e91\u7684\u955c\u50cf\u4ed3\u5e93\u6765\u62c9\u53d6\u6240\u9700\u955c\u50cf\u3002<\/p>\n<pre><code>[etcd] Creating static Pod manifest for local etcd in \"\/etc\/kubernetes\/manifests\"\n[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"\/etc\/kubernetes\/manifests\". 
This can take up to 4m0s\n[kubelet-check] Initial timeout of 40s passed.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with err48\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with err48\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with err48\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with err48\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with err48\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n\n        Unfortunately, an error has occurred:\n                timed out waiting for the condition\n\n        This error is likely caused by:\n                - The kubelet is not running\n                - The kubelet is unhealthy due to a misconfiguration of the node in some way (req\n\n        If you are on a systemd-powered system, you can try to troubleshoot the error with the fo\n                - 'systemctl status kubelet'\n                - 'journalctl -xeu kubelet'\n\n        Additionally, a control plane component may have crashed or exited when started by the co\n        To troubleshoot, list all containers using your preferred container runtimes CLI.\n\n        Here is one example how you may list all Kubernetes 
containers running in docker:\n                - 'docker ps -a | grep kube | grep -v pause'\n                Once you have found the failing container, you can inspect its logs with:\n                - 'docker logs CONTAINERID'\n\nerror execution phase wait-control-plane: couldn't initialize a Kubernetes cluster\nTo see the stack trace of this error execute with --v=5 or higher\n[root@node1 ~]# <\/code><\/pre>\n<p>\u4ece\u8f93\u51fa\u53ef\u4ee5\u770b\u51fa,\u521d\u59cb\u5316\u8fc7\u7a0b\u5728\u7b49\u5f85kubelet\u5f15\u5bfc\u63a7\u5236\u5e73\u9762\u7ec4\u4ef6\u65f6\u8d85\u65f6\u3002\u8fd9\u662f\u56e0\u4e3akubelet\u8fdb\u7a0b\u6ca1\u6709\u6b63\u5e38\u542f\u52a8\u6216\u8005\u8fd0\u884c\u3002kubeadm\u901a\u8fc7\u8c03\u7528kubelet\u7684\u5065\u5eb7\u68c0\u67e5API\u6765\u786e\u8ba4\u5176\u8fd0\u884c\u72b6\u6001,\u4f46\u4e00\u76f4\u5931\u8d25,\u56e0\u6b64\u5224\u5b9akubelet\u672a\u8fd0\u884c\u5e76\u5bfc\u81f4\u521d\u59cb\u5316\u8d85\u65f6\u3002\u8981\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898,\u9700\u8981\u9996\u5148\u786e\u4fddkubelet\u8fdb\u7a0b\u6b63\u5e38\u542f\u52a8\u5e76\u54cd\u5e94\u8bf7\u6c42\u3002\u6709\u51e0\u4e2a\u5730\u65b9\u9700\u8981\u68c0\u67e5:<\/p>\n<ol>\n<li>\u68c0\u67e5kubelet\u8fdb\u7a0b\u662f\u5426\u542f\u52a8\u3002\u4f7f\u7528<code>systemctl status kubelet<\/code>\u547d\u4ee4\u67e5\u770bkubelet\u670d\u52a1\u72b6\u6001\u3002\u5982\u679c\u672a\u542f\u52a8,\u4f7f\u7528<code>systemctl start kubelet<\/code>\u542f\u52a8\u3002<\/li>\n<li>\u68c0\u67e5kubelet\u542f\u52a8\u540e\u7684\u65e5\u5fd7,\u770b\u662f\u5426\u6709\u9519\u8bef\u3002\u4f7f\u7528<code>journalctl -xeu 
kubelet<\/code>\u67e5\u770b\u65e5\u5fd7\u3002\u5982\u679c\u6709\u9519\u8bef,\u9700\u8981\u6839\u636e\u9519\u8bef\u4fe1\u606f\u8fdb\u884c\u6392\u67e5\u3002<\/li>\n<li>\u68c0\u67e5kubelet\u4f7f\u7528\u7684\u914d\u7f6e\u6587\u4ef6\/var\/lib\/kubelet\/config.yaml\u3002\u786e\u4fdd\u5176\u914d\u7f6e\u6b63\u786e,\u6ca1\u6709\u8bef\u914d\u53c2\u6570\u3002<\/li>\n<li>\u786e\u4fdd\u673a\u5668\u80fd\u591f\u8bbf\u95ee\u6240\u9700\u955c\u50cf\u4ed3\u5e93,kubelet\u9700\u8981\u4e0b\u8f7d\u6240\u9700\u955c\u50cf\u624d\u80fd\u5de5\u4f5c\u3002<\/li>\n<li>\u7f51\u7edc\u95ee\u9898\u4e5f\u4f1a\u5bfc\u81f4kubelet\u65e0\u6cd5\u54cd\u5e94\u3002\u68c0\u67e5\u8282\u70b9\u7f51\u7edc\u914d\u7f6e\u3002<\/li>\n<li>\u8d44\u6e90\u4e0d\u8db3\u4e5f\u4f1a\u5f71\u54cdkubelet\u8fd0\u884c\u3002\u68c0\u67e5\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3002<\/li>\n<\/ol>\n<pre><code>[root@node1 ~]# systemctl status kubelet\n\u25cf kubelet.service - kubelet: The Kubernetes Node Agent\n   Loaded: loaded (\/usr\/lib\/systemd\/system\/kubelet.service; enabled; vendor preset: disabled)\n  Drop-In: \/usr\/lib\/systemd\/system\/kubelet.service.d\n           \u2514\u250010-kubeadm.conf\n   Active: activating (auto-restart) (Result: exit-code) since Thu 2023-04-27 10:11:36 CST; 5s ago\n     Docs: https:\/\/kubernetes.io\/docs\/\n  Process: 14527 ExecStart=\/usr\/bin\/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=1\/FAILURE)\n Main PID: 14527 (code=exited, status=1\/FAILURE)\n\nApr 27 10:11:36 node1 systemd[1]: kubelet.service: main process exited, code=exited...UREApr 27 10:11:36 node1 systemd[1]: Unit kubelet.service entered failed state.\nApr 27 10:11:36 node1 systemd[1]: kubelet.service failed.\nHint: Some lines were ellipsized, use -l to show in 
full.<\/code><\/pre>\n<p>\u4ecekubelet\u670d\u52a1\u72b6\u6001\u53ef\u4ee5\u770b\u51fa,kubelet\u8fdb\u7a0b\u542f\u52a8\u540e\u5f88\u5feb\u5c31\u9000\u51fa\u4e86,\u5e76\u8fdb\u5165failed\u72b6\u6001\u3002\u8fd9\u8bf4\u660ekubelet\u542f\u52a8\u65f6\u51fa\u73b0\u4e86\u9519\u8bef,\u65e0\u6cd5\u6b63\u5e38\u8fd0\u884c\u3002\u8981\u6392\u67e5\u9519\u8bef,\u9700\u8981\u68c0\u67e5kubelet\u7684\u65e5\u5fd7\u3002\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b:<\/p>\n<pre><code>bash\njournalctl -xeu kubelet <\/code><\/pre>\n<p>\u8fd9\u4f1a\u8f93\u51fakubelet\u7684\u8be6\u7ec6\u65e5\u5fd7\u4fe1\u606f,\u5305\u62ec\u542f\u52a8\u9519\u8bef\u4fe1\u606f\u3002\u6839\u636e\u8f93\u51fa,\u67e5\u627e\u9519\u8bef\u539f\u56e0\u3002<br \/>\n\u5e38\u89c1\u7684\u9519\u8bef\u6709:<\/p>\n<ol>\n<li>\n\u955c\u50cf\u62c9\u53d6\u5931\u8d25:\u7f51\u7edc\u95ee\u9898\u6216\u4ed3\u5e93\u8ba4\u8bc1\u5931\u8d25\u5bfc\u81f4\n<\/li>\n<li>\n\u914d\u7f6e\u9519\u8bef:kubelet\u542f\u52a8\u53c2\u6570\u6216\u914d\u7f6e\u6587\u4ef6kubelet.config\u6709\u8bef\n<\/li>\n<li>\n\u8d44\u6e90\u4e0d\u8db3:\u5185\u5b58\u3001CPU\u6216\u8005\u78c1\u76d8\u7a7a\u95f4\u4e0d\u8db3,\u5bfc\u81f4kubelet\u65e0\u6cd5\u542f\u52a8\n<\/li>\n<li>\n\u4f9d\u8d56\u670d\u52a1\u95ee\u9898:\u7cfb\u7edf\u4f9d\u8d56\u670d\u52a1\u5982docker\u542f\u52a8\u5931\u8d25,\u4f1a\u5f71\u54cdkubelet\n<\/li>\n<li>\n\u6743\u9650\u95ee\u9898:kubelet\u6ca1\u6709\u6b63\u786e\u7684\u6587\u4ef6\u8bfb\u5199\u6743\u9650<\/p>\n<pre><code>[root@node1 ~]# journalctl -xeu kubelet\nApr 27 10:13:49 node1 kubelet[15854]: Flag --network-plugin has been deprecated, will be \nApr 27 10:13:49 node1 kubelet[15854]: Flag --network-plugin has been deprecated, will be \nApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.599506   15854 server.go:440] \"KubelApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.599861   15854 server.go:868] \"ClienApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.602320   15854 certificate_store.go:Apr 27 10:13:49 node1 kubelet[15854]: I0427 
10:13:49.603585   15854 dynamic_cafile_contenApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.681532   15854 server.go:687] \"--cgrApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.681835   15854 container_manager_linApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.681944   15854 container_manager_linApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.681969   15854 topology_manager.go:1Apr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.681982   15854 container_manager_linApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.682027   15854 state_mem.go:36] \"IniApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.682099   15854 kubelet.go:314] \"UsinApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.682135   15854 client.go:78] \"ConnecApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.682150   15854 client.go:97] \"Start \nApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.690804   15854 docker_service.go:566Apr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.690831   15854 docker_service.go:242Apr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.694427   15854 cni.go:204] \"Error vaApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.694465   15854 cni.go:239] \"Unable tApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.702505   15854 cni.go:204] \"Error vaApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.702529   15854 cni.go:239] \"Unable tApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.702591   15854 docker_service.go:257Apr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.706184   15854 cni.go:204] \"Error vaApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.706210   15854 cni.go:239] \"Unable tApr 27 10:13:49 node1 kubelet[15854]: I0427 10:13:49.712293   15854 docker_service.go:264Apr 27 10:13:49 node1 kubelet[15854]: E0427 10:13:49.712331   15854 server.go:294] \"FaileApr 27 10:13:49 node1 systemd[1]: kubelet.service: main process exited, code=exited, statApr 27 10:13:49 node1 systemd[1]: Unit kubelet.service entered failed 
state.\nApr 27 10:13:49 node1 systemd[1]: kubelet.service failed.\nlines 1345-1373\/1373 (END)<\/code><\/pre>\n<p>\u4ece\u65e5\u5fd7\u53ef\u4ee5\u770b\u5230,kubelet\u542f\u52a8\u5931\u8d25\u7684\u539f\u56e0\u662fCNI\u7f51\u7edc\u63d2\u4ef6\u9519\u8bef\u3002\u6709\u4ee5\u4e0b\u4e24\u5904\u9519\u8bef\u65e5\u5fd7:<\/p>\n<pre><code>I0427 10:13:49.694427   15854 cni.go:204] \"Error validating network plugin kernel parameters: \"\nI0427 10:13:49.694465   15854 cni.go:239] \"Unable to update cni config - Error validating network plugin kernel parameters: \" <\/code><\/pre>\n<p>\u8fd9\u8bf4\u660eCNI\u7f51\u7edc\u63d2\u4ef6\u7684\u914d\u7f6e\u6216\u6267\u884c\u6709\u95ee\u9898,\u5bfc\u81f4kubelet\u65e0\u6cd5\u8bbe\u7f6e\u5bb9\u5668\u7f51\u7edc,\u6240\u4ee5\u542f\u52a8\u5931\u8d25\u3002<\/p>\n<p>\u89e3\u51b3\u65b9\u6848:<\/p>\n<ol>\n<li>\n\u68c0\u67e5CNI\u7f51\u7edc\u63d2\u4ef6(\u9ed8\u8ba4\u4e3abridge)\u7684\u914d\u7f6e\u6587\u4ef6\u662f\u5426\u6b63\u786e\u3002\u914d\u7f6e\u6587\u4ef6\u901a\u5e38\u5728<code>\/etc\/cni\/net.d\/<\/code>\u76ee\u5f55\u4e0b\u3002<\/p>\n<p>\u786e\u5b9aCNI\u7f51\u7edc\u63d2\u4ef6\u914d\u7f6e\u6b63\u786e\u7684\u6b65\u9aa4\u5982\u4e0b:1. 
\u8fdb\u5165CNI\u914d\u7f6e\u76ee\u5f55<code>\/etc\/cni\/net.d\/<\/code><\/p>\n<pre><code>bash\ncd \/etc\/cni\/net.d\/<\/code><\/pre>\n<ol start=\"2\">\n<li>\u68c0\u67e5\u662f\u5426\u6709 CNI \u7f51\u7edc\u914d\u7f6e\u6587\u4ef6\u3002\u9ed8\u8ba4\u4e3a<code>10-bridge.conf<\/code><\/li>\n<\/ol>\n<pre><code>bash \nls\n# \u5e94\u8be5\u770b\u523010-bridge.conf\u6587\u4ef6<\/code><\/pre>\n<ol start=\"3\">\n<li>\u68c0\u67e5<code>10-bridge.conf<\/code>\u6587\u4ef6\u5185\u5bb9\u662f\u5426\u6b63\u786e\u3002\u6587\u4ef6\u5185\u5bb9\u5e94\u4e3a:<\/li>\n<\/ol>\n<pre><code>json\n{\n  \"cniVersion\": \"0.4.0\",\n  \"name\": \"bridge\",\n  \"type\": \"bridge\",\n  \"bridge\": \"cnio0\",\n  \"isGateway\": true,\n  \"ipMasq\": true,\n  \"ipam\": {\n      \"type\": \"host-local\",\n      \"subnet\": \"10.244.0.0\/16\",\n      \"routes\": [\n          { \"dst\": \"0.0.0.0\/0\"  }\n      ]\n  }\n}<\/code><\/pre>\n<p>vim 10-bridge.conf \uff1a<\/p>\n<pre><code>{\n  \"name\": \"k8s-pod-network\",\n  \"cniVersion\": \"0.3.0\",\n  \"plugins\": [\n      {\n          \"type\": \"calico\",\n          \"etcd_endpoints\": \"https:\/\/127.0.0.1:2379\",\n          \"log_level\": \"info\",\n          \"datastore_type\": \"kubernetes\",\n          \"nodename\": \"node1\",\n          \"ipam\": {\n              \"type\": \"calico-ipam\"\n          },\n          \"policy\": {\n              \"type\": \"k8s\"\n         },\n          \"kubernetes\": {\n              \"kubeconfig\": \"\/etc\/cni\/net.d\/calico-kubeconfig\"\n          }\n      },\n      {\n          \"type\": \"portmap\",\n          \"snat\": true,\n          \"capabilities\": {\"portMappings\": true}\n      }\n  ]\n}<\/code><\/pre>\n<p>\u4ece\u914d\u7f6e\u6587\u4ef6\u770b,\u6b63\u5728\u4f7f\u7528Calico\u4f5c\u4e3aCNI\u7f51\u7edc\u63d2\u4ef6\u3002<br 
\/>\n\u8fd9\u8bf4\u660ebridge\u63d2\u4ef6\u914d\u7f6e\u88ab\u8986\u76d6,kubelet\u542f\u52a8\u5931\u8d25\u5e94\u8be5\u8ddfCalico\u76f8\u5173\u3002\u9700\u8981\u505a\u7684\u5982\u4e0b:<\/p>\n<ol>\n<li>\n\u786e\u8ba4Calico\u6240\u6709\u7ec4\u4ef6\u662f\u5426\u5b89\u88c5\u6b63\u786e\u5e76\u8fd0\u884c\u6b63\u5e38\u3002\u8fd9\u5305\u62eccalicoctl\u3001calico-node\u3001calico-kube-controllers\u7b49\u3002\u53c2\u8003Calico\u5b89\u88c5\u6587\u6863\u8fdb\u884c\u786e\u8ba4\u3002<\/p>\n<p>\u786e\u8ba4Calico\u7ec4\u4ef6\u5b89\u88c5\u6b63\u786e\u7684\u6b65\u9aa4:<\/p>\n<p>\u53ef\u4ee5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u670d\u52a1\u5668\u662f\u5426\u5b89\u88c5\u4e86calicoctl CLI\u5de5\u5177:<\/p>\n<pre><code>bash\nwhich calicoctl<\/code><\/pre>\n<p>\u5982\u679c\u8fd4\u56decalicoctl\u8def\u5f84,\u5219\u8bf4\u660e\u5df2\u5b89\u88c5\u3002\u4f8b\u5982:<\/p>\n<pre><code>\/usr\/local\/bin\/calicoctl<\/code><\/pre>\n<ol>\n<li>\u5b89\u88c5calicoctl CLI\u5de5\u5177\u3002<\/li>\n<\/ol>\n<pre><code>bash\ncurl -O -L  https:\/\/github.com\/projectcalico\/calicoctl\/releases\/download\/v3.19.2\/calicoctl\nsudo install calicoctl \/usr\/local\/bin<\/code><\/pre>\n<p>\u4e2d\u79d1\u5927\u955c\u50cf\u6e90\u63d0\u4f9b\u4e86Calico\u5b89\u88c5\u5305,\u53ef\u4ee5\u4ece\u90a3\u91cc\u4e0b\u8f7dcalicoctl\u3002\u547d\u4ee4\u5982\u4e0b:<\/p>\n<pre><code>bash\ncurl -O -L  https:\/\/mirrors.ustc.edu.cn\/calico\/v3.19.2\/calicoctl\nchmod +x calicoctl\nsudo mv calicoctl \/usr\/local\/bin<\/code><\/pre>\n<ol start=\"2\">\n<li>\u914d\u7f6ecalicoctl\u4ee5\u8bbf\u95eeKubernetes API\u3002<\/li>\n<\/ol>\n<pre><code>bash\nmkdir -p \/etc\/calico \ncat <<EOF > \/etc\/calico\/calicoctl.cfg\napiVersion: projectcalico.org\/v3\nkind: CalicoAPIConfig\nmetadata:\nspec:\netcdEndpoints: \"https:\/\/127.0.0.1:2379\"\nkubeconfig: \"\/etc\/cni\/net.d\/calico-kubeconfig\" \nEOF<\/code><\/pre>\n<ol start=\"3\">\n<li>\u68c0\u67e5calico-node\u670d\u52a1\u662f\u5426\u5728\u8fd0\u884c\u3002<\/li>\n<\/ol>\n<pre><code>bash 
\nsystemctl status calico-node\n# calico-node.service \u5e94\u5904\u4e8e active (running) \u72b6\u6001<\/code><\/pre>\n<ol start=\"4\">\n<li>\u68c0\u67e5calico-kube-controllers\u90e8\u7f72\u662f\u5426\u6b63\u786e\u3002<\/li>\n<\/ol>\n<pre><code>bash\nkubectl -n kube-system get deployment calico-kube-controllers\n# \u5e94\u90e8\u7f72\u6210\u529f\u5e76\u6709\u53ef\u7528\u526f\u672c <\/code><\/pre>\n<ol start=\"5\">\n<li>\u4f7f\u7528<code>calicoctl node status<\/code>\u68c0\u67e5Calico\u8282\u70b9\u72b6\u6001\u662f\u5426\u4e3aready\u3002<\/li>\n<\/ol>\n<pre><code>bash\ncalicoctl node status\n# \u5e94\u663e\u793a\u8282\u70b9\u72b6\u6001\u662fready  <\/code><\/pre>\n<ol start=\"6\">\n<li>\u68c0\u67e5NetworkPolicy CRD\u662f\u5426\u90e8\u7f72\u3002<\/li>\n<\/ol>\n<pre><code>bash\nkubectl get crds networkpolicies.crd.projectcalico.org\n# \u5e94\u8fd4\u56deNetworkPolicy CRD<\/code><\/pre>\n<ol start=\"7\">\n<li>\u68c0\u67e5Calico Pods\u662f\u5426\u90fd\u5728\u6b63\u5e38\u8fd0\u884c\u3002<\/li>\n<\/ol>\n<pre><code>bash\nkubectl -n kube-system get pods -l k8s-app=calico-node\nkubectl -n kube-system get pods -l k8s-app=calico-kube-controllers \n# Pod\u90fd\u5e94\u5904\u4e8eRunning\u72b6\u6001<\/code><\/pre>\n<\/li>\n<li>\n\u68c0\u67e5\u4f60\u63d0\u4f9b\u7684Calico CNI\u914d\u7f6e\u662f\u5426\u6b63\u786e\u3002\u5173\u952e\u9879\u5982:<\/p>\n<ul>\n<li>etcd_endpoints:\u786e\u4fdd\u80fd\u8fde\u63a5\u4e0aEtcd<br \/>\n- kubeconfig:\u914d\u7f6e\u6587\u4ef6\u5e94\u5b58\u5728\u548c\u6b63\u786e<br \/>\n- nodename:\u8282\u70b9\u540d\u79f0\u5e94\u4e3a\u4f60\u7684\u673a\u5668\u540d\u79f0<br \/>\n- \u5269\u4f59\u53c2\u6570\u4e3a\u9ed8\u8ba4\u503c\u6216\u6309\u9700\u8bbe\u7f6e<\/p>\n<ol start=\"3\">\n<li>\u786e\u8ba4\u9632\u706b\u5899\u6ca1\u6709\u5c4f\u853dCalico\u670d\u52a1\u76f8\u5173\u7aef\u53e3\u3002Calico\u4f7f\u7528\u8bb8\u591a\u7aef\u53e3,\u9632\u706b\u5899\u53ef\u80fd\u5f71\u54cd\u5176\u6b63\u5e38\u5de5\u4f5c\u30024. 
\u91cd\u542fdocker\u3001kubelet\u548ccalico-node\u670d\u52a1\u4ee5\u52a0\u8f7d\u6b63\u786e\u7684Calico CNI\u914d\u7f6e\u3002<\/li>\n<\/ol>\n<\/li>\n<\/ul>\n<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl restart docker kubelet calico-node<\/code><\/pre>\n<ol start=\"5\">\n<li>\u91cd\u65b0\u521d\u59cb\u5316Kubernetes,\u67e5\u770bCalico Pod\u7f51\u7edc\u662f\u5426\u6b63\u5e38\u3002<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm init \nkubectl get pods -n kube-system \n# \u5e94\u6709calico-node Pod\u8fd0\u884c\u6b63\u5e38<\/code><\/pre>\n<ol start=\"6\">\n<li>\n\u68c0\u67e5\u8282\u70b9NetworkPolicy\u662f\u5426\u751f\u6548,\u786e\u8ba4Calico\u7f51\u7edc\u7b56\u7565\u529f\u80fd\u6b63\u5e38\u3002\n<\/li>\n<li>\n\u786e\u8ba4\u6587\u4ef6\u6743\u9650\u662f\u5426\u4e3a<code>0644<\/code>\u3002\n<\/li>\n<\/ol>\n<pre><code>bash\nstat 10-bridge.conf\nFile: 10-bridge.conf\nSize: 255             Blocks: 8          IO Block: 4096   regular file\nDevice: 801h\/2049d      Inode: 393377      Links: 1    \nAccess: (0644\/-rw-r--r--)  Uid: (    0\/    root)   Gid: (    0\/    root)<\/code><\/pre>\n<ol start=\"5\">\n<li>\u68c0\u67e5cni0\u7f51\u6865\u662f\u5426\u5b58\u5728\u3002<\/li>\n<\/ol>\n<pre><code>bash\nip addr show cnio0\n# \u5e94\u663e\u793acnio0\u7f51\u6865\u63a5\u53e3\u4fe1\u606f<\/code><\/pre>\n<ol 
start=\"6\">\n<li>\u53ef\u9009:\u5982\u679c\u4f7f\u7528\u7b2c\u4e09\u65b9CNI\u63d2\u4ef6,\u53c2\u8003\u5176\u6587\u6863\u68c0\u67e5\u5176\u914d\u7f6e\u548c\u90e8\u7f72\u662f\u5426\u6b63\u786e\u3002\u914d\u7f6e\u6587\u4ef6\u6b63\u786e\u4e14CNI\u63d2\u4ef6\u5de5\u4f5c\u6b63\u5e38\u540e,\u91cd\u542fkubelet\u670d\u52a1,\u7136\u540e\u91cd\u65b0\u521d\u59cb\u5316Kubernetes\u3002\u8fd9\u5e94\u8be5\u53ef\u4ee5\u89e3\u51b3\u56e0CNI\u7f51\u7edc\u9519\u8bef\u5bfc\u81f4\u7684kubelet\u542f\u52a8\u5931\u8d25\u95ee\u9898\u3002<\/li>\n<\/ol>\n<\/li>\n<li>\n\u786e\u8ba4CNI\u7f51\u7edc\u63d2\u4ef6\u7684\u6267\u884c\u6587\u4ef6\u662f\u5426\u53ef\u6267\u884c\u3002\u901a\u5e38\u4e3a<code>\/opt\/cni\/bin\/<\/code>\u76ee\u5f55\u4e0b\u7684\u811a\u672c\u3002\n<\/li>\n<li>\n\u5982\u679c\u4f7f\u7528\u7b2c\u4e09\u65b9CNI\u63d2\u4ef6,\u786e\u8ba4\u5176\u662f\u5426\u5b89\u88c5\u6b63\u786e\u5e76\u53ef\u7528\u3002\n<\/li>\n<li>\n\u91cd\u542fdocker\u670d\u52a1\u3002\u56e0\u4e3aCNI\u7f51\u7edc\u8bbe\u7f6e\u4f9d\u8d56docker,\u91cd\u542f\u53ef\u4ee5\u786e\u4fdddocker\u5904\u4e8e\u6b63\u5e38\u72b6\u6001\u3002\n<\/li>\n<li>\n\u91cd\u65b0\u542f\u52a8kubelet\u670d\u52a1\u3002\u9519\u8bef\u4fee\u590d\u540e,\u91cd\u542fkubelet\u4f7f\u4e4b\u52a0\u8f7d\u6b63\u786e\u7684CNI\u7f51\u7edc\u914d\u7f6e\u3002\n<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl restart docker \nsystemctl daemon-reload\nsystemctl restart kubelet<\/code><\/pre>\n<ol start=\"6\">\n<li>\u7136\u540e\u518d\u521d\u59cb\u5316 Kubernetes \u63a7\u5236\u5e73\u9762\u3002<\/li>\n<\/ol>\n<pre><code>bash \nkubeadm init<\/code><\/pre>\n<p>\u521d\u59cb\u5316\u5982\u679c\u6210\u529f,\u5219\u8bf4\u660eCNI\u7f51\u7edc\u9519\u8bef\u5df2\u7ecf\u89e3\u51b3,kubelet \u548c Kubernetes \u90fd\u80fd\u6b63\u5e38\u5de5\u4f5c\u4e86\u3002<\/p>\n<pre><code>[root@node1 net.d]# kubeadm init \\\n>   --apiserver-advertise-address=192.168.1.127 \\\n>   --image-repository registry.aliyuncs.com\/google_containers \\\n>   --kubernetes-version v1.22.4 \\\n>   
--service-cidr=10.96.0.0\/12 \\\n>   --pod-network-cidr=10.244.0.0\/16\n[init] Using Kubernetes version: v1.22.4\n[preflight] Running pre-flight checks\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: \/etc\/kubernetes\/manifests\/kube-apiserver.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: \/etc\/kubernetes\/manifests\/kube-controller-manager.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: \/etc\/kubernetes\/manifests\/kube-scheduler.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: \/etc\/kubernetes\/manifests\/etcd.yaml already exists\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack trace of this error execute with --v=5 or higher<\/code><\/pre>\n<p>\u8fd9\u662f\u56e0\u4e3a\u4f60\u7684\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b58\u5728Kubernetes\u7684\u9759\u6001Pod\u914d\u7f6e\u6587\u4ef6,\u6240\u4ee5kubeadm init\u68c0\u67e5\u5931\u8d25\u3002\u89e3\u51b3\u65b9\u6848:1. \u5907\u4efd\u5e76\u5220\u9664\u5df2\u6709\u7684Kubernetes\u9759\u6001Pod\u914d\u7f6e\u6587\u4ef6:<\/p>\n<pre><code>bash \nmv \/etc\/kubernetes\/manifests\/* \/etc\/kubernetes\/manifests.bak<\/code><\/pre>\n<p>\u7136\u540e\u91cd\u65b0\u8fd0\u884ckubeadm init\u521d\u59cb\u5316\u30022. 
\u5728kubeadm init\u65f6\u4f7f\u7528<code>--ignore-preflight-errors=FileExisting-etc-kubernetes-manifests<\/code>\u5ffd\u7565\u8fd9\u4e2a\u9519\u8bef\u3002<\/p>\n<pre><code>bash\nkubeadm init  --ignore-preflight-errors=FileExisting-etc-kubernetes-manifests<\/code><\/pre>\n<ol start=\"3\">\n<li>\u5982\u679c\u8981\u4fdd\u7559\u5df2\u6709\u914d\u7f6e,\u53ef\u4ee5\u4fee\u6539\u5bf9\u5e94\u7684<code>--config<\/code>\u53c2\u6570\u6307\u5411\u5907\u4efdManifests\u76ee\u5f55\u3002\u4f8b\u5982:<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm init --config=\/etc\/kubernetes\/manifests.bak <\/code><\/pre>\n<p>\u7136\u540ekubeadm\u4f1a\u57fa\u4e8e\u5907\u4efdManifests\u8fdb\u884c\u521d\u59cb\u5316\u3002<\/p>\n<pre><code>[root@node1 net.d]# kubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16  --image-repository registry.cn-hangzhou.aliyuncs.com\/google_containers\nI0427 11:20:24.406920   23590 version.go:255] remote version is much newer: v1.27.1; falling back to: stable-1.22\n[init] Using Kubernetes version: v1.22.17\n[preflight] Running pre-flight checks\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: \/etc\/kubernetes\/manifests\/kube-apiserver.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: \/etc\/kubernetes\/manifests\/kube-controller-manager.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: \/etc\/kubernetes\/manifests\/kube-scheduler.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: \/etc\/kubernetes\/manifests\/etcd.yaml already exists\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack trace of this error execute with --v=5 or 
higher<\/code><\/pre>\n<p>\u662f\u56e0\u4e3a\u8282\u70b9\u4e0a\u5df2\u5b58\u5728Kubernetes\u9759\u6001Pod\u914d\u7f6e\u6587\u4ef6,\u5bfc\u81f4kubeadm init\u68c0\u67e5\u5931\u8d25\u3002\u89e3\u51b3\u529e\u6cd5:<\/p>\n<ol>\n<li>\u5907\u4efd\u5e76\u5220\u9664\u5df2\u6709Kubernetes\u9759\u6001Pod\u914d\u7f6e\u6587\u4ef6:<\/li>\n<\/ol>\n<pre><code>bash\nmv \/etc\/kubernetes\/manifests\/* \/etc\/kubernetes\/manifests.bak<\/code><\/pre>\n<p>\u7136\u540e\u91cd\u65b0\u8fd0\u884ckubeadm init\u3002<\/p>\n<ol start=\"2\">\n<li>\u5728kubeadm init\u65f6\u4f7f\u7528<code>--ignore-preflight-errors=FileExisting-etc-kubernetes-manifests<\/code>\u5ffd\u7565\u8fd9\u4e2a\u9519\u8bef:<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm init  --ignore-preflight-errors=FileExisting-etc-kubernetes-manifests<\/code><\/pre>\n<ol start=\"3\">\n<li>\u5982\u679c\u8981\u4fdd\u7559\u5df2\u6709\u914d\u7f6e,\u53ef\u4ee5\u4f7f\u7528<code>--config<\/code>\u53c2\u6570\u6307\u5b9a\u5907\u4efdManifests\u76ee\u5f55:<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm init --config=\/etc\/kubernetes\/manifests.bak<\/code><\/pre>\n<p>kubeadm\u5c06\u57fa\u4e8e\u5907\u4efd\u7684Manifests\u8fdb\u884c\u521d\u59cb\u5316\u3002<\/p>\n<pre><code>kubeadm init \\\n --apiserver-advertise-address=192.168.1.127 \\\n --image-repository registry.aliyuncs.com\/google_containers \\\n --kubernetes-version v1.22.4 \\\n --service-cidr=10.96.0.0\/12 \\\n --pod-network-cidr=10.244.0.0\/16<\/code><\/pre>\n<pre><code>Running pre-flight checks\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: \/etc\/kubernetes\/manifests\/kube-apiserver.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: \/etc\/kubernetes\/manifests\/kube-controller-manager.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: \/etc\/kubernetes\/manifests\/kube-scheduler.yaml already exists\n  
     [ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: \/etc\/kubernetes\/manifests\/etcd.yaml already exists\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack trace of this error execute with --v=5 or higher\n[root@node1 net.d]# kubeadm init --apiserver-advertise-address=192.168.1.127 --pod-network-cidr=10.244.0.0\/16  --image-repository registry.cn-hangzhou.aliyuncs.com\/google_containers\nI0427 11:20:24.406920   23590 version.go:255] remote version is much newer: v1.27.1; falling back to: stable-1.22\n[init] Using Kubernetes version: v1.22.17\n[preflight] Running pre-flight checks\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: \/etc\/kubernetes\/manifests\/kube-apiserver.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: \/etc\/kubernetes\/manifests\/kube-controller-manager.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: \/etc\/kubernetes\/manifests\/kube-scheduler.yaml already exists\n       [ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: \/etc\/kubernetes\/manifests\/etcd.yaml already exists\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack trace of this error execute with --v=5 or higher\n[root@node1 net.d]# rm -f \/etc\/kubernetes\/manifests\/*\n[root@node1 net.d]# kubeadm init \\\n>   --apiserver-advertise-address=192.168.1.127 \\\n>   --image-repository registry.aliyuncs.com\/google_containers \\\n>   --kubernetes-version v1.22.4 \\\n>   --service-cidr=10.96.0.0\/12 \\\n>   --pod-network-cidr=10.244.0.0\/16\n[init] Using Kubernetes version: v1.22.4\n[preflight] Running pre-flight checks\n[preflight] Pulling images required for setting up a Kubernetes 
cluster\n[preflight] This might take a minute or two, depending on the speed of your internet connection\n[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'\n\n[certs] Using certificateDir folder \"\/etc\/kubernetes\/pki\"\n[certs] Using existing ca certificate authority\n[certs] Using existing apiserver certificate and key on disk\n[certs] Using existing apiserver-kubelet-client certificate and key on disk\n[certs] Using existing front-proxy-ca certificate authority\n[certs] Using existing front-proxy-client certificate and key on disk\n[certs] Using existing etcd\/ca certificate authority\n[certs] Using existing etcd\/server certificate and key on disk\n[certs] Using existing etcd\/peer certificate and key on disk\n[certs] Using existing etcd\/healthcheck-client certificate and key on disk\n[certs] Using existing apiserver-etcd-client certificate and key on disk\n[certs] Using the existing \"sa\" key\n[kubeconfig] Using kubeconfig folder \"\/etc\/kubernetes\"\n[kubeconfig] Using existing kubeconfig file: \"\/etc\/kubernetes\/admin.conf\"\n[kubeconfig] Using existing kubeconfig file: \"\/etc\/kubernetes\/kubelet.conf\"\n[kubeconfig] Using existing kubeconfig file: \"\/etc\/kubernetes\/controller-manager.conf\"\n[kubeconfig] Using existing kubeconfig file: \"\/etc\/kubernetes\/scheduler.conf\"\n[kubelet-start] Writing kubelet environment file with flags to file \"\/var\/lib\/kubelet\/kubeadm-flags.env\"\n[kubelet-start] Writing kubelet configuration to file \"\/var\/lib\/kubelet\/config.yaml\"\n[kubelet-start] Starting the kubelet\n[control-plane] Using manifest folder \"\/etc\/kubernetes\/manifests\"\n[control-plane] Creating static Pod manifest for \"kube-apiserver\"\n[control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n[control-plane] Creating static Pod manifest for \"kube-scheduler\"\n[etcd] Creating static Pod manifest for local etcd in \"\/etc\/kubernetes\/manifests\"\n[wait-control-plane] 
Waiting for the kubelet to boot up the control plane as static Pods from directory \"\/etc\/kubernetes\/manifests\". This can take up to 4m0s\n[kubelet-check] Initial timeout of 40s passed.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with error: Get \"http:\/\/localhost:10248\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with error: Get \"http:\/\/localhost:10248\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with error: Get \"http:\/\/localhost:10248\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with error: Get \"http:\/\/localhost:10248\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with error: Get \"http:\/\/localhost:10248\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n\n       Unfortunately, an error has occurred:\n               timed out waiting for the condition\n\n       This error is likely caused by:\n               - The kubelet is not running\n               - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)\n\n       If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:\n               - 'systemctl status 
kubelet'\n               - 'journalctl -xeu kubelet'\n\n       Additionally, a control plane component may have crashed or exited when started by the container runtime.\n       To troubleshoot, list all containers using your preferred container runtimes CLI.\n       Here is one example how you may list all Kubernetes containers running in docker:                - 'docker ps -a | grep kube | grep -v pause'\n               Once you have found the failing container, you can inspect its logs with:                - 'docker logs CONTAINERID'\n\nerror execution phase wait-control-plane: couldn't initialize a Kubernetes cluster\nTo see the stack trace of this error execute with --v=5 or higher<\/code><\/pre>\n<p>\u8fd9\u4e2a\u9519\u8bef\u662f\u56e0\u4e3akubelet\u670d\u52a1\u6ca1\u6709\u6b63\u5e38\u542f\u52a8,\u5bfc\u81f4\u63a7\u5236\u9762\u7ec4\u4ef6\u4f5c\u4e3a\u9759\u6001Pod\u8fd0\u884c\u5931\u8d25\u3002\u89e3\u51b3\u65b9\u6848:<\/p>\n<ol>\n<li>\u68c0\u67e5kubelet\u670d\u52a1\u72b6\u6001,\u662f\u5426\u6b63\u5e38\u8fd0\u884c:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl status kubelet<\/code><\/pre>\n<p>\u5982\u679c\u672a\u8fd0\u884c,\u8bf7\u542f\u52a8kubelet:<\/p>\n<pre><code>bash\nsystemctl start kubelet<\/code><\/pre>\n<ol start=\"2\">\n<li>\u68c0\u67e5kubelet\u65e5\u5fd7,\u67e5\u770b\u662f\u5426\u6709\u9519\u8bef\u4fe1\u606f:<\/li>\n<\/ol>\n<pre><code>bash\njournalctl -xeu kubelet <\/code><\/pre>\n<p>\u6839\u636e\u65e5\u5fd7\u4fe1\u606f\u8fdb\u884c\u76f8\u5e94\u4fee\u590d\u30023. \u68c0\u67e5docker\u670d\u52a1,\u5e76\u5217\u51faKubernetes\u76f8\u5173\u5bb9\u5668:<\/p>\n<pre><code>bash \ndocker ps -a | grep kube | grep -v pause<\/code><\/pre>\n<p>\u627e\u5230\u5931\u8d25\u7684\u5bb9\u5668,\u5e76\u68c0\u67e5\u5176\u65e5\u5fd7:<\/p>\n<pre><code>bash\ndocker logs CONTAINERID<\/code><\/pre>\n<p>\u6839\u636e\u65e5\u5fd7\u4fe1\u606f\u8fdb\u884c\u4fee\u590d\u30024. 
\u5982\u679ckubelet\u548c\u5bb9\u5668\u8fd0\u884c\u6b63\u5e38\u4f46\u521d\u59cb\u5316\u4ecd\u5931\u8d25,\u8bf7\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u91cd\u65b0\u8fd0\u884c:<\/p>\n<pre><code>bash\nrm -rf \/etc\/kubernetes\/pki \nkubeadm init ...<\/code><\/pre>\n<p>\u8fd9\u5c06\u5220\u9664Kubernetes\u8bc1\u4e66,\u7136\u540e\u91cd\u65b0\u521d\u59cb\u5316\u96c6\u7fa4\u3002Kubernetes\u7684\u8fd0\u884c\u4f9d\u8d56\u4e8e\u5e95\u5c42\u670d\u52a1\u7684\u6b63\u5e38\u8fd0\u884c,\u5982kubelet\u548cdocker\u3002\u53ea\u6709\u5b83\u4eec\u6b63\u5e38\u8fd0\u884c,\u63a7\u5236\u9762\u7ec4\u4ef6\u548c\u5e94\u7528\u624d\u80fd\u6b63\u5e38\u5de5\u4f5c\u3002\n<\/li>\n<li>\n\u6839\u636e\u65e5\u5fd7\u4fe1\u606f,kubelet\u542f\u52a8\u5931\u8d25\u662f\u56e0\u4e3aCNI\u7f51\u7edc\u63d2\u4ef6\u5b89\u88c5\u9519\u8bef\u3002\u89e3\u51b3\u529e\u6cd5:1. \u5378\u8f7d\u5f53\u524dCNI\u63d2\u4ef6:<\/p>\n<pre><code>bash\n\/etc\/cni\/net.d # rm -f *<\/code><\/pre>\n<ol start=\"2\">\n<li>\u91cd\u65b0\u5b89\u88c5CNI\u63d2\u4ef6\u3002\u8fd9\u91cc\u4ee5Flannel\u4e3a\u4f8b:<\/li>\n<\/ol>\n<pre><code>bash \ncurl -sSL \"https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml\" | kubectl apply -f -<\/code><\/pre>\n<ol start=\"3\">\n<li>\u91cd\u542fkubelet\u670d\u52a1:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl daemon-reload\nsystemctl restart kubelet<\/code><\/pre>\n<ol start=\"4\">\n<li>\u68c0\u67e5kubelet\u548cCNI\u7f51\u7edc\u63d2\u4ef6Pod\u662f\u5426\u6b63\u5e38\u8fd0\u884c:<\/li>\n<\/ol>\n<pre><code>bash\nkubectl get pods -n kube-system <\/code><\/pre>\n<ol start=\"5\">\n<li>\n\u5982\u679c\u4ecd\u6709\u95ee\u9898,\u8bf7\u518d\u6b21\u68c0\u67e5kubelet\u548cCNI\u7f51\u7edc\u63d2\u4ef6\u7684\u65e5\u5fd7,\u8fdb\u884c\u8fdb\u4e00\u6b65\u6392\u67e5\u548c\u4fee\u590d\u3002<\/p>\n<ol>\n<li>\u76f4\u63a5\u5728\u8282\u70b9\u4e0a\u4e0b\u8f7dFlannel CNI\u63d2\u4ef6\u6e05\u5355\u6587\u4ef6kube-flannel.yml:<\/li>\n<\/ol>\n<pre><code>bash\nwget 
https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml<\/code><\/pre>\n<ol start=\"2\">\n<li>\u5b89\u88c5Flannel CNI\u63d2\u4ef6:<\/li>\n<\/ol>\n<pre><code>bash\nkubectl apply -f kube-flannel.yml<\/code><\/pre>\n<ol start=\"3\">\n<li>\u68c0\u67e5Flannel Pods\u662f\u5426\u6b63\u5e38\u8fd0\u884c:<\/li>\n<\/ol>\n<pre><code>bash\nkubectl get pods -n kube-system -l app=flannel <\/code><\/pre>\n<ol start=\"4\">\n<li>\u91cd\u65b0\u542f\u52a8kubelet\u670d\u52a1:<\/li>\n<\/ol>\n<pre><code>bash \nsystemctl restart kubelet<\/code><\/pre>\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<pre><code>[root@node1 net.d]# wget https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml\n--2023-04-27 12:15:25--  https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 0.0.0.0, ::\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|0.0.0.0|:443... failed: Connection refused.\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|::|:443... 
failed: Connection refused.<\/code><\/pre>\n<p>\u5728\u6d4f\u89c8\u5668\u4e2d,\u53ef\u4ee5\u76f4\u63a5\u8bbf\u95ee\u8fd9\u4e2a URL \u4e0b\u8f7d kube-flannel.yml \u6587\u4ef6:<a href=\"https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml\">https:\/\/raw.githubusercontent.com\/coreos\/flannel\/master\/Documentation\/kube-flannel.yml<\/a><\/p>\n<p>\u5177\u4f53\u6b65\u9aa4:<\/p>\n<ol>\n<li>\u6253\u5f00\u6d4f\u89c8\u5668,\u8f93\u5165\u8be5 URL\u3002<\/li>\n<li>\u9875\u9762\u4f1a\u76f4\u63a5\u663e\u793a YAML \u6587\u4ef6\u5185\u5bb9,\u70b9\u51fb\u6d4f\u89c8\u5668\u7684&quot;\u53e6\u5b58\u4e3a&quot;\u6216\u8005&quot;\u4e0b\u8f7d&quot;\u6309\u94ae\u3002<\/li>\n<li>\u4fdd\u5b58\u6587\u4ef6\u540d\u4e3a kube-flannel.yml,\u4fdd\u5b58\u7c7b\u578b\u9009\u62e9 &quot;All Files&quot;\u3002<\/li>\n<li>\u6587\u4ef6\u4e0b\u8f7d\u5b8c\u6210\u540e,\u53ef\u4ee5\u901a\u8fc7 U \u76d8\u3001\u7f51\u7edc\u5171\u4eab\u7b49\u65b9\u5f0f\u628a kube-flannel.yml \u6587\u4ef6\u4f20\u5230\u8282\u70b9\u4e0a\u3002<\/li>\n<li>\u5728\u8282\u70b9\u4e0a,\u5b89\u88c5 Flannel:<\/li>\n<\/ol>\n<pre><code>bash\nkubectl apply -f kube-flannel.yml<\/code><\/pre>\n<ol start=\"6\">\n<li>\u68c0\u67e5 Flannel Pods \u662f\u5426\u6b63\u5e38\u8fd0\u884c:<\/li>\n<\/ol>\n<pre><code>bash \nkubectl get pods -n kube-system -l app=flannel<\/code><\/pre>\n<ol start=\"7\">\n<li>\u91cd\u542f kubelet \u670d\u52a1:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl restart kubelet<\/code><\/pre>\n<pre><code>[root@node1 net.d]# kubectl apply -f kube-flannel.yml\nThe connection to the server localhost:8080 was refused - did you specify the right host or port?\n[root@node1 net.d]# <\/code><\/pre>\n<p>\u8fd9\u4e2a\u9519\u8bef\u8868\u793a\u8282\u70b9\u65e0\u6cd5\u8fde\u63a5\u5230 Kubernetes API Server,\u6240\u4ee5\u65e0\u6cd5\u7533\u8bf7 Flannel \u8d44\u6e90\u3002\u8fd9\u901a\u5e38\u662f\u56e0\u4e3a:<\/p>\n<ol>\n<li>\nKubernetes API Server \u672a\u8fd0\u884c;\n<\/li>\n<li>\nkubectl 
\u914d\u7f6e\u6587\u4ef6\u914d\u7f6e\u9519\u8bef,\u5bfc\u81f4\u65e0\u6cd5\u8fde\u63a5 API Server\u3002<\/p>\n<p>\u89e3\u51b3\u65b9\u6cd5:<\/p>\n<ol>\n<li>\u68c0\u67e5 Kubernetes API Server \u662f\u5426\u6b63\u5e38\u8fd0\u884c:<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<pre><code>bash\nkubectl get nodes<\/code><\/pre>\n<p>\u5982\u679c\u65e0\u6cd5\u83b7\u53d6\u8282\u70b9\u4fe1\u606f,\u8868\u793a API Server \u672a\u8fd0\u884c,\u8bf7\u91cd\u65b0\u521d\u59cb\u5316 Kubernetes \u96c6\u7fa4\u3002<\/p>\n<ol start=\"2\">\n<li>\u68c0\u67e5 kubectl \u914d\u7f6e,\u4e00\u822c\u5728 ~\/.kube\/config \u6587\u4ef6\u4e2d:<\/li>\n<\/ol>\n<pre><code>bash \ncat ~\/.kube\/config<\/code><\/pre>\n<p>\u786e\u8ba4 server \u548c certificate-authority-data \u914d\u7f6e\u6b63\u786e,\u5982\u679c\u6709\u8bef,\u8bf7\u91cd\u65b0\u521d\u59cb\u5316 kubectl \u914d\u7f6e:<\/p>\n<pre><code>bash \nrm -rf ~\/.kube\nkubeadm init phase kubeconfig all<\/code><\/pre>\n<ol start=\"3\">\n<li>\u5982\u679c\u914d\u7f6e\u6b63\u786e,\u8bf7\u68c0\u67e5 Kubernetes \u670d\u52a1\u7f51\u7edc\u662f\u5426\u6b63\u5e38,\u4f8b\u5982 Flannel:<\/li>\n<\/ol>\n<pre><code>bash\nkubectl get pods -n kube-system -l app=flannel <\/code><\/pre>\n<p>\u5982\u679c Flannel \u672a\u90e8\u7f72\u6216\u672a\u8fd0\u884c,\u8bf7\u5148\u5b89\u88c5 Flannel,\u7136\u540e\u91cd\u542f kubelet \u540e\u91cd\u65b0\u5c1d\u8bd5\u5b89\u88c5 Flannel\u30024. 
\u518d\u6b21\u786e\u8ba4 kubectl \u53ef\u4ee5\u6b63\u5e38\u8fde\u63a5\u5e76\u64cd\u4f5c Kubernetes \u96c6\u7fa4\u540e,\u91cd\u65b0\u5b89\u88c5 Flannel:<\/p>\n<pre><code>bash\nkubectl apply -f kube-flannel.yml<\/code><\/pre>\n<h2>\u91cd\u65b0\u521d\u59cb\u5316\u96c6\u7fa4\uff1a<\/h2>\n<pre><code>kubeadm init \\\n  --apiserver-advertise-address=192.168.1.127 \\\n  --image-repository registry.aliyuncs.com\/google_containers \\\n  --kubernetes-version v1.22.4 \\\n  --service-cidr=10.96.0.0\/12 \\\n  --pod-network-cidr=10.244.0.0\/16\n  --ignore-preflight-errors=FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml<\/code><\/pre>\n<pre><code>[root@node1 ~]# kubeadm init \\\n>   --apiserver-advertise-address=192.168.1.127 \\\n>   --image-repository registry.aliyuncs.com\/google_containers \\\n>   --kubernetes-version v1.22.4 \\\n>   --service-cidr=10.96.0.0\/12 \\\n>   --pod-network-cidr=10.244.0.0\/16\n[init] Using Kubernetes version: v1.22.4\n[preflight] Running pre-flight checks\nerror execution phase preflight: [preflight] Some fatal errors occurred:\n        [ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: \/etc\/kubernetes\/manifests\/kube-apiserver.yaml already exists\n        [ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: \/etc\/kubernetes\/manifests\/kube-controller-manager.yaml already exists\n        [ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: \/etc\/kubernetes\/manifests\/kube-scheduler.yaml already exists\n        [ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: \/etc\/kubernetes\/manifests\/etcd.yaml already exists\n[preflight] If you know what you are doing, you can make a check non-fatal with <code>--ignore-preflight-errors=...<\/code>\nTo see the stack trace of this error execute with 
--v=5 or higher\n[root@node1 ~]#   --ignore-preflight-errors=FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml\n-bash: --ignore-preflight-errors=FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml: command not found<\/code><\/pre>\n<pre><code>kubeadm init \\\n  --apiserver-advertise-address=192.168.1.127 \\\n  --image-repository registry.aliyuncs.com\/google_containers \\\n  --kubernetes-version v1.22.4 \\\n  --service-cidr=10.96.0.0\/12 \\\n  --pod-network-cidr=10.244.0.0\/16 \\\n  --ignore-preflight-errors=FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml\n<\/code><\/pre>\n<pre><code>[kubelet-check] It seems like the kubelet isn't running or healthy.\n[kubelet-check] The HTTP call equal to 'curl -sSL http:\/\/localhost:10248\/healthz' failed with error: Get \"http:\/\/localhost:10248\/healthz\": dial tcp [::1]:10248: connect: connection refused.\n\n        Unfortunately, an error has occurred:\n                timed out waiting for the condition\n\n        This error is likely caused by:\n                - The kubelet is not running\n                - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)\n\n        If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:\n                - 'systemctl status kubelet'\n                - 'journalctl -xeu kubelet'\n\n        Additionally, a control plane component 
may have crashed or exited when started by the container runtime.\n        To troubleshoot, list all containers using your preferred container runtimes CLI.\n        Here is one example how you may list all Kubernetes containers running in docker:                - 'docker ps -a | grep kube | grep -v pause'\n                Once you have found the failing container, you can inspect its logs with:                - 'docker logs CONTAINERID'\n\nerror execution phase wait-control-plane: couldn't initialize a Kubernetes cluster\nTo see the stack trace of this error execute with --v=5 or higher<\/code><\/pre>\n<p>\u6839\u636e\u9519\u8bef\u4fe1\u606f,\u521d\u59cb\u5316 Kubernetes \u96c6\u7fa4\u5931\u8d25\u662f\u56e0\u4e3a:<\/p>\n<ol>\n<li>\nkubelet \u670d\u52a1\u672a\u8fd0\u884c;\n<\/li>\n<li>\nkubelet \u670d\u52a1\u914d\u7f6e\u9519\u8bef,\u5bfc\u81f4 unhealthy \u72b6\u6001;\n<\/li>\n<li>\n\u4e00\u4e9b\u63a7\u5236\u9762\u7ec4\u4ef6\u5728\u5bb9\u5668\u8fd0\u884c\u65f6\u542f\u52a8\u5931\u8d25\u3002<\/p>\n<p>\u89e3\u51b3\u65b9\u6848:<\/p>\n<ol>\n<li>\u68c0\u67e5 kubelet \u670d\u52a1\u72b6\u6001\u548c\u65e5\u5fd7:<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl status kubelet \njournalctl -xeu kubelet<\/code><\/pre>\n<p>\u6839\u636e\u65e5\u5fd7\u4fe1\u606f\u4fee\u590d kubelet \u914d\u7f6e\u6216\u670d\u52a1\u95ee\u9898,\u7136\u540e\u91cd\u542f kubelet\u3002<\/p>\n<ol start=\"2\">\n<li>\u68c0\u67e5\u662f\u5426\u6709 Kubernetes \u63a7\u5236\u9762\u7ec4\u4ef6\u5bb9\u5668\u542f\u52a8\u5931\u8d25:<\/li>\n<\/ol>\n<pre><code>bash \ndocker ps -a | grep kube | grep -v pause<\/code><\/pre>\n<p>\u627e\u5230\u5931\u8d25\u7684\u5bb9\u5668,\u68c0\u67e5\u5176\u65e5\u5fd7\u6392\u67e5\u95ee\u9898:<\/p>\n<pre><code>bash\ndocker logs CONTAINER_ID<\/code><\/pre>\n<p>\u6839\u636e\u65e5\u5fd7\u4fee\u590d\u5931\u8d25\u539f\u56e0,\u5982\u955c\u50cf\u7f3a\u5931\u3001\u53c2\u6570\u914d\u7f6e\u9519\u8bef\u7b49\u3002<\/p>\n<ol start=\"3\">\n<li>\u518d\u6b21\u521d\u59cb\u5316 Kubernetes 
\u96c6\u7fa4,\u68c0\u67e5\u7ec4\u4ef6\u72b6\u6001:<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm reset -f\nkubeadm init ...\n\nkubectl get componentstatuses\nkubectl get pods -n kube-system <\/code><\/pre>\n<ol start=\"4\">\n<li>\u5982\u679c\u4ecd\u6709\u7ec4\u4ef6\u672a\u542f\u52a8,\u8bf7\u8fdb\u4e00\u6b65\u68c0\u67e5\u5bf9\u5e94 Pods \u65e5\u5fd7\u4fee\u590d\u95ee\u9898\u3002<\/li>\n<\/ol>\n<pre><code>pr 27 12:45:44 node1 systemd[1]: kubelet.service: main process exited, code=exited, statApr 27 12:45:44 node1 systemd[1]: Unit kubelet.service entered failed state.\nApr 27 12:45:44 node1 systemd[1]: kubelet.service failed.\nlines 1381-1409\/1409 (END)<\/code><\/pre>\n<p>\u6839\u636e kubelet \u670d\u52a1\u65e5\u5fd7,kubelet \u8fdb\u7a0b\u9000\u51fa,\u5bfc\u81f4 kubelet \u670d\u52a1\u8fdb\u5165 failed \u72b6\u6001\u3002\u8fd9\u901a\u5e38\u662f\u56e0\u4e3a:<\/p>\n<ol>\n<li>\nkubelet \u914d\u7f6e\u6587\u4ef6\u914d\u7f6e\u9519\u8bef,\u5bfc\u81f4 kubelet \u542f\u52a8\u5931\u8d25;\n<\/li>\n<li>\ndocker \u6216\u5176\u4ed6\u5bb9\u5668\u8fd0\u884c\u65f6\u8fde\u63a5\u5931\u8d25,\u5bfc\u81f4 kubelet \u9000\u51fa;\n<\/li>\n<li>\nkubelet \u9759\u6001 Pod \u914d\u7f6e\u6587\u4ef6\u9519\u8bef,\u5bfc\u81f4 kubelet \u542f\u52a8\u5931\u8d25\u3002<\/p>\n<p>\u89e3\u51b3\u65b9\u6cd5:<\/p>\n<ol>\n<li>\n\u68c0\u67e5 kubelet \u914d\u7f6e\u6587\u4ef6 \/var\/lib\/kubelet\/kubeadm-flags.env \u662f\u5426\u6b63\u786e,<\/p>\n<pre><code>cat \/var\/lib\/kubelet\/kubeadm-flags.env<\/code><\/pre>\n<p>\u8fd9\u4e2a\u914d\u7f6e\u6587\u4ef6\u5305\u542b kubelet \u542f\u52a8\u53c2\u6570,\u4e3b\u8981\u6709:- KUBELET_KUBEADM_ARGS: kubeadm \u521d\u59cb\u5316\u8bbe\u7f6e\u7684 kubelet \u53c2\u6570<br \/>\n- KUBELET_SYSTEM_PODS_ARGS: kubelet \u7cfb\u7edf Pod \u53c2\u6570<br \/>\n- KUBELET_NETWORK_ARGS: kubelet \u7f51\u7edc\u53c2\u6570<br \/>\n- KUBELET_AUTHZ_ARGS: kubelet \u6388\u6743\u53c2\u6570<br \/>\n- KUBELET_CERTIFICATE_ARGS: kubelet 
\u8bc1\u4e66\u53c2\u6570\u5982\u679c\u53d1\u73b0\u6709\u53c2\u6570\u503c\u4e3a\u7a7a\u6216\u663e\u7136\u9519\u8bef,\u5219\u9700\u8981\u91cd\u65b0\u521d\u59cb\u5316 kubelet \u914d\u7f6e\u3002\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u91cd\u65b0\u521d\u59cb\u5316 kubelet \u914d\u7f6e:<\/p>\n<pre><code>bash\nrm -rf \/var\/lib\/kubelet\/kubeadm-flags.env \nkubeadm init phase kubelet config all<\/code><\/pre>\n<p>\u5982\u679c\u6709\u8bef,\u8bf7\u91cd\u65b0\u521d\u59cb\u5316 kubelet \u914d\u7f6e:<\/p>\n<p><font color=red>\u6211\u7684\u914d\u7f6e\u5982\u4e0b\uff1a<\/font><\/p>\n<pre><code>[root@node1 ~]# cat \/var\/lib\/kubelet\/kubeadm-flags.env\nKUBELET_KUBEADM_ARGS=\"--network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com\/google_containers\/pause:3.5\"<\/code><\/pre>\n<p>\u6b63\u786e\u7684 kubeadm-flags.env \u914d\u7f6e\u6587\u4ef6\u5185\u5bb9\u5e94\u8be5\u5982\u4e0b:<\/p>\n<pre><code>KUBELET_KUBEADM_ARGS=\"--network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com\/google_containers\/pause:3.5\" \nKUBELET_SYSTEM_PODS_ARGS=\"--pod-manifest-path=\/etc\/kubernetes\/manifests \"\nKUBELET_NETWORK_ARGS=\"--network-plugin=cni --cni-conf-dir=\/etc\/cni\/net.d --cni-bin-dir=\/opt\/cni\/bin\"\nKUBELET_AUTHZ_ARGS=\"--authorization-mode=Webhook --client-ca-file=\/etc\/kubernetes\/pki\/ca.crt\"\nKUBELET_CERTIFICATE_ARGS=\"--rotate-certificates=true --cert-dir=\/var\/lib\/kubelet\/pki\\\n                         --kubeconfig=\/var\/lib\/kubelet\/kubeconfig --bootstrap-kubeconfig=\/etc\/kubernetes\/bootstrap-kubelet.conf\"<\/code><\/pre>\n<p>\u53c2\u6570\u8bf4\u660e:- KUBELET_KUBEADM_ARGS: kubeadm \u521d\u59cb\u5316\u8bbe\u7f6e\u7684 kubelet \u53c2\u6570,\u8fd9\u91cc\u6307\u5b9a\u4e86 CNI \u7f51\u7edc\u548c pause \u955c\u50cf<br \/>\n- KUBELET_SYSTEM_PODS_ARGS: kubelet \u7cfb\u7edf Pod \u53c2\u6570,\u6307\u5b9a manifest \u8def\u5f84<br \/>\n- KUBELET_NETWORK_ARGS: kubelet \u7f51\u7edc\u53c2\u6570,\u6307\u5b9a CNI \u7f51\u7edc<br \/>\n- KUBELET_AUTHZ_ARGS: 
kubelet \u6388\u6743\u53c2\u6570,\u6307\u5b9a Webhook \u6a21\u5f0f\u548c CA \u8bc1\u4e66<br \/>\n- KUBELET_CERTIFICATE_ARGS: kubelet \u8bc1\u4e66\u53c2\u6570,\u6307\u5b9a\u8bc1\u4e66\u76f8\u5173\u8def\u5f84\u8fd9\u4e2a\u914d\u7f6e\u6587\u4ef6\u5305\u542b kubelet \u542f\u52a8\u6240\u9700\u7684\u6240\u6709\u4e3b\u8981\u53c2\u6570,\u683c\u5f0f\u4e5f\u662f\u6b63\u786e\u7684\u3002\u5907\u4efd\u73b0\u6709\u9519\u8bef\u914d\u7f6e\u6587\u4ef6\u540e,\u53ef\u4ee5\u5c06\u8fd9\u4e2a\u914d\u7f6e\u5185\u5bb9\u8986\u76d6\u5230 \/var\/lib\/kubelet\/kubeadm-flags.env \u4e2d,\u7136\u540e\u91cd\u542f kubelet \u670d\u52a1\u4f7f\u4e4b\u751f\u6548\u3002kubelet \u670d\u52a1\u91cd\u542f\u6b63\u5e38\u540e,\u53ef\u4ee5\u91cd\u65b0\u521d\u59cb\u5316 Kubernetes \u96c6\u7fa4\u3002\n<\/li>\n<\/ol>\n<\/li>\n<\/ol>\n<pre><code>bash\nrm -rf \/var\/lib\/kubelet\/kubeadm-flags.env <\/code><\/pre>\n<ol start=\"2\">\n<li>\u68c0\u67e5 docker \u670d\u52a1\u72b6\u6001\u5e76\u542f\u52a8:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl status docker\nsystemctl start docker<\/code><\/pre>\n<ol start=\"3\">\n<li>\u68c0\u67e5 kubelet \u9759\u6001 Pod \u914d\u7f6e\u6587\u4ef6 \/var\/lib\/kubelet\/config.yaml \u662f\u5426\u6b63\u786e,\u5982\u679c\u6709\u8bef,\u8bf7\u5907\u4efd\u540e\u5220\u9664:<\/li>\n<\/ol>\n<pre><code>bash\nmv \/var\/lib\/kubelet\/config.yaml \/var\/lib\/kubelet\/config.yaml.bak<\/code><\/pre>\n<p>\u7136\u540e\u91cd\u542f kubelet:<\/p>\n<pre><code>bash \nsystemctl restart kubelet<\/code><\/pre>\n<ol start=\"4\">\n<li>kubelet \u91cd\u65b0\u542f\u52a8\u540e,\u68c0\u67e5\u5176\u65e5\u5fd7\u8f93\u51fa,\u5982\u679c\u4e00\u5207\u6b63\u5e38,\u5219\u91cd\u65b0\u521d\u59cb\u5316 Kubernetes \u96c6\u7fa4:<\/li>\n<\/ol>\n<pre><code>bash\nkubeadm reset -f \nkubeadm init ...<\/code><\/pre>\n<pre><code>journalctl -xeu kubelet<\/code><\/pre>\n<pre><code>Apr 27 12:57:54 node1 kubelet[10814]: I0427 12:57:54.474288   10814 cni.go:239] \"Unable tApr 27 12:57:54 node1 kubelet[10814]: I0427 12:57:54.474391   10814 
docker_service.go:257Apr 27 12:57:54 node1 kubelet[10814]: I0427 12:57:54.474506   10814 cni.go:239] \"Unable tApr 27 12:57:54 node1 kubelet[10814]: I0427 12:57:54.484922   10814 docker_service.go:264Apr 27 12:57:54 node1 systemd[1]: kubelet.service: main process exited, code=exited, statApr 27 12:57:54 node1 kubelet[10814]: E0427 12:57:54.484964   10814 server.go:294] \"FaileApr 27 12:57:54 node1 systemd[1]: Unit kubelet.service entered failed state.\nApr 27 12:57:54 node1 systemd[1]: kubelet.service failed.\nApr 27 12:57:54 node1 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.\n-- Subject: Unit kubelet.service has finished shutting down\n-- Defined-By: systemd\n-- Support: http:\/\/lists.freedesktop.org\/mailman\/listinfo\/systemd-devel\n-- \n-- Unit kubelet.service has finished shutting down.\nlines 1386-1414\/1414 (END)<\/code><\/pre>\n<p>\u6839\u636e kubelet \u670d\u52a1\u65e5\u5fd7,kubelet \u8fdb\u7a0b\u518d\u6b21\u9000\u51fa,\u5bfc\u81f4 kubelet \u670d\u52a1\u8fdb\u5165 failed \u72b6\u6001\u3002<\/p>\n<p>\u65e5\u5fd7\u663e\u793a:<\/p>\n<ol>\n<li>kubelet \u65e0\u6cd5\u8bbe\u7f6e CNI \u7f51\u7edc,\u53ef\u80fd\u662f CNI \u914d\u7f6e\u6587\u4ef6\u9519\u8bef\u6216 CNI \u63d2\u4ef6\u4e8c\u8fdb\u5236\u6587\u4ef6\u7f3a\u5931;<\/li>\n<li>kubelet \u65e0\u6cd5\u8fde\u63a5 Docker \u670d\u52a1,\u53ef\u80fd\u662f Docker \u670d\u52a1\u672a\u8fd0\u884c\u3002<\/li>\n<\/ol>\n<pre><code>[root@node1 net.d]# pwd\n\/etc\/cni\/net.d\n[root@node1 net.d]# cat kube-flannel.yml \n---\nkind: Namespace\napiVersion: v1\nmetadata:\n  name: kube-flannel\n  labels:\n    k8s-app: flannel\n    pod-security.kubernetes.io\/enforce: privileged\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io\/v1\nmetadata:\n  labels:\n    k8s-app: flannel\n  name: flannel\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - pods\n  verbs:\n  - get\n- apiGroups:\n  - \"\"\n  resources:\n  - nodes\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - nodes\/status\n 
 verbs:\n  - patch\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - clustercidrs\n  verbs:\n  - list\n  - watch\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io\/v1\nmetadata:\n  labels:\n    k8s-app: flannel\n  name: flannel\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: flannel\nsubjects:\n- kind: ServiceAccount\n  name: flannel\n  namespace: kube-flannel\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  labels:\n    k8s-app: flannel\n  name: flannel\n  namespace: kube-flannel\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: kube-flannel-cfg\n  namespace: kube-flannel\n  labels:\n    tier: node\n    k8s-app: flannel\n    app: flannel\ndata:\n  cni-conf.json: |\n    {\n      \"name\": \"cbr0\",\n      \"cniVersion\": \"0.3.1\",\n      \"plugins\": [\n        {\n          \"type\": \"flannel\",\n          \"delegate\": {\n            \"hairpinMode\": true,\n            \"isDefaultGateway\": true\n          }\n        },\n        {\n          \"type\": \"portmap\",\n          \"capabilities\": {\n            \"portMappings\": true\n          }\n        }\n      ]\n    }\n  net-conf.json: |\n    {\n      \"Network\": \"10.244.0.0\/16\",\n      \"Backend\": {\n        \"Type\": \"vxlan\"\n      }\n    }\n---\napiVersion: apps\/v1\nkind: DaemonSet\nmetadata:\n  name: kube-flannel-ds\n  namespace: kube-flannel\n  labels:\n    tier: node\n    app: flannel\n    k8s-app: flannel\nspec:\n  selector:\n    matchLabels:\n      app: flannel\n  template:\n    metadata:\n      labels:\n        tier: node\n        app: flannel\n    spec:\n      affinity:\n        nodeAffinity:\n          requiredDuringSchedulingIgnoredDuringExecution:\n            nodeSelectorTerms:\n            - matchExpressions:\n              - key: kubernetes.io\/os\n                operator: In\n                values:\n                - linux\n      hostNetwork: true\n      priorityClassName: system-node-critical\n      
tolerations:\n      - operator: Exists\n        effect: NoSchedule\n      serviceAccountName: flannel\n      initContainers:\n      - name: install-cni-plugin\n        image: docker.io\/flannel\/flannel-cni-plugin:v1.1.2\n       #image: docker.io\/rancher\/mirrored-flannelcni-flannel-cni-plugin:v1.1.2\n        command:\n        - cp\n        args:\n        - -f\n        - \/flannel\n        - \/opt\/cni\/bin\/flannel\n        volumeMounts:\n        - name: cni-plugin\n          mountPath: \/opt\/cni\/bin\n      - name: install-cni\n        image: docker.io\/flannel\/flannel:v0.21.4\n       #image: docker.io\/rancher\/mirrored-flannelcni-flannel:v0.21.4\n        command:\n        - cp\n        args:\n        - -f\n        - \/etc\/kube-flannel\/cni-conf.json\n        - \/etc\/cni\/net.d\/10-flannel.conflist\n        volumeMounts:\n        - name: cni\n          mountPath: \/etc\/cni\/net.d\n        - name: flannel-cfg\n          mountPath: \/etc\/kube-flannel\/\n      containers:\n      - name: kube-flannel\n        image: docker.io\/flannel\/flannel:v0.21.4\n       #image: docker.io\/rancher\/mirrored-flannelcni-flannel:v0.21.4\n        command:\n        - \/opt\/bin\/flanneld\n        args:\n        - --ip-masq\n        - --kube-subnet-mgr\n        resources:\n          requests:\n            cpu: \"100m\"\n            memory: \"50Mi\"\n        securityContext:\n          privileged: false\n          capabilities:\n            add: [\"NET_ADMIN\", \"NET_RAW\"]\n        env:\n        - name: POD_NAME\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.name\n        - name: POD_NAMESPACE\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.namespace\n        - name: EVENT_QUEUE_DEPTH\n          value: \"5000\"\n        volumeMounts:\n        - name: run\n          mountPath: \/run\/flannel\n        - name: flannel-cfg\n          mountPath: \/etc\/kube-flannel\/\n        - name: xtables-lock\n          mountPath: 
\/run\/xtables.lock\n      volumes:\n      - name: run\n        hostPath:\n          path: \/run\/flannel\n      - name: cni-plugin\n        hostPath:\n          path: \/opt\/cni\/bin\n      - name: cni\n        hostPath:\n          path: \/etc\/cni\/net.d\n      - name: flannel-cfg\n        configMap:\n          name: kube-flannel-cfg\n      - name: xtables-lock\n        hostPath:\n          path: \/run\/xtables.lock\n          type: FileOrCreate\n[root@node1 net.d]# <\/code><\/pre>\n<p>\u6839\u636e kube-flannel.yml \u914d\u7f6e\u6587\u4ef6\u5185\u5bb9\u68c0\u67e5,\u4e3b\u8981\u914d\u7f6e\u5982\u4e0b:<\/p>\n<ol>\n<li>\nflannel \u540e\u7aef\u7f51\u7edc\u7c7b\u578b\u4e3a vxlan;\n<\/li>\n<li>\n\u7f51\u7edc\u8303\u56f4\u4e3a 10.244.0.0\/16;\n<\/li>\n<li>\ncni-conf.json \u4e2d cbr0 \u63a5\u53e3\u4e3a Pod \u7f51\u7edc\u63a5\u53e3,type \u4e3a flannel;\n<\/li>\n<li>\nflannel CNI \u4e8c\u8fdb\u5236\u6587\u4ef6\u4e3a \/opt\/cni\/bin\/flannel;\n<\/li>\n<li>\nflanneld \u53c2\u6570 --ip-masq \u8868\u793a\u542f\u7528 SNAT;<\/p>\n<p>\u914d\u7f6e\u6587\u4ef6\u5185\u5bb9\u770b\u8d77\u6765\u6b63\u786e\u65e0\u8bef\u3002\n<\/li>\n<li>\n\/opt\/cni\/bin \u76ee\u5f55\u4e0b\u662f\u5426\u5b58\u5728 flannel \u4e8c\u8fdb\u5236\u6587\u4ef6;\n<\/li>\n<\/ol>\n<pre><code>[root@node1 net.d]# cd \/opt\/cni\/bin\n[root@node1 bin]# ll\ntotal 49832\n-rwxr-xr-x 1 root root 2782728 Jan 19 05:09 bandwidth\n-rwxr-xr-x 1 root root 3104192 Jan 19 05:09 bridge\ndrwxrwxr-x 2 root root    4096 Apr 26 18:44 cni-plugins-linux-amd64-v0.8.7\n-rwxr-xr-x 1 root root 7607056 Jan 19 05:09 dhcp\n-rwxr-xr-x 1 root root 2863024 Jan 19 05:09 dummy\n-rwxr-xr-x 1 root root 3165352 Jan 19 05:09 firewall\n-rwxr-xr-x 1 root root 2775224 Jan 19 05:09 host-device\n-rwxr-xr-x 1 root root 2332792 Jan 19 05:09 host-local\n-rwxr-xr-x 1 root root 2871792 Jan 19 05:09 ipvlan\n-rwxr-xr-x 1 root root 2396976 Jan 19 05:09 loopback\n-rwxr-xr-x 1 root root 2893624 Jan 19 05:09 macvlan\n-rwxr-xr-x 1 root root 2689440 Jan 19 05:09 
portmap\n-rwxr-xr-x 1 root root 3000032 Jan 19 05:09 ptp\n-rwxr-xr-x 1 root root 2542400 Jan 19 05:09 sbr\n-rwxr-xr-x 1 root root 2074072 Jan 19 05:09 static\n-rwxr-xr-x 1 root root 2456920 Jan 19 05:09 tuning\n-rwxr-xr-x 1 root root 2867512 Jan 19 05:09 vlan\n-rwxr-xr-x 1 root root 2566424 Jan 19 05:09 vrf\n[root@node1 bin]# <\/code><\/pre>\n<p>\u6839\u636e \/opt\/cni\/bin \u76ee\u5f55\u5217\u8868,flannel CNI \u4e8c\u8fdb\u5236\u6587\u4ef6\u786e\u5b9e\u7f3a\u5931\u3002<\/p>\n<p>\u8bf7\u4e0b\u8f7d flannel CNI \u4e8c\u8fdb\u5236\u6587\u4ef6,\u5e76\u5c06\u5176\u653e\u7f6e\u5728 \/opt\/cni\/bin \u76ee\u5f55\u4e0b:<\/p>\n<pre><code>bash\nwget https:\/\/github.com\/flannel-io\/flannel\/releases\/download\/v0.21.4\/flannel-v0.21.4-linux-amd64.tar.gz\ntar -xvf flannel-v0.21.4-linux-amd64.tar.gz\ncp flanneld \/opt\/cni\/bin\/flannel <\/code><\/pre>\n<p>\u4e0b\u8f7d\u540e,\u5c06 flanneld \u4e8c\u8fdb\u5236\u6587\u4ef6\u89e3\u538b\u5e76\u653e\u7f6e\u5728 \/opt\/cni\/bin \u76ee\u5f55\u4e0b:<\/p>\n<p><a href=\"https:\/\/github.com\/flannel-io\/flannel\/releases\/tag\/v0.21.4\/\">https:\/\/github.com\/flannel-io\/flannel\/releases\/tag\/v0.21.4\/<\/a><\/p>\n<p><a href=\"https:\/\/github.com\/flannel-io\/flannel\/releases\/download\/v0.21.4\/flannel-v0.21.4-linux-amd64.tar.gz\">flannel-v0.21.4-linux-amd64.tar.gz<\/a><\/p>\n<ol>\n<li>\u89e3\u538b flannel-v0.21.4-linux-amd64.tar.gz \u6587\u4ef6:<\/li>\n<\/ol>\n<pre><code>bash\ntar -xvf flannel-v0.21.4-linux-amd64.tar.gz<\/code><\/pre>\n<ol start=\"2\">\n<li>\u8fdb\u5165\u89e3\u538b\u540e\u7684\u76ee\u5f55,\u5c06 flanneld \u4e8c\u8fdb\u5236\u6587\u4ef6\u590d\u5236\u5230 CNI \u76ee\u5f55\u4e0b:<\/li>\n<\/ol>\n<pre><code>bash\ncd flannel-v0.21.4-linux-amd64\ncp flanneld \/opt\/cni\/bin\/flannel<\/code><\/pre>\n<ol start=\"3\">\n<li>\u91cd\u65b0\u521d\u59cb\u5316 kubelet \u5e76\u542f\u52a8:<\/li>\n<\/ol>\n<pre><code>bash\nsystemctl daemon-reload \nsystemctl restart kubelet<\/code><\/pre>\n<pre><code>journalctl -u 
kubelet<\/code><\/pre>\n<pre><code>[root@node1 flannel]# journalctl -u kubelet\n-- Logs begin at Thu 2023-04-27 09:47:40 CST, end at Thu 2023-04-27 13:31:41 CST. --\nApr 27 09:47:50 node1 systemd[1]: Started kubelet: The Kubernetes Node Agent.\nApr 27 09:47:50 node1 kubelet[1317]: E0427 09:47:50.544554    1317 server.go:206] \"FailedApr 27 09:47:50 node1 systemd[1]: kubelet.service: main process exited, code=exited, statApr 27 09:47:50 node1 systemd[1]: Unit kubelet.service entered failed state.\nApr 27 09:47:50 node1 systemd[1]: kubelet.service failed.\nApr 27 09:48:00 node1 systemd[1]: kubelet.service holdoff time over, scheduling restart.\nApr 27 09:48:00 node1 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.\nApr 27 09:48:00 node1 systemd[1]: Started kubelet: The Kubernetes Node Agent.\nApr 27 09:48:00 node1 kubelet[7218]: E0427 09:48:00.851842    7218 server.go:206] \"FailedApr 27 09:48:00 node1 systemd[1]: kubelet.service: main process exited, code=exited, statApr 27 09:48:00 node1 systemd[1]: Unit kubelet.service entered failed state.\nApr 27 09:48:00 node1 systemd[1]: kubelet.service failed.\nApr 27 09:48:11 node1 systemd[1]: kubelet.service holdoff time over, scheduling restart.\nApr 27 09:48:11 node1 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.\nApr 27 09:48:11 node1 systemd[1]: Started kubelet: The Kubernetes Node Agent.\nApr 27 09:48:11 node1 kubelet[8097]: E0427 09:48:11.093840    8097 server.go:206] \"FailedApr 27 09:48:11 node1 systemd[1]: kubelet.service: main process exited, code=exited, statApr 27 09:48:11 node1 systemd[1]: Unit kubelet.service entered failed state.\nApr 27 09:48:11 node1 systemd[1]: kubelet.service failed.\nApr 27 09:48:21 node1 systemd[1]: kubelet.service holdoff time over, scheduling restart.\nApr 27 09:48:21 node1 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.\nApr 27 09:48:21 node1 systemd[1]: Started kubelet: The Kubernetes Node Agent.\nApr 27 09:48:21 node1 kubelet[8111]: E0427 09:48:21.352944    
8111 server.go:206] \"FailedApr 27 09:48:21 node1 systemd[1]: kubelet.service: main process exited, code=exited, statApr 27 09:48:21 node1 systemd[1]: Unit kubelet.service entered failed state.\nApr 27 09:48:21 node1 systemd[1]: kubelet.service failed.\nApr 27 09:48:31 node1 systemd[1]: kubelet.service holdoff time over, scheduling restart.\nApr 27 09:48:31 node1 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.\nlines 1-29<\/code><\/pre>\n<p>\u6839\u636e\u65e5\u5fd7,kubelet \u542f\u52a8\u5931\u8d25\u662f\u56e0\u4e3a\u627e\u4e0d\u5230\u914d\u7f6e\u6587\u4ef6 \/var\/lib\/kubelet\/config.yaml\u3002<br \/>\n\u5bfc\u81f4\u8fd9\u4e2a\u95ee\u9898\u7684\u539f\u56e0\u53ef\u80fd\u662f<\/p>\n<h1>\u5b89\u88c5coredns:1.8.0<\/h1>\n<h1>\u5b89\u88c5etcd:3.4.13-0<\/h1>\n<h1>\u5b89\u88c5 pause:3.4.1<\/h1>\n<h1>\u4e09\u53f0\u673a\u5668\u6253\u6210\u7cfb\u7edf\u955c\u50cf\uff0c\u540c\u6b65\u5230\u5c40\u57df\u7f51\u670d\u52a1\u5668\uff1a<\/h1>\n","protected":false},"excerpt":{"rendered":"<p>\u524d\u8a00 \u6839\u636e\u516c\u53f8\u9879\u76ee\u7684\u8981\u6c42\uff0c\u9879\u76ee\u7ec4\u8981\u79bb\u7ebf\u90e8\u7f72\u5230\u5ba2\u6237\u7684\u673a\u623f\uff0c\u9700\u8981\u5728\u6709\u7f51\u7684\u670d\u52a1\u5668\u5b89\u88c5k8s\u96c6\u7fa4\uff0c\u7136\u540e\u5728 \u6253\u6210\u7cfb\u7edf\u955c\u50cf [&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"om_disable_all_campaigns":false,"_mi_skip_tracking":false},"categories":[1],"tags":[23],"aioseo_notices":[],"yoast_head":"<!-- This site is optimized with the Yoast SEO plugin v20.5 - https:\/\/yoast.com\/wordpress\/plugins\/seo\/ -->\n<title>\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51 - 
\u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug<\/title>\n<meta name=\"description\" content=\"\u79bb\u7ebf\u90e8\u7f72K8S\u9ad8\u53ef\u7528\u96c6\u7fa4\u670d\u52a1\u5668\" \/>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/\" \/>\n<meta property=\"og:locale\" content=\"zh_CN\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51 - \u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug\" \/>\n<meta property=\"og:description\" content=\"\u79bb\u7ebf\u90e8\u7f72K8S\u9ad8\u53ef\u7528\u96c6\u7fa4\u670d\u52a1\u5668\" \/>\n<meta property=\"og:url\" content=\"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/\" \/>\n<meta property=\"og:site_name\" content=\"\u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug\" \/>\n<meta property=\"article:published_time\" content=\"2023-04-26T06:01:26+00:00\" \/>\n<meta property=\"article:modified_time\" content=\"2023-05-09T04:54:11+00:00\" \/>\n<meta name=\"author\" content=\"\u603b\u662f\u5e78\u798f\u7684\u8001\u8c4c\u8c46\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta name=\"twitter:label1\" content=\"\u4f5c\u8005\" \/>\n\t<meta name=\"twitter:data1\" content=\"\u603b\u662f\u5e78\u798f\u7684\u8001\u8c4c\u8c46\" \/>\n\t<meta name=\"twitter:label2\" content=\"\u9884\u8ba1\u9605\u8bfb\u65f6\u95f4\" \/>\n\t<meta name=\"twitter:data2\" content=\"50\u5206\" \/>\n<script type=\"application\/ld+json\" 
class=\"yoast-schema-graph\">{\"@context\":\"https:\/\/schema.org\",\"@graph\":[{\"@type\":\"WebPage\",\"@id\":\"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/\",\"url\":\"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/\",\"name\":\"\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51 - \u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug\",\"isPartOf\":{\"@id\":\"https:\/\/zhaocunwei.co.uk\/#website\"},\"datePublished\":\"2023-04-26T06:01:26+00:00\",\"dateModified\":\"2023-05-09T04:54:11+00:00\",\"author\":{\"@id\":\"https:\/\/zhaocunwei.co.uk\/#\/schema\/person\/dfb1dc0fc4a330c41908d477cd99c0b4\"},\"description\":\"\u79bb\u7ebf\u90e8\u7f72K8S\u9ad8\u53ef\u7528\u96c6\u7fa4\u670d\u52a1\u5668\",\"breadcrumb\":{\"@id\":\"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/#breadcrumb\"},\"inLanguage\":\"zh-CN\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/\"]}]},{\"@type\":\"BreadcrumbList\",\"@id\":\"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/#breadcrumb\",\"itemListElement\":[{\"@type\":\"ListItem\",\"position\":1,\"name\":\"\u9996\u9875\",\"item\":\"https:\/\/zhaocunwei.co.uk\/\"},{\"@type\":\"ListItem\",\"position\":2,\"name\":\"\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51\"}]},{\"@type\":\"WebSite\",\"@id\":\"https:\/\/zhaocunwei.co.uk\/#website\",\"url\":\"https:\/\/zhaocunwei.co.uk\/\",\"name\":\"\u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug\",\"description\":\"This site shares programming 
bugs\",\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\/\/zhaocunwei.co.uk\/?s={search_term_string}\"},\"query-input\":\"required name=search_term_string\"}],\"inLanguage\":\"zh-CN\"},{\"@type\":\"Person\",\"@id\":\"https:\/\/zhaocunwei.co.uk\/#\/schema\/person\/dfb1dc0fc4a330c41908d477cd99c0b4\",\"name\":\"\u603b\u662f\u5e78\u798f\u7684\u8001\u8c4c\u8c46\",\"image\":{\"@type\":\"ImageObject\",\"inLanguage\":\"zh-CN\",\"@id\":\"https:\/\/zhaocunwei.co.uk\/#\/schema\/person\/image\/\",\"url\":\"https:\/\/secure.gravatar.com\/avatar\/4226cc1ca6640507df1d2d4ba3da7a62?s=96&d=mm&r=g\",\"contentUrl\":\"https:\/\/secure.gravatar.com\/avatar\/4226cc1ca6640507df1d2d4ba3da7a62?s=96&d=mm&r=g\",\"caption\":\"\u603b\u662f\u5e78\u798f\u7684\u8001\u8c4c\u8c46\"},\"sameAs\":[\"http:\/\/zhaocunwei.co.uk\"],\"url\":\"https:\/\/zhaocunwei.co.uk\/index.php\/author\/18500103508163-com\/\"}]}<\/script>\n<!-- \/ Yoast SEO plugin. -->","yoast_head_json":{"title":"\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51 - \u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug","description":"\u79bb\u7ebf\u90e8\u7f72K8S\u9ad8\u53ef\u7528\u96c6\u7fa4\u670d\u52a1\u5668","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/","og_locale":"zh_CN","og_type":"article","og_title":"\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51 - 
\u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug","og_description":"\u79bb\u7ebf\u90e8\u7f72K8S\u9ad8\u53ef\u7528\u96c6\u7fa4\u670d\u52a1\u5668","og_url":"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/","og_site_name":"\u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug","article_published_time":"2023-04-26T06:01:26+00:00","article_modified_time":"2023-05-09T04:54:11+00:00","author":"\u603b\u662f\u5e78\u798f\u7684\u8001\u8c4c\u8c46","twitter_card":"summary_large_image","twitter_misc":{"\u4f5c\u8005":"\u603b\u662f\u5e78\u798f\u7684\u8001\u8c4c\u8c46","\u9884\u8ba1\u9605\u8bfb\u65f6\u95f4":"50\u5206"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"WebPage","@id":"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/","url":"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/","name":"\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51 - 
\u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug","isPartOf":{"@id":"https:\/\/zhaocunwei.co.uk\/#website"},"datePublished":"2023-04-26T06:01:26+00:00","dateModified":"2023-05-09T04:54:11+00:00","author":{"@id":"https:\/\/zhaocunwei.co.uk\/#\/schema\/person\/dfb1dc0fc4a330c41908d477cd99c0b4"},"description":"\u79bb\u7ebf\u90e8\u7f72K8S\u9ad8\u53ef\u7528\u96c6\u7fa4\u670d\u52a1\u5668","breadcrumb":{"@id":"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/#breadcrumb"},"inLanguage":"zh-CN","potentialAction":[{"@type":"ReadAction","target":["https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/"]}]},{"@type":"BreadcrumbList","@id":"https:\/\/zhaocunwei.co.uk\/index.php\/2023\/04\/26\/k8s-2\/#breadcrumb","itemListElement":[{"@type":"ListItem","position":1,"name":"\u9996\u9875","item":"https:\/\/zhaocunwei.co.uk\/"},{"@type":"ListItem","position":2,"name":"\u6211\u4f7f\u7528\u4e09\u53f0Centos\u79bb\u7ebf\u90e8\u7f72K8S\u96c6\u7fa4-\u4e3b\u8981\u662f\u516c\u53f8\u4ea7\u54c1\u90e8\u7f72\u5728\u7532\u65b9\u7684\u673a\u623f\uff0c\u7532\u65b9\u673a\u623f\u662f\u5c40\u57df\u7f51"}]},{"@type":"WebSite","@id":"https:\/\/zhaocunwei.co.uk\/#website","url":"https:\/\/zhaocunwei.co.uk\/","name":"\u672c\u7f51\u7ad9\u5206\u4eab\u7f16\u7a0b\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u7684bug","description":"This site shares programming bugs","potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/zhaocunwei.co.uk\/?s={search_term_string}"},"query-input":"required 
name=search_term_string"}],"inLanguage":"zh-CN"},{"@type":"Person","@id":"https:\/\/zhaocunwei.co.uk\/#\/schema\/person\/dfb1dc0fc4a330c41908d477cd99c0b4","name":"\u603b\u662f\u5e78\u798f\u7684\u8001\u8c4c\u8c46","image":{"@type":"ImageObject","inLanguage":"zh-CN","@id":"https:\/\/zhaocunwei.co.uk\/#\/schema\/person\/image\/","url":"https:\/\/secure.gravatar.com\/avatar\/4226cc1ca6640507df1d2d4ba3da7a62?s=96&d=mm&r=g","contentUrl":"https:\/\/secure.gravatar.com\/avatar\/4226cc1ca6640507df1d2d4ba3da7a62?s=96&d=mm&r=g","caption":"\u603b\u662f\u5e78\u798f\u7684\u8001\u8c4c\u8c46"},"sameAs":["http:\/\/zhaocunwei.co.uk"],"url":"https:\/\/zhaocunwei.co.uk\/index.php\/author\/18500103508163-com\/"}]}},"_links":{"self":[{"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/posts\/92"}],"collection":[{"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/comments?post=92"}],"version-history":[{"count":5,"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/posts\/92\/revisions"}],"predecessor-version":[{"id":704,"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/posts\/92\/revisions\/704"}],"wp:attachment":[{"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/media?parent=92"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/categories?post=92"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/zhaocunwei.co.uk\/index.php\/wp-json\/wp\/v2\/tags?post=92"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}