[{"title": "K8S\u96c6\u7fa4\u72b6\u6001\u57fa\u672c\u64cd\u4f5c", "desc": "\u8fd9\u91cc\u987a\u5e26\u8bb2\u4e0bNODE\u8282\u70b9\u52a0\u5165\u548c\u7f51\u7edc\u63d2\u4ef6\u7684\u5b89\u88c5Kubernetes\u96c6\u7fa4\uff0ctoken\u7b2c\u4e8c\u5929\u5c31\u5931\u6548\u4e86\uff0c\u9700\u8981\u91cd\u65b0\u751f\u6210\u3002", "content": "

Joining nodes

1. List the available tokens; empty output means they have all expired

[root@k8s ~]# kubeadm token list

2. If a usable token exists, compute the discovery-token-ca-cert-hash

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \\

   openssl dgst -sha256 -hex | sed 's/^.* //'

3. Or simply regenerate the full join command

[root@k8s ~]# kubeadm token create --print-join-command

kubeadm join masters:16443 --token pyachl.bdttyj22wmbsj2qr --discovery-token-ca-cert-hash sha256:0353b3c61a96ee46dc7191975ce520dc9c337c7e8c30142e137757b2de1275c9 

To join as a master (control-plane) node, append --control-plane to the command above

4. After joining the k8s cluster, run the following as prompted, otherwise kubectl reports an X509 certificate error

mkdir -p $HOME/.kube

cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

chown $(id -u):$(id -g) $HOME/.kube/config

5. Certificates can be renewed when they expire (a kubeadm upgrade renews them automatically)

kubeadm upgrade apply

kubeadm upgrade node 
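Certificate expiry can also be inspected and renewed directly, without an upgrade; these kubeadm subcommands exist in 1.23:

kubeadm certs check-expiration
kubeadm certs renew all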

6. Install the network plugin; I use Weave

Reference: https://www.weave.works/docs/net/latest/kubernetes/kube-addon/

[root@k8s ~]# kubectl apply -f \"https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\\n')\"

serviceaccount/weave-net created

clusterrole.rbac.authorization.k8s.io/weave-net created

clusterrolebinding.rbac.authorization.k8s.io/weave-net created

role.rbac.authorization.k8s.io/weave-net created

rolebinding.rbac.authorization.k8s.io/weave-net created

daemonset.apps/weave-net created

After the plugin is installed, check that its pods are healthy and that the nodes report Ready

7. If the status is wrong, follow the logs with journalctl -f to troubleshoot
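For example, to follow just the kubelet logs (kubeadm runs the kubelet under systemd):

journalctl -u kubelet -f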

8. To see the specific reason a POD failed to start, use the kubectl describe command

[root@k8s ~]# kubectl describe pod weave-net-7s2md --namespace=kube-system 

Name:                 weave-net-7s2md

Namespace:            kube-system

Priority:             2000001000

Priority Class Name:  system-node-critical

Node:                 k8s.node.238/192.168.1.238

Start Time:           Mon, 11 Apr 2022 14:34:38 +0800

Labels:               controller-revision-hash=59d968cb54

                      name=weave-net

                      pod-template-generation=1

Annotations:          <none>

Status:               Running

IP:                   192.168.1.238

IPs:

  IP:           192.168.1.238

Controlled By:  DaemonSet/weave-net

Init Containers:

  weave-init:

    Container ID:  docker://1a9ea91ae05ca9e9238d3f52033529cf95339037cc39a4f0a96533a158969e90

    Image:         ghcr.io/weaveworks/launcher/weave-kube:2.8.1

    Image ID:      docker-pullable://ghcr.io/weaveworks/launcher/weave-kube@sha256:d797338e7beb17222e10757b71400d8471bdbd9be13b5da38ce2ebf597fb4e63

    Port:          <none>

    Host Port:     <none>

    Command:

      /home/weave/init.sh

    State:          Terminated

      Reason:       Completed

      Exit Code:    0

      Started:      Mon, 11 Apr 2022 14:34:40 +0800

      Finished:     Mon, 11 Apr 2022 14:34:41 +0800

    Ready:          True

    Restart Count:  0

    Environment:    <none>

    Mounts:

      /host/etc from cni-conf (rw)

      /host/home from cni-bin2 (rw)

      /host/opt from cni-bin (rw)

      /lib/modules from lib-modules (rw)

      /run/xtables.lock from xtables-lock (rw)

      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-trzkk (ro)

Containers:

  weave:

    Container ID:  docker://90387541cad6ebe2cfd0c2588822956f051a74d50e9d593ab04fc483978d27dc

    Image:         ghcr.io/weaveworks/launcher/weave-kube:2.8.1

    Image ID:      docker-pullable://ghcr.io/weaveworks/launcher/weave-kube@sha256:d797338e7beb17222e10757b71400d8471bdbd9be13b5da38ce2ebf597fb4e63

    Port:          <none>

    Host Port:     <none>

    Command:

      /home/weave/launch.sh

    State:          Running

      Started:      Mon, 11 Apr 2022 14:34:41 +0800

    Ready:          True

    Restart Count:  0

    Requests:

      cpu:      50m

      memory:   100Mi

    Readiness:  http-get http://127.0.0.1:6784/status delay=0s timeout=1s period=10s #success=1 #failure=3

    Environment:

      HOSTNAME:         (v1:spec.nodeName)

      INIT_CONTAINER:  true

    Mounts:

      /host/etc/machine-id from machine-id (ro)

      /host/var/lib/dbus from dbus (rw)

      /run/xtables.lock from xtables-lock (rw)

      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-trzkk (ro)

      /weavedb from weavedb (rw)

  weave-npc:

    Container ID:   docker://0ddc4bcd36dffcea950e4e8a057d552b7a606b976540ad0e90767c1db3b218b6

    Image:          ghcr.io/weaveworks/launcher/weave-npc:2.8.1

    Image ID:       docker-pullable://ghcr.io/weaveworks/launcher/weave-npc@sha256:38d3e30a97a2260558f8deb0fc4c079442f7347f27c86660dbfc8ca91674f14c

    Port:           <none>

    Host Port:      <none>

    State:          Running

      Started:      Mon, 11 Apr 2022 14:34:42 +0800

    Ready:          True

    Restart Count:  0

    Requests:

      cpu:     50m

      memory:  100Mi

    Environment:

      HOSTNAME:   (v1:spec.nodeName)

    Mounts:

      /run/xtables.lock from xtables-lock (rw)

      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-trzkk (ro)

Conditions:

  Type              Status

  Initialized       True 

  Ready             True 

  ContainersReady   True 

  PodScheduled      True 

Volumes:

  weavedb:

    Type:          HostPath (bare host directory volume)

    Path:          /var/lib/weave

    HostPathType:  

  cni-bin:

    Type:          HostPath (bare host directory volume)

    Path:          /opt

    HostPathType:  

  cni-bin2:

    Type:          HostPath (bare host directory volume)

    Path:          /home

    HostPathType:  

  cni-conf:

    Type:          HostPath (bare host directory volume)

    Path:          /etc

    HostPathType:  

  dbus:

    Type:          HostPath (bare host directory volume)

    Path:          /var/lib/dbus

    HostPathType:  

  lib-modules:

    Type:          HostPath (bare host directory volume)

    Path:          /lib/modules

    HostPathType:  

  machine-id:

    Type:          HostPath (bare host directory volume)

    Path:          /etc/machine-id

    HostPathType:  FileOrCreate

  xtables-lock:

    Type:          HostPath (bare host directory volume)

    Path:          /run/xtables.lock

    HostPathType:  FileOrCreate

  kube-api-access-trzkk:

    Type:                    Projected (a volume that contains injected data from multiple sources)

    TokenExpirationSeconds:  3607

    ConfigMapName:           kube-root-ca.crt

    ConfigMapOptional:       <nil>

    DownwardAPI:             true

QoS Class:                   Burstable

Node-Selectors:              <none>

Tolerations:                 :NoSchedule op=Exists

                             :NoExecute op=Exists

                             node.kubernetes.io/disk-pressure:NoSchedule op=Exists

                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists

                             node.kubernetes.io/network-unavailable:NoSchedule op=Exists

                             node.kubernetes.io/not-ready:NoExecute op=Exists

                             node.kubernetes.io/pid-pressure:NoSchedule op=Exists

                             node.kubernetes.io/unreachable:NoExecute op=Exists

                             node.kubernetes.io/unschedulable:NoSchedule op=Exists

Events:                      <none>

[root@k8s ~]# 



"}, {"title": "k8s1.23.4\u9ad8\u53ef\u7528\u96c6\u7fa4\u90e8\u7f72\u5fc3\u5f972", "desc": "\u5728\u8bfb\u61c2\u5e76\u64cd\u4f5c\u5b8c\u6210\u300ak8s1.23.4\u9ad8\u53ef\u7528\u96c6\u7fa4\u90e8\u7f72\u5fc3\u5f97\u300b\u57fa\u7840\u4e0a\uff0c\u8fdb\u884c kubernetes\u90e8\u7f72\u3002", "content": "

Machine names and IPs:

192.168.1.220 k8s.mast.220

192.168.1.233 k8s.node.233

192.168.1.238 k8s.node.238

192.168.1.249 masters

192.168.1.212 k8s-mast-212

192.168.1.240 k8s.mast.240

--------------------------------- Installation begins (MASTER nodes) ---------------------------

1. Pull the base k8s Docker images on all master nodes

List the required images

[root@k8s docker]# kubeadm config images list

k8s.gcr.io/kube-apiserver:v1.23.4

k8s.gcr.io/kube-controller-manager:v1.23.4

k8s.gcr.io/kube-scheduler:v1.23.4

k8s.gcr.io/kube-proxy:v1.23.4

k8s.gcr.io/pause:3.6

k8s.gcr.io/etcd:3.5.1-0

k8s.gcr.io/coredns/coredns:v1.8.6

Log in to the Alibaba Cloud Docker registry

docker login --username=120231708@qq.com registry.cn-hangzhou.aliyuncs.com

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.4

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.4

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.4

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.4

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0

docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6

docker pull registry.cn-hangzhou.aliyuncs.com/zhoudl/weave-kube:2.8.1

docker pull registry.cn-hangzhou.aliyuncs.com/zhoudl/weave-npc:2.8.1

Retag the images back to their upstream names

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.4 k8s.gcr.io/kube-apiserver:v1.23.4 && \\

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.4 k8s.gcr.io/kube-controller-manager:v1.23.4 && \\

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.4 k8s.gcr.io/kube-scheduler:v1.23.4 && \\

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.4 k8s.gcr.io/kube-proxy:v1.23.4 && \\

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6 k8s.gcr.io/pause:3.6 && \\

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0 k8s.gcr.io/etcd:3.5.1-0 && \\

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6 && \\

docker tag registry.cn-hangzhou.aliyuncs.com/zhoudl/weave-kube:2.8.1 ghcr.io/weaveworks/launcher/weave-kube:2.8.1 && \\

docker tag registry.cn-hangzhou.aliyuncs.com/zhoudl/weave-npc:2.8.1 ghcr.io/weaveworks/launcher/weave-npc:2.8.1
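To avoid typing a pull and a tag for every image, the two steps can be collapsed into a loop; a sketch assuming the same mirror paths as above:

for img in kube-apiserver:v1.23.4 kube-controller-manager:v1.23.4 kube-scheduler:v1.23.4 kube-proxy:v1.23.4 pause:3.6 etcd:3.5.1-0; do
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$img
  docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done
# coredns sits under an extra path segment upstream, so handle it separately
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6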

2. Install keepalived and haproxy on all MASTER nodes for high availability (my default VIP holder is .220; the VIP is 192.168.1.249)

2.1 keepalived needs two files: the service health-check script and the main configuration file

The apiserver health-check script check_apiserver.sh is as follows (identical on all three masters):

[root@k8s-mast-212 ~]# more /etc/keepalived/check_apiserver.sh 

#!/bin/sh

errorExit() {

    echo \"*** $*\" 1>&2

    exit 1

}


curl --silent --max-time 2 --insecure https://localhost:6443 -o /dev/null || errorExit \"Error GET https://localhost:6443/\"

if ip addr | grep -q 192.168.1.249; then

    curl --silent --max-time 2 --insecure https://192.168.1.249:6443/ -o /dev/null || errorExit \"Error GET https://192.168.1.249:6443/\"

fi
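Note: the keepalived configurations below track /etc/keepalived/check_haproxy.sh, which is not reproduced here. A minimal sketch of such a script (my assumption, not the original file) just checks that the haproxy process is alive:

#!/bin/sh
# mark this node unhealthy if no haproxy process exists
killall -0 haproxy || exit 1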

2.2 keepalived configuration on the VIP server

[root@k8s ~]# more /etc/keepalived/keepalived.conf 

! Configuration File for keepalived


global_defs {

    router_id LVS_DEVEL

}


vrrp_script check_haproxy {

    script \"/etc/keepalived/check_haproxy.sh\"         # \u68c0\u6d4b\u811a\u672c\u8def\u5f84

    interval 3

    weight -2 

    fall 10

    rise 2

}




vrrp_instance VI_1 {

    state MASTER            # MASTER

    interface ens32         # local NIC name

    virtual_router_id 51

    priority 101             # weight 101

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass 1111

    }

    virtual_ipaddress {

        192.168.1.249      # virtual IP

    }

    track_script {

        check_haproxy       # tracked script

    }

}

2.3 Configuration on .212

[root@k8s-mast-212 ~]# more /etc/keepalived/keepalived.conf 

! Configuration File for keepalived

global_defs {

    router_id LVS_DEVEL

}

vrrp_script check_haproxy {

    script \"/etc/keepalived/check_haproxy.sh\"         # \u68c0\u6d4b\u811a\u672c\u8def\u5f84

    interval 3

    weight -2 

    fall 10

    rise 2

}


vrrp_instance VI_1 {

    state BACKUP           # BACKUP

    interface ens32         # local NIC name

    virtual_router_id 51

    priority 100             # weight 100

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass 1111

    }

    virtual_ipaddress {

        192.168.1.249      # virtual IP

    }

    track_script {

        check_haproxy       # tracked script

    }

}

2.4 keepalived configuration on .240

[root@k8s ~]# hostname

k8s.mast.240

[root@k8s ~]# more /etc/keepalived/keepalived.conf 

! Configuration File for keepalived


global_defs {

    router_id LVS_DEVEL

}


vrrp_script check_haproxy {

    script \"/etc/keepalived/check_haproxy.sh\"         # \u68c0\u6d4b\u811a\u672c\u8def\u5f84

    interval 3

    weight -2 

    fall 10

    rise 2

}


vrrp_instance VI_1 {

    state BACKUP            # BACKUP

    interface ens32         # local NIC name

    virtual_router_id 51

    priority 99             # weight 99

    advert_int 1

    authentication {

        auth_type PASS

        auth_pass 1111

    }

    virtual_ipaddress {

        192.168.1.249      # virtual IP

    }

    track_script {

        check_haproxy       # tracked script

    }

}

2.5 The haproxy configuration is as follows (identical on all three masters):

[root@k8s ~]# more /etc/haproxy/haproxy.cfg 

# /etc/haproxy/haproxy.cfg

#---------------------------------------------------------------------

# Global settings

#---------------------------------------------------------------------

global

    log /dev/log local0

    log /dev/log local1 notice

    daemon


#---------------------------------------------------------------------

# common defaults that all the 'listen' and 'backend' sections will

# use if not designated in their block

#---------------------------------------------------------------------

defaults

    mode                    http

    log                     global

    option                  httplog

    option                  dontlognull

    option http-server-close

    option forwardfor       except 127.0.0.0/8

    option                  redispatch

    retries                 1

    timeout http-request    10s

    timeout queue           20s

    timeout connect         5s

    timeout client          20s

    timeout server          20s

    timeout http-keep-alive 10s

    timeout check           10s


#---------------------------------------------------------------------

# apiserver frontend which proxys to the control plane nodes

#---------------------------------------------------------------------

frontend apiserver

    bind *:16443

    mode tcp

    option tcplog

    default_backend apiserver


#---------------------------------------------------------------------

# round robin balancing for apiserver

#---------------------------------------------------------------------

backend apiserver

    option httpchk GET /healthz

    http-check expect status 200

    mode tcp

    option ssl-hello-chk

    balance     roundrobin

        server master1 192.168.1.220:6443 check

        server master2 192.168.1.212:6443 check

        server master3 192.168.1.240:6443 check
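After step 3 below initializes the control plane, the VIP frontend can be verified end to end; on a default kubeadm cluster /healthz is readable without authentication:

curl --insecure https://192.168.1.249:16443/healthz
# expected response: ok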

3. On the VIP master .220, initialize the k8s control plane

[root@k8s ~]# kubeadm init \\

  --control-plane-endpoint=masters:16443 \\

  --kubernetes-version v1.23.4 \\

  --service-cidr=10.96.96.0/24 

[init] Using Kubernetes version: v1.23.4

[preflight] Running pre-flight checks

[preflight] Pulling images required for setting up a Kubernetes cluster

[preflight] This might take a minute or two, depending on the speed of your internet connection

[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'

[certs] Using certificateDir folder \"/etc/kubernetes/pki\"

[certs] Generating \"ca\" certificate and key

[certs] Generating \"apiserver\" certificate and key

[certs] apiserver serving cert is signed for DNS names [k8s.mast.220 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local masters] and IPs [10.96.96.1 192.168.1.220]

[certs] Generating \"apiserver-kubelet-client\" certificate and key

[certs] Generating \"front-proxy-ca\" certificate and key

[certs] Generating \"front-proxy-client\" certificate and key

[certs] Generating \"etcd/ca\" certificate and key

[certs] Generating \"etcd/server\" certificate and key

[certs] etcd/server serving cert is signed for DNS names [k8s.mast.220 localhost] and IPs [192.168.1.220 127.0.0.1 ::1]

[certs] Generating \"etcd/peer\" certificate and key

[certs] etcd/peer serving cert is signed for DNS names [k8s.mast.220 localhost] and IPs [192.168.1.220 127.0.0.1 ::1]

[certs] Generating \"etcd/healthcheck-client\" certificate and key

[certs] Generating \"apiserver-etcd-client\" certificate and key

[certs] Generating \"sa\" key and public key

[kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"

[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address

[kubeconfig] Writing \"admin.conf\" kubeconfig file

[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address

[kubeconfig] Writing \"kubelet.conf\" kubeconfig file

[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address

[kubeconfig] Writing \"controller-manager.conf\" kubeconfig file

[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address

[kubeconfig] Writing \"scheduler.conf\" kubeconfig file

[kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"

[kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"

[kubelet-start] Starting the kubelet

[control-plane] Using manifest folder \"/etc/kubernetes/manifests\"

[control-plane] Creating static Pod manifest for \"kube-apiserver\"

[control-plane] Creating static Pod manifest for \"kube-controller-manager\"

[control-plane] Creating static Pod manifest for \"kube-scheduler\"

[etcd] Creating static Pod manifest for local etcd in \"/etc/kubernetes/manifests\"

[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\". This can take up to 4m0s

[apiclient] All control plane components are healthy after 8.018196 seconds

[upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace

[kubelet] Creating a ConfigMap \"kubelet-config-1.23\" in namespace kube-system with the configuration for the kubelets in the cluster

NOTE: The \"kubelet-config-1.23\" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just \"kubelet-config\". Kubeadm upgrade will handle this transition transparently.

[upload-certs] Skipping phase. Please see --upload-certs

[mark-control-plane] Marking the node k8s.mast.220 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]

[mark-control-plane] Marking the node k8s.mast.220 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

[bootstrap-token] Using token: 7ex6em.otjn22xd1vdraquq

[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles

[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes

[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials

[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token

[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster

[bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace

[kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key

[addons] Applied essential addon: CoreDNS

[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address

[addons] Applied essential addon: kube-proxy


Your Kubernetes control-plane has initialized successfully!


To start using your cluster, you need to run the following as a regular user:


  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config


Alternatively, if you are the root user, you can run:


  export KUBECONFIG=/etc/kubernetes/admin.conf


You should now deploy a pod network to the cluster.

Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:

  https://kubernetes.io/docs/concepts/cluster-administration/addons/


You can now join any number of control-plane nodes by copying certificate authorities

and service account keys on each node and then running the following as root:


  kubeadm join masters:16443 --token 7ex6em.otjn22xd1vdraquq \\

        --discovery-token-ca-cert-hash sha256:1e4caff411bc02a9a01b9ca1f295292c1194b355807eb8d4db172709d1f7d09b \\

        --control-plane 


Then you can join any number of worker nodes by running the following on each as root:


kubeadm join masters:16443 --token 7ex6em.otjn22xd1vdraquq \\

        --discovery-token-ca-cert-hash sha256:1e4caff411bc02a9a01b9ca1f295292c1194b355807eb8d4db172709d1f7d09b 

4. Once initialization completes, copy the certificates from the VIP master to the other two master nodes

scp /etc/kubernetes/pki/ca.* root@192.168.1.212:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/sa.* root@192.168.1.212:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/front-proxy-ca.* root@192.168.1.212:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/etcd/ca.* root@192.168.1.212:/etc/kubernetes/pki/etcd/

scp /etc/kubernetes/admin.conf root@192.168.1.212:/etc/kubernetes/

scp /etc/kubernetes/pki/ca.* root@192.168.1.240:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/sa.* root@192.168.1.240:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/front-proxy-ca.* root@192.168.1.240:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/etcd/ca.* root@192.168.1.240:/etc/kubernetes/pki/etcd/

scp /etc/kubernetes/admin.conf root@192.168.1.240:/etc/kubernetes/
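Hand-copying these files can be avoided: kubeadm 1.23 can distribute the control-plane certificates itself. A sketch using flags that exist in 1.23 (placeholders in angle brackets are illustrative):

kubeadm init --control-plane-endpoint=masters:16443 --upload-certs
kubeadm join masters:16443 --token <token> --discovery-token-ca-cert-hash sha256:<hash> --control-plane --certificate-key <key printed by init>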

5. On the other two masters, run the control-plane join command from the step-3 output

  kubeadm join masters:16443 --token 7ex6em.otjn22xd1vdraquq \\

        --discovery-token-ca-cert-hash sha256:1e4caff411bc02a9a01b9ca1f295292c1194b355807eb8d4db172709d1f7d09b \\

        --control-plane 

6. After they join, any of the three masters can operate the cluster with kubectl.

[root@k8s ~]# kubectl get node

NAME           STATUS   ROLES                  AGE     VERSION

k8s-mast-212   NotReady    control-plane,master   3d      v1.23.4

k8s.mast.220   NotReady    control-plane,master   3d      v1.23.4

k8s.mast.240   NotReady    control-plane,master   2d18h   v1.23.4

------------------------ Joining and operating NODE nodes is covered in \"K8S Cluster Status Basic Operations\" -----------------------


"}, {"title": "k8s1.23.4\u9ad8\u53ef\u7528\u96c6\u7fa4\u90e8\u7f72\u5fc3\u5f97", "desc": "\u4e0a\u5b66\u65f6\u4e00\u5b9a\u8981\u5b66\u597d\u82f1\u8bed\uff0c\u56e0\u4e3a\u6700\u65b0\u7684\u65b9\u6848\u548c\u6587\u6863\u5927\u591a\u90fd\u662f\u82f1\u6587\u7684\uff0c\u4e2d\u6587\u7248\u7684\u5f88\u591a\u90fd\u8fc7\u65f6\u4e86\u3002\u770b\u6559\u5b66\u89c6\u9891\u53ea\u4f1a\u6559\u4f60\u600e\u4e48\u505a\u4e0d\u4f1a\u6559\u4f60\u4e3a\u4ec0\u4e48\u8fd9\u4e48\u505a\uff0c\u9047\u5230\u7248\u672c\u66f4\u65b0\u6216\u8005\u66f4\u6539\u67b6\u6784\u5c31\u61f5\u4e86\u3002\u5373\u4f7f\u56e0\u4e3a\u6ca1\u5b66\u597d\u4e5f\u8981\u575a\u6301\u770b\u5b98\u65b9\u6587\u6863\u501f\u52a9\u767e\u5ea6\u7ffb\u8bd1\u548c\u8c37\u6b4c\u7ffb\u8bd1\u8fb9\u770b\u8fb9\u505a\uff0c\u505a\u7740\u505a\u5c31\u4f1a\u4e86\uff0c\u82f1\u6587\u6c34\u5e73\u4e5f\u63d0\u9ad8\u4e86\uff0c\u4e00\u5b9a\u8981\u591a\u52a8\u624b\u591a\u5c1d\u8bd5\u597d\u8bb0\u6027\u62b5\u4e0d\u8fc7\u70c2\u7b14\u5934\uff0c\u505a\u591a\u4e86\u81ea\u7136\u5c31\u8bb0\u4f4f\u4e86\u3002", "content": "

1. The k8s HA cluster design follows the official docs: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/

kubeadm installation reference: https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/

1.1 Deploy with kubeadm

1.2 OS: CentOS Linux release 7.9.2009 (Core)

1.3 Three MASTER nodes (at least three) with 4 GB RAM each, plus two worker nodes

2. Push the base images k8s needs to a domestic registry and pull them to each machine in advance; this saves a lot of time and greatly reduces the chance of failure.

[root@k8s docker]# kubeadm config images list

k8s.gcr.io/kube-apiserver:v1.23.4

k8s.gcr.io/kube-controller-manager:v1.23.4

k8s.gcr.io/kube-scheduler:v1.23.4

k8s.gcr.io/kube-proxy:v1.23.4

k8s.gcr.io/pause:3.6

k8s.gcr.io/etcd:3.5.1-0

k8s.gcr.io/coredns/coredns:v1.8.6

3. For the local registry I use Nexus (the setup is documented elsewhere on this blog); the free Alibaba Cloud registry also works.

Local registry: https://mvn.jschrj.com

Alibaba registry: https://cr.console.aliyun.com/

4. Alibaba Cloud Kubernetes yum mirror: https://developer.aliyun.com/mirror/kubernetes?spm=a2c6h.13651102.0.0.3e221b11hNNOXX

5. An HA cluster needs at least three MASTER nodes; losing any one of them does not affect normal cluster operation.

------------------------ Detailed installation steps (three master hosts, two nodes) ---------------------

1. Basic setup on every machine

1.0 Set the server hostnames

1.1 Disable swap

1.2 Disable SELinux

1.3 Disable firewalld (commands for 1.1-1.3 are sketched below)
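A minimal sketch of the usual CentOS 7 commands for steps 1.1-1.3:

swapoff -a && sed -i '/ swap / s/^/#/' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
systemctl disable --now firewalld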

1.4 Edit /etc/hosts

[root@k8s-mast-212 ~]# more /etc/hosts

192.168.1.220 k8s.mast.220

192.168.1.233 k8s.node.233

192.168.1.238 k8s.node.238

192.168.1.249 masters

192.168.1.212 k8s-mast-212

192.168.1.240 k8s.mast.240

1.5 Verify that the MAC address and product UUID are unique on every machine

[root@k8s-mast-212 ~]# ip link

[root@k8s-mast-212 ~]# cat /sys/class/dmi/id/product_uuid 

1.6 Load the br_netfilter module and configure sysctl parameters

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf

br_netfilter

EOF


cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf

net.ipv4.ip_forward=1

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

EOF

sysctl --system

1.7 Configure the domestic k8s and Docker package mirrors on all machines (see the Alibaba mirror site above)
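For reference, the Kubernetes repo file from that mirror page looks like this (copied from the Alibaba page; verify against the site before use):

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF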

1.8 Install Docker on all machines

yum install docker-ce-20.10.9-3.el7 --nogpgcheck

1.9 Configure the Docker daemon

[root@k8s ~]# mkdir /etc/docker


cat <<EOF | sudo tee /etc/docker/daemon.json

{

  \"exec-opts\": [\"native.cgroupdriver=systemd\"],

  \"log-driver\": \"json-file\",

  \"log-opts\": {

    \"max-size\": \"100m\"

  },

  \"storage-driver\": \"overlay2\"

}

EOF

1.10 Restart Docker and enable it at boot

[root@k8s ~]# systemctl enable docker

Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

[root@k8s ~]# systemctl daemon-reload

[root@k8s ~]# systemctl restart docker

1.11 Install kubeadm, kubelet, and kubectl on every machine

[root@k8s kubernetes]# yum install kubeadm kubelet kubectl

[root@k8s kubernetes]# systemctl enable kubelet && systemctl start kubelet
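An unpinned yum install pulls the latest packages, which may be newer than the images pulled earlier; to match the 1.23.4 cluster exactly, pin the versions (assuming they are available in the repo):

yum install -y kubeadm-1.23.4 kubelet-1.23.4 kubectl-1.23.4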

------------------------ All machines are now prepared (the next article covers configuring and building the cluster) -------------------

See \"k8s1.23.4 HA Cluster Deployment Notes, Part 2\"


"}, {"title": "Nexus3.14\u914d\u7f6eDocker\u4ed3\u5e93", "desc": "Nexus\u7528\u4f5cmaven,pipy,node\u672c\u5730\u4ed3\u5e93\u53ef\u4ee5\u5728\u7f51\u9875\u7aef\u76f4\u63a5\u4e0a\u4f20\u4f9d\u8d56\u5305\uff0c\u4f5c\u4e3adocker\u79c1\u6709\u4ed3\u5e93\u5fc5\u987b\u4ecedocker push \u4e0a\u4f20\u3002\u6839\u636e\u5b98\u65b9\u63d0\u4f9b\u7684\u601d\u8def\uff0c\u65b9\u6cd5\u4e00\uff1a\u4f7f\u7528nginx\u7b49\u53cd\u5411\u4ee3\u7406\u652f\u6301ssl\uff0c\u65b9\u6cd5\u4e8c\uff1a\u5c06nexus\u6539\u9020\u6210https\u7684\u3002", "content": "

I use nginx as a reverse proxy in front of Nexus.

The official docs describe two options:

Available in Nexus Repository OSS and Nexus Repository Pro

Providing access to the user interface and content via HTTPS is a best practice.

You have two options:

My approach:

1. Request a free certificate from Tencent Cloud (validated, works well).

2. Configure the nginx reverse proxy as follows:

server {

    listen       443 ssl;

    server_name  mvn.jschrj.com;

    ssl_certificate mvn.jschrj.com_bundle.crt;

    ssl_certificate_key mvn.jschrj.com.key;

    underscores_in_headers on;

    charset utf-8;

    access_log  /etc/nginx/logs/jnwx_https.access.log  main;


    location / {

        proxy_pass         http://192.168.1.232:10000; 

        proxy_redirect     off;

        proxy_set_header   Host             $http_host;

        proxy_set_header   X-Real-IP        $remote_addr;

        proxy_set_header   X-Forwarded-For  $proxy_add_x_forwarded_for;

        proxy_set_header Upgrade $http_upgrade;

        proxy_set_header Connection \"upgrade\";

        proxy_read_timeout 300s;

    }

}
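One pitfall worth adding (my note, not part of the original config): nginx's default client_max_body_size of 1m rejects large image layers during docker push, so the server block usually also needs:

    client_max_body_size 0;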

3. In the Nexus web UI, create the private Docker repository (settings shown in the screenshot)
4. Retag and push the Docker images
[root@k8s home]# docker tag ghcr.io/weaveworks/launcher/weave-npc:2.8.1 mvn.jschrj.com/docker-jschrj/weave-npc:2.8.1
[root@k8s home]# docker push mvn.jschrj.com/docker-jschrj/weave-npc:2.8.1
The push refers to repository [mvn.jschrj.com/docker-jschrj/weave-npc]
a8764e32e9fe: Pushed 
175a472c5f77: Pushed 
ed37391def99: Pushed 
998efb010df6: Pushed 
1b3ee35aacca: Pushed 
2.8.1: digest: sha256:2be329164796241e72c530c4c8df5faf4e82fead28372a8cdbb651e74d4dba0a size: 1363
[root@k8s home]# docker tag ghcr.io/weaveworks/launcher/weave-kube:2.8.1 mvn.jschrj.com/docker-jschrj/weave-kube:2.8.1
[root@k8s home]# docker push mvn.jschrj.com/docker-jschrj/weave-kube:2.8.1
The push refers to repository [mvn.jschrj.com/docker-jschrj/weave-kube]
a13197d8bda5: Pushed 
334d70dc85ec: Pushed 
084e56a9c24b: Pushed 
a8e8b7b8e08a: Pushed 
910ce076f504: Pushed 
1b3ee35aacca: Layer already exists 
2.8.1: digest: sha256:3d04dfb38e965daa9258fc4dd14a91f9f6471b73c0d3127652b97c08ccdc8ddb size: 1579
[root@k8s home]# kubectl get node
NAME           STATUS   ROLES                  AGE     VERSION
k8s-mast-212   Ready    control-plane,master   7h40m   v1.23.4
k8s.mast.220   Ready    control-plane,master   7h43m   v1.23.4
k8s.mast.240   Ready    control-plane,master   120m    v1.23.4
k8s.node.233   Ready    <none>                 3h29m   v1.23.4
k8s.node.238   Ready    <none>                 3h24m   v1.23.4
5. Check the uploaded Docker images in the web UI


"}, {"title": "K8S ingress-nginx\u955c\u50cf\u63d2\u4ef6\u65e0\u6cd5\u4e0b\u8f7d\u7684\u89e3\u51b3\u65b9\u6848", "desc": "k8s.gcr.io/ingress-nginx/controller:v1.1.1 \u548c k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1\u56fd\u5185\u4e0b\u8f7d\u4e0d\u4e86\u89e3\u51b3\u65b9\u6848\u3002", "content": "

1. Use an overseas automated pipeline (GitHub, GitLab, travis-ci, etc.) to pull the Docker images, then push them to Docker Hub or a private registry.

I use GitHub Actions.

2. Create the .github/workflows directory in the repository

3. Write demo.yml as follows (mine is very simple; it only pulls and pushes the image)

name: GitHub Actions Demo
on: [push]
jobs:
  pull-images:
    runs-on: ubuntu-latest
    steps:
      - name: pull-controller
        run: docker pull k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de
      - name: docker-images
        run: docker images
      - name: tag-images
        run: docker tag 2461b2698dcd zhoudl/controller:v1.1.1
      - name: login-docker
        run: echo \"your-password\" | docker login -u \"your-account\" --password-stdin
      - name: push-images
        run: docker push zhoudl/controller:v1.1.1

4. Log in to GitHub and check the workflow run

5. Check the image on Docker Hub

6. Then pull it locally

docker pull zhoudl/kube-webhook-certgen:v1.1.1
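If a manifest still references the upstream name, retag after pulling (adjust to whatever your manifest expects):

docker tag zhoudl/kube-webhook-certgen:v1.1.1 k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1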

"}, {"title": "travisd\u62a5\u9519\u8bb0\u5f55", "desc": "windows\u4e0b\u52a0\u5bc6\u5982\u679c\u6ce8\u518c\u8868\u4e2d\u8bbe\u7f6e\u8fc7CMD\u4f7f\u7528\u81ea\u52a8\u8bbe\u7f6e\u4e3autf-8\u4f1a\u62a5\u9519\uff0c\u5220\u9664\u6ce8\u518c\u8868\u914d\u7f6e\u5373\u53ef\uff0c\u6ce8\u518c\u8868\u4f4d\u7f6e[HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Command Processor\\Autorun]", "content": "

Error message:

 F:\\linux\\kubernetes\\travis> travis encrypt --pro SOMEVAR=\"secretvalue\"

Active code page: 65001

error: wrong number of arguments

usage: git config [<options>]


Config file location

    --global              use global config file

    --system              use system config file

    --local               use repository config file

    -f, --file <file>     use given config file

    --blob <blob-id>      read config from given blob object


Action

    --get                 get value: name [value-regex]

    --get-all             get all values: key [value-regex]

    --get-regexp          get values for regexp: name-regex [value-regex]

    --get-urlmatch        get value specific for the URL: section[.var] URL

    --replace-all         replace all matching variables: name value [value_regex]

    --add                 add a new variable: name value

    --unset               remove a variable: name [value-regex]

    --unset-all           remove all matches: name [value-regex]

    --rename-section      rename section: old-name new-name

    --remove-section      remove a section: name

    -l, --list            list all

    -e, --edit            open an editor

    --get-color           find the color configured: slot [default]

    --get-colorbool       find the color setting: slot [stdout-is-tty]


Type

    --bool                value is \"true\" or \"false\"

    --bool-or-int         value is --bool or --int

    --path                value is a path (file or directory name)


Other

    -z, --null            terminate values with NUL byte

    --name-only           show variable names only

    --includes            respect include directives on lookup

    --show-origin         show origin of config (file, standard input, blob, command line)


bad URI(is not URI?): \"Active code page: 65001\\nActive\"

for a full error report, run travis report --pro

View the detailed report:

F:\\linux\\kubernetes\\travis>  travis report

Active code page: 65001

System

Ruby:                     Ruby 3.1.1-p18

Operating System:         Windows

RubyGems:                 RubyGems 3.3.8


CLI

Version:                  1.11.0

Plugins:                  none

Auto-Completion:          yes

Last Version Check:       2022-02-26 08:54:31 +0800


Session

API Endpoint:             https://api.travis-ci.com/

Logged In:                as \"zhoudl\"

Verify SSL:               yes

Enterprise:               no


Endpoints

com:                      https://api.travis-ci.com/ (access token, default, current)


Last Exception

An error occurred running `travis encrypt`:

    URI::InvalidURIError: bad URI(is not URI?): \"Active code page: 65001\\nActive\"

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/3.1.0/uri/rfc3986_parser.rb:67:in `split'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/3.1.0/uri/rfc3986_parser.rb:72:in `parse'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/3.1.0/uri/common.rb:188:in `parse'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/gems/3.1.0/gems/travis-1.11.0/lib/travis/cli/repo_command.rb:74:in `detect_slug'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/gems/3.1.0/gems/travis-1.11.0/lib/travis/cli/repo_command.rb:63:in `find_slug'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/gems/3.1.0/gems/travis-1.11.0/lib/travis/cli/repo_command.rb:24:in `setup'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/gems/3.1.0/gems/travis-1.11.0/lib/travis/cli/command.rb:199:in `execute'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/gems/3.1.0/gems/travis-1.11.0/lib/travis/cli.rb:64:in `run'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/lib/ruby/gems/3.1.0/gems/travis-1.11.0/bin/travis:18:in `<top (required)>'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/bin/travis:32:in `load'

        from D:/ruby/rubyinstaller-3.1.1-1-x64/bin/travis:32:in `<main>'


For issues with the command line tool, please visit https://github.com/travis-ci/travis.rb/issues.
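The fix, as described above, is to remove the Autorun value that forces the code page; from an elevated prompt (this deletes the registry value, so export the key first if unsure):

reg delete \"HKLM\\Software\\Microsoft\\Command Processor\" /v Autorun /f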

"}, {"title": "k8s\u521b\u5efamaridb/mysql\u6302\u8f7d\u5b58\u50a8\u62a5\u9519", "desc": "\u4e3b\u8981\u662f\u56e0\u4e3a\u8981\u5bf9\u5916\u90e8\u5b58\u50a8\u66f4\u6539\u6240\u6709\u8005\u62a5\u9519", "content": "

1. Check the pod status

[root@k8s-mast-220 opt]# kubectl get pod

NAME                     READY     STATUS             RESTARTS   AGE

mysql-1092829943-40528   0/1       CrashLoopBackOff   3          1m

mysql-1092829943-lwcvj   0/1       CrashLoopBackOff   3          1m

2. Check the logs

[root@k8s-mast-220 opt]# kubectl logs mysql-1092829943-lwcvj

chown: changing ownership of '/var/lib/mysql/data/mysql': Operation not permitted

chown: changing ownership of '/var/lib/mysql/data/mysql/conf.d': Operation not permitted

chown: changing ownership of '/var/lib/mysql/data/mysql/conf.d/docker.cnf': Operation not permitted

chown: changing ownership of '/var/lib/mysql/data/mysql/conf.d/mysqld_safe_syslog.cnf': Operation not permitted

chown: changing ownership of '/var/lib/mysql/data/mysql/mysql': Operation not permitted

The error says the container lacks permission to change file ownership.

3. Add to the Deployment:

        securityContext:

          privileged: true
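For context, securityContext goes under the container entry in the pod template; a minimal sketch (the names here are placeholders, not the original manifest):

spec:
  template:
    spec:
      containers:
        - name: mysql
          image: mysql:5.7
          securityContext:
            privileged: true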

4. Recreating the Deployment then fails

[root@k8s-mast-220 opt]# kubectl create -f deploy-mysql.yml
The Deployment \"mysql\" is invalid: spec.template.spec.containers[0].securityContext.privileged: Forbidden: disallowed by policy
5. The cluster config disallows privileged containers; edit the config file
[root@k8s-mast-220 opt]# vi /etc/kubernetes/config 
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR=\"--logtostderr=true\"

# journal message level, 0 is debug
KUBE_LOG_LEVEL=\"--v=0\"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV=\"--allow-privileged=true\"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER=\"--master=http://k8s-mast-220:8080\"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS=\"\u2013etcd_servers=http://centos-mast:2379\"

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR=\"\u2013logtostderr=true\"

Change the original --allow-privileged=false to true
6. Recreate it and it succeeds
[root@k8s-mast-220 opt]# kubectl create -f deploy-mysql.yml
deployment \"mysql\" created
 
"}, {"title": "kubernetes\u672c\u5730\u955c\u50cf\u4ed3\u5e93\u7ba1\u7406", "desc": "\u6211\u7684\u955c\u50cf\u5730\u5740\u662f\u5728\u963f\u91cc\u4e91\u7533\u8bf7\u7684\uff0chttp://registry.cn-shanghai.aliyuncs.com\u3002", "content": "

1. Test logging in to the registry locally

[root@Docker docker]# docker login http://registry.cn-shanghai.aliyuncs.com

Username: 120231708@qq.com

Password: 

Login Succeeded

[root@Docker docker]# 

2. Add the domestic registry secret

[root@k8s-mast-220 ~]# kubectl create secret docker-registry registrykey --docker-server=http://mvn.jschrj.com/repository/docker/  --docker-username=120231708@qq.com --docker-password=<your registered password> --docker-email=120231708@qq.com

secret \"registrykey\" created

3. When creating a Pod, reference the registrykey just created via imagePullSecrets:

apiVersion: v1
kind: Pod
metadata:
  name: foo
spec:
  containers:
    - name: foo
      image: janedoe/awesomeapp:v1
  imagePullSecrets:
    - name: registrykey
4. If you created it wrong, delete and recreate it:

[root@k8s-mast-220 ~]# kubectl get secret

NAME          TYPE                      DATA      AGE

registrykey   kubernetes.io/dockercfg   1         3h


[root@k8s-mast-220 ~]# kubectl delete secret registrykey

secret \"registrykey\" deleted


"}, {"title": "CentOS\u90e8\u7f72kubernetes", "desc": "\u64cd\u4f5c\u7cfb\u7edf\u955c\u50cf\u7248\u672c CentOS-7-x86_64-Minimal-2009 \uff0c\u7cfb\u7edf\u9ed8\u8ba4 yum \u6e90\u3002\u4e24\u53f0\u865a\u62df\u673a\uff0cmaster:192.168.1.220, node:192.168.1.232\u3002", "content": "

############# Master installation ########

[root@k8s-mast-220 etcd]# yum install etcd kubernetes

1. Configure /etc/hosts as follows (the node machine needs the same entries):

[root@k8s-mast-220 etcd]# more /etc/hosts

192.168.1.220 k8s-mast-220

192.168.1.232 k8s-node-232

2. Edit the etcd configuration

Change the default http://localhost:2379 to http://0.0.0.0:2379; otherwise etcd is unreachable and kube-apiserver fails to start.
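Concretely, in /etc/etcd/etcd.conf the relevant lines become (a sketch assuming the stock CentOS etcd package):

ETCD_LISTEN_CLIENT_URLS=\"http://0.0.0.0:2379\"
ETCD_ADVERTISE_CLIENT_URLS=\"http://0.0.0.0:2379\"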

3. Edit /etc/kubernetes/apiserver as follows

###

# kubernetes system config

#

# The following values are used to configure the kube-apiserver

#

# The address on the local server to listen to.

KUBE_API_ADDRESS=\"--insecure-bind-address=0.0.0.0\"


# The port on the local server to listen on.

KUBE_API_PORT=\"--port=8080\"


# Port minions listen on

KUBELET_PORT=\"--kubelet-port=10250\"


# Comma separated list of nodes in the etcd cluster

KUBE_ETCD_SERVERS=\"--etcd-servers=http://k8s-mast-220:2379\"


# Address range to use for services

KUBE_SERVICE_ADDRESSES=\"--service-cluster-ip-range=10.254.0.0/16\"


# default admission control policies

KUBE_ADMISSION_CONTROL=\"--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota\"


# Add your own!

KUBE_API_ARGS=\"\"

4. Edit /etc/kubernetes/config as follows (also required on the node server; you can simply copy the file over):

###

# kubernetes system config

#

# The following values are used to configure various aspects of all

# kubernetes services, including

#

#   kube-apiserver.service

#   kube-controller-manager.service

#   kube-scheduler.service

#   kubelet.service

#   kube-proxy.service

# logging to stderr means we get it in the systemd journal

KUBE_LOGTOSTDERR=\"--logtostderr=true\"


# journal message level, 0 is debug

KUBE_LOG_LEVEL=\"--v=0\"


# Should this cluster be allowed to run privileged docker containers

KUBE_ALLOW_PRIV=\"--allow-privileged=false\"


# How the controller-manager, scheduler, and proxy find the apiserver

KUBE_MASTER=\"--master=http://k8s-mast-220:8080\"

# Comma separated list of nodes in the etcd cluster

KUBE_ETCD_SERVERS=\"\u2013etcd_servers=http://k8s-mast-220:2379\"


# logging to stderr means we get it in the systemd journal

KUBE_LOGTOSTDERR=\"--logtostderr=true\"


# journal message level, 0 is debug

KUBE_LOG_LEVEL=\"\u2013v=0\"


# Should this cluster be allowed to run privileged docker containers

KUBE_ALLOW_PRIV=\"\u2013allow_privileged=false\"


############# Node server installation ###############

[root@k8s-node-232 kubernetes]# yum install kubernetes

1. Edit /etc/hosts as above

2. Edit /etc/kubernetes/config, same as above

3. Edit /etc/kubernetes/kubelet

###

# kubernetes kubelet (minion) config


# The address for the info server to serve on (set to 0.0.0.0 or \"\" for all interfaces)

KUBELET_ADDRESS=\"--address=0.0.0.0\"


# The port for the info server to serve on

KUBELET_PORT=\"--port=10250\"


# You may leave this blank to use the actual hostname

KUBELET_HOSTNAME=\"--hostname-override=k8s-node-232\"


# location of the api-server

KUBELET_API_SERVER=\"--api-servers=http://k8s-mast-220:8080\"


# pod infrastructure container

#KUBELET_POD_INFRA_CONTAINER=\"--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest\"


# Add your own!

KUBELET_ARGS=\"\"

###### Starting the services ###############

1. Start the master services

[root@k8s-mast-220 kubernetes]# for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler; do

> systemctl restart $SERVICES

> systemctl enable $SERVICES

> systemctl status $SERVICES

> done

\u25cf etcd.service - Etcd Server

   Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)

   Active: active (running) since Tue 2022-01-25 15:16:13 CST; 81ms ago

 Main PID: 12078 (etcd)

   CGroup: /system.slice/etcd.service

           \u2514\u250012078 /usr/bin/etcd --name=default --data-dir=/var/lib/etcd/default.etcd --listen-client-urls=http://0.0.0.0:2379


Jan 25 15:16:11 k8s-mast-220 etcd[12078]: enabled capabilities for version 3.3

Jan 25 15:16:13 k8s-mast-220 etcd[12078]: 8e9e05c52164694d is starting a new election at term 6

Jan 25 15:16:13 k8s-mast-220 etcd[12078]: 8e9e05c52164694d became candidate at term 7

Jan 25 15:16:13 k8s-mast-220 etcd[12078]: 8e9e05c52164694d received MsgVoteResp from 8e9e05c52164694d at term 7

Jan 25 15:16:13 k8s-mast-220 etcd[12078]: 8e9e05c52164694d became leader at term 7

Jan 25 15:16:13 k8s-mast-220 etcd[12078]: raft.node: 8e9e05c52164694d elected leader 8e9e05c52164694d at term 7

Jan 25 15:16:13 k8s-mast-220 etcd[12078]: published {Name:default ClientURLs:[http://0.0.0.0:2379]} to cluster cdf818194e3a8c32

Jan 25 15:16:13 k8s-mast-220 etcd[12078]: ready to serve client requests

Jan 25 15:16:13 k8s-mast-220 systemd[1]: Started Etcd Server.

Jan 25 15:16:13 k8s-mast-220 etcd[12078]: serving insecure client requests on [::]:2379, this is strongly discouraged!

\u25cf kube-apiserver.service - Kubernetes API Server

   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)

   Active: active (running) since Tue 2022-01-25 15:16:13 CST; 55ms ago

     Docs: https://github.com/GoogleCloudPlatform/kubernetes

 Main PID: 12127 (kube-apiserver)

   CGroup: /system.slice/kube-apiserver.service

           \u2514\u250012127 /usr/bin/kube-apiserver logtostderr=true v=0 --etcd-servers=http://k8s-mast-220:2379 --insecure-bind-address=0.0.0.0 --port=8080 --kubelet-port=10250 allow_privileg...


Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: W0125 15:16:13.341020   12127 handlers.go:50] Authentication is disabled

Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: E0125 15:16:13.341194   12127 reflector.go:199] k8s.io/kubernetes/plugin/pkg/admission/resourcequota/resource_access.go:...ion refused

Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: E0125 15:16:13.341249   12127 reflector.go:199] k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go:119: Failed to l...

Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: E0125 15:16:13.381517   12127 reflector.go:199] pkg/controller/informers/factory.go:89: Failed to list *api.LimitRange: ...ion refused

Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: E0125 15:16:13.381572   12127 reflector.go:199] pkg/controller/informers/factory.go:89: Failed to list *api.Namespace: G...ion refused

Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: [restful] 2022/01/25 15:16:13 log.go:30: [restful/swagger] listing is available at https://192.168.1.220:6443/swaggerapi/

Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: [restful] 2022/01/25 15:16:13 log.go:30: [restful/swagger] https://192.168.1.220:6443/swaggerui/ is mapped to folder /swagger-ui/

Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: I0125 15:16:13.441725   12127 serve.go:95] Serving securely on 0.0.0.0:6443

Jan 25 15:16:13 k8s-mast-220 kube-apiserver[12127]: I0125 15:16:13.441822   12127 serve.go:109] Serving insecurely on 0.0.0.0:8080

Jan 25 15:16:13 k8s-mast-220 systemd[1]: Started Kubernetes API Server.

Hint: Some lines were ellipsized, use -l to show in full.

\u25cf kube-controller-manager.service - Kubernetes Controller Manager

   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)

   Active: active (running) since Tue 2022-01-25 15:16:13 CST; 64ms ago

     Docs: https://github.com/GoogleCloudPlatform/kubernetes

 Main PID: 12165 (kube-controller)

   CGroup: /system.slice/kube-controller-manager.service

           \u2514\u250012165 /usr/bin/kube-controller-manager logtostderr=true v=0 --master=http://k8s-mast-220:8080


Jan 25 15:16:13 k8s-mast-220 systemd[1]: Started Kubernetes Controller Manager.

\u25cf kube-scheduler.service - Kubernetes Scheduler Plugin

   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)

   Active: active (running) since Tue 2022-01-25 15:16:13 CST; 64ms ago

     Docs: https://github.com/GoogleCloudPlatform/kubernetes

 Main PID: 12203 (kube-scheduler)

   CGroup: /system.slice/kube-scheduler.service

           \u2514\u250012203 /usr/bin/kube-scheduler logtostderr=true v=0 --master=http://k8s-mast-220:8080


Jan 25 15:16:13 k8s-mast-220 systemd[1]: Started Kubernetes Scheduler Plugin.

[root@k8s-mast-220 kubernetes]# 

2. Start the node services

[root@k8s-node-232 kubernetes]# for SERVICES in kube-proxy kubelet docker; do

> systemctl restart $SERVICES

> systemctl enable $SERVICES

> systemctl status $SERVICES

> done

\u25cf kube-proxy.service - Kubernetes Kube-Proxy Server

   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)

   Active: active (running) since Tue 2022-01-25 02:17:18 EST; 74ms ago

     Docs: https://github.com/GoogleCloudPlatform/kubernetes

 Main PID: 44187 (kube-proxy)

   CGroup: /system.slice/kube-proxy.service

           \u2514\u250044187 /usr/bin/kube-proxy logtostderr=true v=0 --master=http://k8s-mast-220:8080


Jan 25 02:17:18 k8s-node-232 systemd[1]: Started Kubernetes Kube-Proxy Server.

\u25cf kubelet.service - Kubernetes Kubelet Server

   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)

   Active: active (running) since Tue 2022-01-25 02:17:19 EST; 91ms ago

     Docs: https://github.com/GoogleCloudPlatform/kubernetes

 Main PID: 44225 (kubelet)

   CGroup: /system.slice/kubelet.service

           \u2514\u250044225 /usr/bin/kubelet logtostderr=true v=0 --api-servers=http://k8s-mast-220:8080 --address=0.0.0.0 --port=10250 --hostname-override=k8s-node-232 allow_privileged=false


Jan 25 02:17:19 k8s-node-232 systemd[1]: Started Kubernetes Kubelet Server.

\u25cf docker.service - Docker Application Container Engine

   Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)

   Active: active (running) since Tue 2022-01-25 02:17:21 EST; 90ms ago

     Docs: http://docs.docker.com

 Main PID: 44335 (dockerd-current)

   CGroup: /system.slice/docker.service

           \u251c\u250044335 /usr/bin/dockerd-current --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd --us...

           \u2514\u250044347 /usr/bin/docker-containerd-current -l unix:///var/run/docker/libcontainerd/docker-containerd.sock --metrics-interval=0 --start-timeout 2m --state-dir /var/run/docke...


Jan 25 02:17:20 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:20.443846113-05:00\" level=info msg=\"libcontainerd: new containerd process, pid: 44347\"

Jan 25 02:17:21 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:21.502676459-05:00\" level=info msg=\"Graph migration to content-addressability took 0.00 seconds\"

Jan 25 02:17:21 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:21.503911811-05:00\" level=info msg=\"Loading containers: start.\"

Jan 25 02:17:21 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:21.510329460-05:00\" level=info msg=\"Firewalld running: false\"

Jan 25 02:17:21 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:21.570362619-05:00\" level=info msg=\"Default bridge (docker0) is assigned with an IP address 172....IP address\"

Jan 25 02:17:21 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:21.589716121-05:00\" level=info msg=\"Loading containers: done.\"

Jan 25 02:17:21 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:21.612286197-05:00\" level=info msg=\"Daemon has completed initialization\"

Jan 25 02:17:21 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:21.612316040-05:00\" level=info msg=\"Docker daemon\" commit=\"7d71120/1.13.1\" graphdriver=overlay2 version=1.13.1

Jan 25 02:17:21 k8s-node-232 dockerd-current[44335]: time=\"2022-01-25T02:17:21.616526404-05:00\" level=info msg=\"API listen on /var/run/docker.sock\"

Jan 25 02:17:21 k8s-node-232 systemd[1]: Started Docker Application Container Engine.

Hint: Some lines were ellipsized, use -l to show in full.

[root@k8s-node-232 kubernetes]# 

3. Test and verify

[root@k8s-mast-220 kubernetes]# kubectl get nodes

NAME           STATUS    AGE

k8s-node-232   Ready     28m

[root@k8s-mast-220 kubernetes]# 
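If a node stays NotReady instead of Ready, describing it shows its conditions and recent events. A quick check (the node name here is taken from the output above):

kubectl describe node k8s-node-232

kubectl get nodes -o wide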

"}, {"title": "firewalld\u963b\u6b62\u6076\u610f\u767b\u5f55IP\u5730\u5740", "desc": "\u7ecf\u5e38\u6709\u56fd\u5916IP\u5730\u5740\u6076\u610f\u767b\u5f55\u670d\u52a1\u5668\uff0cCentos7\u4ee5\u540e\u4f7f\u7528firewalld\u5199\u4e2a\u811a\u672c\u81ea\u52a8\u7981\u6b62\u6076\u610f\u9017\u5bc6\u7801\u7684IP\u5730\u5740\u8bbf\u95ee\u3002", "content": "

# By reviewing the secure log, add the IP addresses that repeatedly fail password login to the system firewall.

If the /var/log/secure log is empty, see the document below on how to recreate it:

"Delete the secure log and recreate it"
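Before automating it, you can eyeball the offenders with a one-liner (this just previews the same pattern the script below uses):

grep "Failed password" /var/log/secure | egrep -o "([0-9]{1,3}\.){3}[0-9]{1,3}" | sort | uniq -c | sort -nr | head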


### Script content

#!/bin/sh

# Automatically drop IPs with repeated SSH login failures

# wugk 2013-1-2

SEC_FILE=/var/log/secure

# IPs with 4 or more "Failed password" entries in the last 1000 log lines

IP_ADDR=`tail -n 1000 $SEC_FILE | grep "Failed password" | egrep -o "([0-9]{1,3}\.){3}[0-9]{1,3}" | sort | uniq -c | awk '$1>=4 {print $2}'`

IPTABLE_CONF=/etc/firewalld/zones/public.xml

cat << EOF

++++++++++++++welcome to use ssh login drop failed ip+++++++++++++++++

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

++++++++++++++++------------------------------------++++++++++++++++++

EOF

# Print a simple countdown (for a fancier scrolling progress bar, see the oldboy blog article on arrays)

echo -n "Waiting 5 seconds before starting"

for n in 1 2 3 4 5; do echo -n "."; sleep 1; done

echo

for i in $IP_ADDR

do

    # Only add a rule if this IP is not already in the permanent zone config

    if ! grep -q "$i" $IPTABLE_CONF; then

        firewall-cmd --permanent --add-rich-rule='rule family=ipv4 source address='"$i"' drop'

    else

        # If it already exists, just print a notice

        echo "$i already exists in the firewall rules, skipping ......"

    fi

done

echo "Reloading the firewall"

firewall-cmd --reload


### Save the above as firewalld.sh and run it from a crontab scheduled job (running it manually also works). By default it reads the last 1000 lines of /var/log/secure; adjust that to your situation.
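A minimal crontab sketch (crontab -e), assuming the script is saved at /root/firewalld.sh; the path and 10-minute interval are illustrative, not from the original:

*/10 * * * * /bin/bash /root/firewalld.sh >> /var/log/firewalld-drop.log 2>&1

To audit or undo a block later, firewall-cmd can list and remove rich rules:

firewall-cmd --list-rich-rules

firewall-cmd --permanent --remove-rich-rule='rule family=ipv4 source address="1.2.3.4" drop'

firewall-cmd --reload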

"}, {"title": "\u5220\u9664secure\u65e5\u5fd7\u91cd\u65b0\u5efa\u7acb", "desc": "secure \u65e5\u5fd7\u65f6\u95f4\u957f\u4e86\u5f88\u5927\uff0c\u53ef\u4ee5\u5220\u9664\u91cd\u5efa\uff0c\u4e5f\u53ef\u4ee5\u4f7f\u7528 echo /dev/null >> /var/log/secure \u6e05\u9664\u3002", "content": "

# Delete the secure log

[root@Docker log]# rm -rf /var/log/secure 

# Recreate the log and restart the services

[root@Docker log]# touch /var/log/secure

[root@Docker log]# chmod 600 /var/log/secure 

[root@Docker log]# systemctl restart sshd

[root@Docker log]# systemctl restart rsyslog

# View the secure log

[root@Docker log]# more /var/log/secure 

Nov  3 15:29:15 Docker polkitd[756]: Unregistered Authentication Agent for unix-process:1965:146417433 (system bus name :1.93, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

Nov  3 15:31:57 Docker sshd[1989]: Accepted password for zhoudl from 192.168.1.168 port 53959 ssh2

Nov  3 15:31:58 Docker sshd[1989]: pam_unix(sshd:session): session opened for user zhoudl by (uid=0)

Nov  3 15:32:27 Docker unix_chkpwd[2005]: password check failed for user (root)

Nov  3 15:32:27 Docker sshd[2003]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=142.93.163.248  user=root

Nov  3 15:32:27 Docker sshd[2003]: pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"

Nov  3 15:32:29 Docker sshd[2003]: Failed password for root from 142.93.163.248 port 34516 ssh2

Nov  3 15:32:29 Docker sshd[2003]: Received disconnect from 142.93.163.248 port 34516:11: Normal Shutdown, Thank you for playing [preauth]

Nov  3 15:32:29 Docker sshd[2003]: Disconnected from 142.93.163.248 port 34516 [preauth]

Nov  3 15:33:00 Docker sshd[2006]: reverse mapping checking getaddrinfo for 221-44-121-138.eagleredes.net.br [138.121.44.221] failed - POSSIBLE BREAK-IN ATTEMPT!

Nov  3 15:33:00 Docker sshd[2006]: Invalid user dimitra from 138.121.44.221 port 6599

Nov  3 15:33:00 Docker sshd[2006]: input_userauth_request: invalid user dimitra [preauth]

Nov  3 15:33:00 Docker sshd[2006]: pam_unix(sshd:auth): check pass; user unknown

Nov  3 15:33:00 Docker sshd[2006]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=138.121.44.221

Nov  3 15:33:02 Docker sshd[2006]: Failed password for invalid user dimitra from 138.121.44.221 port 6599 ssh2

Nov  3 15:33:02 Docker sshd[2006]: Received disconnect from 138.121.44.221 port 6599:11: Bye Bye [preauth]

Nov  3 15:33:02 Docker sshd[2006]: Disconnected from 138.121.44.221 port 6599 [preauth]

Nov  3 15:34:42 Docker sshd[2008]: Invalid user delaine from 125.71.235.94 port 44300

Nov  3 15:34:42 Docker sshd[2008]: input_userauth_request: invalid user delaine [preauth]

Nov  3 15:34:42 Docker sshd[2008]: pam_unix(sshd:auth): check pass; user unknown

Nov  3 15:34:42 Docker sshd[2008]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=125.71.235.94

Nov  3 15:34:44 Docker sshd[2008]: Failed password for invalid user delaine from 125.71.235.94 port 44300 ssh2

Nov  3 15:34:44 Docker sshd[2008]: Received disconnect from 125.71.235.94 port 44300:11: Bye Bye [preauth]

Nov  3 15:34:44 Docker sshd[2008]: Disconnected from 125.71.235.94 port 44300 [preauth]

Nov  3 15:41:32 Docker sshd[2014]: Invalid user hxl from 180.101.224.36 port 4725

Nov  3 15:41:32 Docker sshd[2014]: input_userauth_request: invalid user hxl [preauth]

Nov  3 15:41:32 Docker sshd[2014]: pam_unix(sshd:auth): check pass; user unknown

Nov  3 15:41:32 Docker sshd[2014]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=180.101.224.36

Nov  3 15:41:34 Docker sshd[2014]: Failed password for invalid user hxl from 180.101.224.36 port 4725 ssh2

Nov  3 15:41:34 Docker sshd[2014]: Received disconnect from 180.101.224.36 port 4725:11: Bye Bye [preauth]

Nov  3 15:41:34 Docker sshd[2014]: Disconnected from 180.101.224.36 port 4725 [preauth]

Nov  3 15:42:17 Docker unix_chkpwd[2018]: password check failed for user (root)

Nov  3 15:42:17 Docker sshd[2016]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=142.93.163.248  user=root

Nov  3 15:42:17 Docker sshd[2016]: pam_succeed_if(sshd:auth): requirement \"uid >= 1000\" not met by user \"root\"

Nov  3 15:42:19 Docker sshd[2016]: Failed password for root from 142.93.163.248 port 45758 ssh2

Nov  3 15:42:19 Docker sshd[2016]: Received disconnect from 142.93.163.248 port 45758:11: Normal Shutdown, Thank you for playing [preauth]

Nov  3 15:42:19 Docker sshd[2016]: Disconnected from 142.93.163.248 port 45758 [preauth]

Nov  3 15:44:36 Docker sshd[2019]: Connection closed by 138.121.44.221 port 37692 [preauth]

Nov  3 15:46:46 Docker sshd[2022]: Invalid user ioannis from 125.71.235.94 port 43080

Nov  3 15:46:46 Docker sshd[2022]: input_userauth_request: invalid user ioannis [preauth]

Nov  3 15:46:46 Docker sshd[2022]: pam_unix(sshd:auth): check pass; user unknown

Nov  3 15:46:46 Docker sshd[2022]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=125.71.235.94

Nov  3 15:46:48 Docker sshd[2022]: Failed password for invalid user ioannis from 125.71.235.94 port 43080 ssh2

Nov  3 15:46:48 Docker sshd[2022]: Received disconnect from 125.71.235.94 port 43080:11: Bye Bye [preauth]

Nov  3 15:46:48 Docker sshd[2022]: Disconnected from 125.71.235.94 port 43080 [preauth]
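Rather than clearing the file by hand each time, log rotation keeps its growth bounded. A minimal logrotate sketch, assuming a drop-in at /etc/logrotate.d/secure (note: on CentOS 7 the stock /etc/logrotate.d/syslog already rotates /var/log/secure, so treat this only as an illustration):

/var/log/secure {

    weekly

    rotate 4

    missingok

    create 0600 root root

    postrotate

        # Tell rsyslog to reopen its log files, as the stock syslog config does

        /bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true

    endscript

}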




"}, {"title": "btmp\u548cwtmp", "desc": "\u8fd9\u4e24\u4e2a\u6587\u4ef6\u90fd\u4f4d\u4e8e/var/log\u4e0b\u9762\uff0cwtmp\u8bb0\u5f55\u7684\u662f\u767b\u5f55\u7684\u4fe1\u606f\uff0cbtmp\u8bb0\u5f55\u7684\u662f\u767b\u5f55\u5931\u8d25\u4fe1\u606f\u3002\u6700\u8fd1\u8001\u662f\u6709\u654c\u5bf9\u52bf\u529b\u66b4\u529b\u7834\u89e3\u5bfc\u81f4btmp\uff0c\u6587\u4ef6\u731b\u589e\uff0c\u7531\u4e8e\u670d\u52a1\u5668\u8d44\u6e90\u6709\u9650\uff0c\u5220\u9664btmp\u91cd\u5efa\u4e86\u3002", "content": "

1. Before deleting, use the lastb command to check which foreign IPs have been logging in maliciously, and block them with the firewall.

[root@Docker log]#  lastb

[root@Docker log]#  firewall-cmd --permanent --add-rich-rule='rule family=ipv4 source address=\"161.97.86.26\" drop'

[root@Docker log]#  firewall-cmd --permanent --add-rich-rule='rule family=ipv4 source address=\"188.166.72.50\" drop'

[root@Docker log]#  firewall-cmd --reload

2. Delete and recreate

[root@Docker log]# rm -rf /var/log/btmp

[root@Docker log]# touch /var/log/btmp

[root@Docker log]# chown root:utmp /var/log/btmp

[root@Docker log]# chmod 0600 /var/log/btmp

[root@Docker log]# lastb


btmp begins Wed Jul 28 09:26:19 2021

3. Error messages after deletion (shown if you run last/lastb before the files are recreated)

last: /var/log/wtmp: No such file or directory

Perhaps this file was removed by the operator to prevent logging last info.


lastb: /var/log/btmp: No such file or directory

Perhaps this file was removed by the operator to prevent logging lastb info.
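If wtmp was removed as well, recreate it the same way; root:utmp ownership with mode 0664 matches the CentOS 7 defaults:

touch /var/log/wtmp

chown root:utmp /var/log/wtmp

chmod 0664 /var/log/wtmp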

"}, {"title": "\u534e\u4e3a\u8363\u8000magicbook pro20\u724816.1\u5bf8\u62c6\u673a\u6362\u5c4f", "desc": "\u8fd9\u6b21\u6362\u5c4f\uff0c\u7f51\u4e0a\u641c\u4e86\u597d\u591a\u8d44\u6599\u90fd\u662f19\u724814\u82f1\u5bf8\u7684\u62c6\u673a\u56fe\u7247\uff0c\u6211\u8fd9\u6b3e\u662f\u534e\u4e3a\u8363\u8000magicbook pro20\u724816.1\u5bf8\u65e0\u8fb9\u6846\u7684\u5e9f\u4e86\u8001\u5927\u52b2\u624d\u641e\u5b9a\uff0c\u7559\u56fe\u5e2e\u52a9\u5176\u5b83\u6362\u5c4f\u7684\u5c0f\u4f19\u4f34\u3002", "content": "

1. The screen is cracked. Never mind how it happened; the symptom looks like this.

2. Remove the back cover and unclip the battery cable first, to avoid burning out the screen.

3. Warm the edge of the screen all the way around with a hair dryer, then use a pry tool and teardown picks to release the clips and remove the bezel.