728x90

root@labs--10000:/home/project/lab-env/DockerLab# docker run -p 5432:5432 --name mypostgres -e POSTGRES_PASSWORD=700103 -d postgres

Unable to find image 'postgres:latest' locally
latest: Pulling from library/postgres
33847f680f63: Already exists
1b09e96014b3: Pull complete
eb49b6d9d1f3: Pull complete
4057ebf78d2d: Pull complete
f92d870e2c4f: Pull complete
b03847575a18: Pull complete
475945131fa9: Pull complete
c042b5a6607d: Pull complete
cfe883b776dc: Pull complete
61af04e5c3eb: Pull complete
4e9965ae9062: Pull complete
7b9708b81aa6: Pull complete
871877336770: Pull complete
Digest: sha256:6647385dd9ae11aa2216bf55c54d126b0a85637b3cf4039ef24e3234113588e3
Status: Downloaded newer image for postgres:latest
a868fc06d7b462e13116f917301a4fbbbb26578cfa3ace48936e439bbf222182

 

root@labs--10000:/home/project/lab-env/DockerLab# docker ps -a

CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS                    NAMES
a868fc06d7b4        postgres              "docker-entrypoint..."   17 seconds ago      Up 13 seconds       0.0.0.0:5432->5432/tcp   mypostgres
9b5bf23d92f3        mson218/my-nginx:v1   "/docker-entrypoin..."   45 minutes ago      Up 45 minutes       0.0.0.0:8087->80/tcp     my-nginx

 

root@labs--10000:/home/project/lab-env/DockerLab# docker exec -it mypostgres /bin/bash

root@a868fc06d7b4:/#

 

Pod 안에 컨테이너가 있음

쿠버네티스는 컨테이너/서비스를 어떻게 관리할 것이냐

Pod 단위로 관리함

Pod를 관리하기 위한 전체 아키텍처라고 이해하면 된다.

 

Public Kubernetes Services…

Microsoft Azure, AWS, Google Cloud Platform

 

 

노드만 관리하면 된다.

 

Node1에서 작업

[node1 ~]$ kubeadm init --apiserver-advertise-address $(hostname -i) --pod-network-cidr 10.5.0.0/16
Initializing machine ID from random generator.
I0805 07:17:20.997084    8684 version.go:251] remote version is much newer: v1.22.0; falling back to: stable-1.20
[init] Using Kubernetes version: v1.20.9
[preflight] Running pre-flight checks
        [WARNING Service-Docker]: docker service is not active, please run 'systemctl start docker.service'
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 4.4.0-101-generic
DOCKER_VERSION: 20.10.1
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.1. Latest validated version: 19.03
        [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "", err: exit status 1
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local node1] and IPs [10.96.0.1 192.168.0.18]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost node1] and IPs [192.168.0.18 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost node1] and IPs [192.168.0.18 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.298219 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node node1 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node node1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: sxselk.89o66wc3lsjesxre
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
 
Your Kubernetes control-plane has initialized successfully!
 
To start using your cluster, you need to run the following as a regular user:
 
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
Alternatively, if you are the root user, you can run:
 
  export KUBECONFIG=/etc/kubernetes/admin.conf
 
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
 
Then you can join any number of worker nodes by running the following on each as root:
 
kubeadm join 192.168.0.18:6443 --token sxselk.89o66wc3lsjesxre \
    --discovery-token-ca-cert-hash sha256:f190ea5d5b9e3c95afa98ddaf8eac353b23418a9d7af5f72f38dda81f7e8d9aa
Waiting for api server to startup
Warning: resource daemonsets/kube-proxy is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
daemonset.apps/kube-proxy configured
No resources found

 

[node1 ~]$ kubectl get nodes

NAME    STATUS     ROLES                  AGE   VERSION
node1   NotReady   control-plane,master   3m    v1.20.1

 

 

Node2에서 작업

[node2 ~]$ kubeadm join 192.168.0.18:6443 --token sxselk.89o66wc3lsjesxre \
>     --discovery-token-ca-cert-hash sha256:f190ea5d5b9e3c95afa98ddaf8eac353b23418a9d7af5f72f38dda81f7e8d9aa
 
Initializing machine ID from random generator.
[preflight] Running pre-flight checks
        [WARNING Service-Docker]: docker service is not active, please run 'systemctl start docker.service'
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 4.4.0-101-generic
DOCKER_VERSION: 20.10.1
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.1. Latest validated version: 19.03
        [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "", err: exit status 1
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
 
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
 
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

 

[node1 ~]$ kubectl get nodes

NAME    STATUS     ROLES                  AGE     VERSION
node1   NotReady   control-plane,master   4m56s   v1.20.1
node2   NotReady   <none>                 64s     v1.20.1

 

[node1 ~]$ kubectl get nodes

NAME    STATUS     ROLES                  AGE   VERSION
node1   NotReady   control-plane,master   3m    v1.20.1

 

[node1 ~]$ kubectl config view

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.0.18:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED

 

 

[node1 ~]$ kubectl create deployment my-home --image=ghcr.io/acmexii/edu-welcome:latest

deployment.apps/my-home created

 

 

[node1 ~]$ kubectl get all

NAME                          READY   STATUS    RESTARTS   AGE
pod/my-home-98b4df49c-k6mds   0/1     Pending   0          74s
 
NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   8m56s
 
NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/my-home   0/1     1            0           74s
 
NAME                                DESIRED   CURRENT   READY   AGE
replicaset.apps/my-home-98b4df49c   1         1         0       74s

 

[node1 ~]$ kubectl delete deployment.apps/my-home

deployment.apps "my-home" deleted

 

[node1 ~]$ kubectl get all

NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   9m49s

 

[node1 ~]$ kubectl create deployment my-home --image=nginx

deployment.apps/my-home created

 

[node1 ~]$ kubectl create deployment nginx --image=nginx

deployment.apps/nginx created

 

[node1 ~]$ kubectl get nodes

NAME    STATUS     ROLES                  AGE   VERSION
node1   NotReady   control-plane,master   14m   v1.20.1
node2   NotReady   <none>                 10m   v1.20.1

 

 

[deployment.yaml]

---
# Deployment: runs a single nginx replica labeled app=nginx-dep,
# exposing container port 80.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-dep
  labels:
    app: nginx-dep
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-dep
  template:
    metadata:
      labels:
        app: nginx-dep
    spec:
      containers:
        - name: nginx-dep
          image: nginx
          ports:
            - containerPort: 80

 

[service.yaml]

---
# Service: exposes the nginx-dep Deployment's pods (selector app=nginx-dep)
# on port 80 via a NodePort (a node-level port is auto-assigned from the
# 30000-32767 range unless nodePort is set explicitly).
#
# Fix: metadata.name and the app label were empty strings (""), which
# kubectl rejects ("resource name may not be empty"). Named to match the
# Deployment/selector it targets.
apiVersion: v1
kind: Service
metadata:
  name: nginx-dep
  labels:
    app: nginx-dep
spec:
  ports:
    - port: 80        # Service (ClusterIP) port
      targetPort: 80  # container port on the selected pods
  selector:
    app: nginx-dep
  type: NodePort
728x90
728x90

root@devops:~# uname -a

Linux devops86 5.4.0-80-generic #90-Ubuntu SMP Fri Jul 9 22:49:44 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux

 

root@devops:~# cat /etc/*release

DISTRIB_DESCRIPTION="Ubuntu 20.04.2 LTS"

 

## 네트워크 namespace를 만들고 생성한 namespace에 웹서버(nginx) 실행

root@devops:~# ip netns list

root@devops:~# ip netns add uengine1

root@devops:~# ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff

 

root@devops:~# ip link add veth0 type veth peer name veth1

root@devops:~# ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
ens18            UP             92:5c:0d:39:99:b2 <BROADCAST,MULTICAST,UP,LOWER_UP>
docker0          DOWN           02:42:5b:86:7f:85 <NO-CARRIER,BROADCAST,MULTICAST,UP>
veth1@veth0      DOWN           86:8a:10:05:ad:bf <BROADCAST,MULTICAST,M-DOWN>
veth0@veth1      DOWN           86:9c:b2:e1:13:ec <BROADCAST,MULTICAST,M-DOWN>

 

root@devops:~# ip netns exec uengine1 ip -br link

lo               DOWN           00:00:00:00:00:00 <LOOPBACK>

 

root@devops:~# ip netns exec uengine1 ip link set dev lo up

root@devops:~# ip netns exec uengine1 ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

 

 

# 네트워크 디바이스 연결

root@devops:~# ip link set veth1 netns uengine1

root@devops:~# ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
ens18            UP             92:5c:0d:39:99:b2 <BROADCAST,MULTICAST,UP,LOWER_UP>
docker0          DOWN           02:42:5b:86:7f:85 <NO-CARRIER,BROADCAST,MULTICAST,UP>
veth0@if4        DOWN           86:9c:b2:e1:13:ec <BROADCAST,MULTICAST>

 

root@devops:~# ip netns exec uengine1 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth1@if5        DOWN           86:8a:10:05:ad:bf <BROADCAST,MULTICAST>

 

root@devops:~# ip address add 10.24.70.100/24 dev veth0

root@devops:~# ip netns exec uengine1 ip address add 10.24.70.101/24 dev veth1

root@devops:~# ip netns exec uengine1 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth1@if5        DOWN           86:8a:10:05:ad:bf <BROADCAST,MULTICAST>

 

root@devops:~# ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
ens18            UP             92:5c:0d:39:99:b2 <BROADCAST,MULTICAST,UP,LOWER_UP>
docker0          DOWN           02:42:5b:86:7f:85 <NO-CARRIER,BROADCAST,MULTICAST,UP>
veth0@if4        DOWN           86:9c:b2:e1:13:ec <BROADCAST,MULTICAST>

 

root@devops:~# ip link set dev veth0 up

 

# namespace 디바이스 활성화

root@devops86:~# ip netns exec uengine1 ip link set dev veth1 up

 

root@devops:~# ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
ens18            UP             92:5c:0d:39:99:b2 <BROADCAST,MULTICAST,UP,LOWER_UP>
docker0          DOWN           02:42:5b:86:7f:85 <NO-CARRIER,BROADCAST,MULTICAST,UP>
veth0@if4        UP             86:9c:b2:e1:13:ec <BROADCAST,MULTICAST,UP,LOWER_UP>

 

root@devops:~# ip netns exec uengine1 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth1@if5        UP             86:8a:10:05:ad:bf <BROADCAST,MULTICAST,UP,LOWER_UP>

 

root@devops:~# ip netns exec uengine1 ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
4: veth1@if5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 86:8a:10:05:ad:bf brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 10.24.70.101/24 scope global veth1
       valid_lft forever preferred_lft forever
    inet6 fe80::848a:10ff:fe05:adbf/64 scope link
       valid_lft forever preferred_lft forever

 

root@devops:~# ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
    inet 218.236.22.86/24 brd 218.236.22.255 scope global ens18
       valid_lft forever preferred_lft forever
    inet6 fe80::905c:dff:fe39:99b2/64 scope link
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
5: veth0@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 86:9c:b2:e1:13:ec brd ff:ff:ff:ff:ff:ff link-netns uengine1
    inet 10.24.70.100/24 scope global veth0
       valid_lft forever preferred_lft forever
    inet6 fe80::849c:b2ff:fee1:13ec/64 scope link
       valid_lft forever preferred_lft forever

 

root@devops:~# ping 10.24.70.100

PING 10.24.70.100 (10.24.70.100) 56(84) bytes of data.
64 bytes from 10.24.70.100: icmp_seq=1 ttl=64 time=0.108 ms
64 bytes from 10.24.70.100: icmp_seq=2 ttl=64 time=0.213 ms

 

root@devops:~# ping 10.24.70.101

PING 10.24.70.101 (10.24.70.101) 56(84) bytes of data.
64 bytes from 10.24.70.101: icmp_seq=1 ttl=64 time=0.220 ms
64 bytes from 10.24.70.101: icmp_seq=2 ttl=64 time=0.073 ms

 

root@devops:~# ip route

default via 218.236.22.254 dev ens18 proto static
10.24.70.0/24 dev veth0 proto kernel scope link src 10.24.70.100
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
218.236.22.0/24 dev ens18 proto kernel scope link src 218.236.22.86

 

root@devops:~# apt install nginx-core -y

Reading package lists... Done
Building dependency tree      
Reading state information... Done
The following additional packages will be installed:
  fontconfig-config fonts-dejavu-core libfontconfig1 libgd3 libjbig0 libjpeg-turbo8 libjpeg8 libnginx-mod-http-image-filter
  libnginx-mod-http-xslt-filter libnginx-mod-mail libnginx-mod-stream libtiff5 libwebp6 libxpm4 nginx-common
Suggested packages:
  libgd-tools fcgiwrap nginx-doc ssl-cert
The following NEW packages will be installed:
  fontconfig-config fonts-dejavu-core libfontconfig1 libgd3 libjbig0 libjpeg-turbo8 libjpeg8 libnginx-mod-http-image-filter
  libnginx-mod-http-xslt-filter libnginx-mod-mail libnginx-mod-stream libtiff5 libwebp6 libxpm4 nginx-common nginx-core
0 upgraded, 16 newly installed, 0 to remove and 0 not upgraded.
Need to get 2428 kB of archives.
After this operation, 7846 kB of additional disk space will be used.
Get:1 http://kr.archive.ubuntu.com/ubuntu focal/main amd64 fonts-dejavu-core all 2.37-1 [1041 kB]

 

 

root@devops:~# curl 127.0.0.1

<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
 
<p>For online documentation and support please refer to
nginx.org.

Commercial support is available at
nginx.com.

 
<p><em>Thank you for using nginx.</em></p>
</body>
</html>

 

root@devops:~# systemctl status nginx

 nginx.service - A high performance web server and a reverse proxy server
     Loaded: loaded (/lib/systemd/system/nginx.service; enabled; vendor preset: enabled)
     Active: active (running) since Thu 2021-08-05 02:06:34 UTC; 47s ago
       Docs: man:nginx(8)
   Main PID: 3928 (nginx)
      Tasks: 3 (limit: 4617)
     Memory: 7.5M
     CGroup: /system.slice/nginx.service
             ├─3928 nginx: master process /usr/sbin/nginx -g daemon on; master_process on;
             ├─3929 nginx: worker process
             └─3930 nginx: worker process
 
Aug 05 02:06:34 devops systemd[1]: Starting A high performance web server and a reverse proxy server...
Aug 05 02:06:34 devops systemd[1]: Started A high performance web server and a reverse proxy server.

 

root@devops:~# systemctl stop nginx

 

root@devops:~# curl 127.0.0.1

curl: (7) Failed to connect to 127.0.0.1 port 80: Connection refused

 

# 웹서버 실행(기본 80번 포트)

root@devops:~# ip netns exec uengine1 nginx -g 'daemon off;'

 

root@devops:~# curl localhost

curl: (7) Failed to connect to localhost port 80: Connection refused

 

root@devops:~# curl 10.24.70.101:80

curl: (7) Failed to connect to 10.24.70.101 port 80: Connection refused

 

## 2개의 네트워크 namespace를 만들고 생성한 namespace 간 통신 확인

root@devops:~# ip netns

uengine1 (id: 0)

 

root@devops:~# ip link delete dev veth0

 

root@devops:~# ip netns exec uengine1 ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever

 

root@devops:~# ip netns add uengine2

root@devops:~# ip netns exec uengine2 ip link set dev lo up

root@devops:~# ip netns

uengine2
uengine1 (id: 0)

 

root@devops:~# ip netns exec uengine2 ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

 

root@devops:~# ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff

 

root@devops:~# ip link add veth1 type veth peer name veth2

 

root@devops:~# ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
6: veth2@veth1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 4a:d8:94:37:f2:7e brd ff:ff:ff:ff:ff:ff
7: veth1@veth2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 9e:75:75:6f:03:82 brd ff:ff:ff:ff:ff:ff

 

root@devops:~# ip link set dev veth1 netns uengine1

root@devops:~# ip link set dev veth2 netns uengine2

 

root@devops:~# ip netns exec uengine1 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth1@if6        DOWN           9e:75:75:6f:03:82 <BROADCAST,MULTICAST>

 

root@devops:~# ip netns exec uengine2 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth2@if7        DOWN           4a:d8:94:37:f2:7e <BROADCAST,MULTICAST>

 

#IP 할당

root@devops:~# ip netns exec uengine1 ip addr add 192.168.0.100/24 dev veth1

root@devops:~# ip netns exec uengine2 ip addr add 192.168.0.200/24 dev veth2

 

root@devops:~# ip netns exec uengine1 ip link set dev veth1 up

root@devops:~# ip netns exec uengine2 ip link set dev veth2 up

 

root@devops:~# ip netns exec uengine2 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth2@if7        UP             4a:d8:94:37:f2:7e <BROADCAST,MULTICAST,UP,LOWER_UP>

 

root@devops:~# ip netns exec uengine1 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth1@if6        UP             9e:75:75:6f:03:82 <BROADCAST,MULTICAST,UP,LOWER_UP>

 

root@devops:~# ip netns ls

uengine2 (id: 1)
uengine1 (id: 0)

 

root@devops:~# ip netns exec uengine1 ping 192.168.0.200

PING 192.168.0.200 (192.168.0.200) 56(84) bytes of data.
64 bytes from 192.168.0.200: icmp_seq=1 ttl=64 time=0.098 ms
64 bytes from 192.168.0.200: icmp_seq=2 ttl=64 time=0.156 ms

 

root@devops:~# ip netns exec uengine1 ping 192.168.0.100

PING 192.168.0.100 (192.168.0.100) 56(84) bytes of data.
64 bytes from 192.168.0.100: icmp_seq=1 ttl=64 time=0.132 ms
64 bytes from 192.168.0.100: icmp_seq=2 ttl=64 time=0.051 ms

 

## 가상스위치를 만들어 2개의 namespace를 연결하고 default 도메인(로칼서버)과 통신

root@devops:~# ip netns exec uengine1 ip link delete dev veth1

root@devops:~# ip netns exec uengine2 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>

 

#브릿지 만들기

root@devops:~# ip link add br0 type bridge

root@devops:~# ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
8: br0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether f2:ee:73:58:ab:13 brd ff:ff:ff:ff:ff:ff

 

root@devops:~# ip link set br0 up

root@devops:~# ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
8: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/ether f2:ee:73:58:ab:13 brd ff:ff:ff:ff:ff:ff

 

# cable 만들기

root@devops:~# ip link add br1 type veth peer name veth1

root@devops:~# ip link add br2 type veth peer name veth2

 

root@devops:~# ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
8: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/ether f2:ee:73:58:ab:13 brd ff:ff:ff:ff:ff:ff
9: veth1@br1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 06:4d:76:28:dd:0d brd ff:ff:ff:ff:ff:ff
10: br1@veth1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff
11: veth2@br2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 06:51:40:42:e2:2a brd ff:ff:ff:ff:ff:ff
12: br2@veth2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether 8a:2e:81:f4:1d:94 brd ff:ff:ff:ff:ff:ff

 

# 케이블 꽂기

root@devops:~# ip link set veth1 netns uengine1

root@devops:~# ip link set veth2 netns uengine2

 

root@devops:~# ip netns exec uengine1 ip addr add 192.168.0.101/24 dev veth1

root@devops:~# ip netns exec uengine2 ip addr add 192.168.0.102/24 dev veth2

 

root@devops:~# ip netns exec uengine1

No command specified

 

root@devops:~# ip netns exec uengine1 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth1@if10       DOWN           06:4d:76:28:dd:0d <BROADCAST,MULTICAST>

 

root@devops:~# ip netns exec uengine1 ip link set dev veth1 up

root@devops:~# ip netns exec uengine2 ip link set dev veth2 up

 

root@devops:~# ip netns exec uengine2 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth2@if12       LOWERLAYERDOWN 06:51:40:42:e2:2a <NO-CARRIER,BROADCAST,MULTICAST,UP>

 

# 브릿지 디바이스 연결

root@devops:~# ip link set br1 master br0

root@devops:~# ip link set br2 master br0

 

root@devops:~# ip netns exec uengine1 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth1@if10       LOWERLAYERDOWN 06:4d:76:28:dd:0d <NO-CARRIER,BROADCAST,MULTICAST,UP>

 

# 브릿지 디바이스 활성화

root@devops:~# ip link set dev br1 up

root@devops:~# ip link set dev br2 up

 

root@devops:~# ip netns exec uengine2 ip -br link

lo               UNKNOWN        00:00:00:00:00:00 <LOOPBACK,UP,LOWER_UP>
veth2@if12       UP             06:51:40:42:e2:2a <BROADCAST,MULTICAST,UP,LOWER_UP>

 

root@devops:~# ip netns exec uengine1 ping 192.168.0.102

PING 192.168.0.102 (192.168.0.102) 56(84) bytes of data.

 

root@devops:~# ip netns exec uengine1 ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
9: veth1@if10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 06:4d:76:28:dd:0d brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 192.168.0.101/24 scope global veth1
       valid_lft forever preferred_lft forever
    inet6 fe80::44d:76ff:fe28:dd0d/64 scope link
       valid_lft forever preferred_lft forever

 

root@devops:~# ip netns exec uengine2 ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
11: veth2@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 06:51:40:42:e2:2a brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet 192.168.0.102/24 scope global veth2
       valid_lft forever preferred_lft forever
    inet6 fe80::451:40ff:fe42:e22a/64 scope link
       valid_lft forever preferred_lft forever

 

root@devops:~# ip link

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
8: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff
10: br1@if9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP mode DEFAULT group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff link-netns uengine1
12: br2@if11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP mode DEFAULT group default qlen 1000
    link/ether 8a:2e:81:f4:1d:94 brd ff:ff:ff:ff:ff:ff link-netns uengine2

 

# 통신이 안될 경우 ip forward 정책 변경 : DROP -> ACCEPT

root@devops:~# iptables -L |grep FORWARD

Chain FORWARD (policy DROP)

 

root@devops:~# iptables --policy FORWARD ACCEPT

 

root@devops:~# ip netns exec uengine1 ping 192.168.0.102

PING 192.168.0.102 (192.168.0.102) 56(84) bytes of data.
64 bytes from 192.168.0.102: icmp_seq=1 ttl=64 time=0.185 ms
64 bytes from 192.168.0.102: icmp_seq=2 ttl=64 time=0.091 ms

 

root@devops:~# ip netns exec uengine2 ping 192.168.0.101

PING 192.168.0.101 (192.168.0.101) 56(84) bytes of data.
64 bytes from 192.168.0.101: icmp_seq=1 ttl=64 time=0.070 ms
64 bytes from 192.168.0.101: icmp_seq=2 ttl=64 time=0.072 ms

 

root@devops:~# ping 192.168.0.100

PING 192.168.0.100 (192.168.0.100) 56(84) bytes of data.
^C
--- 192.168.0.100 ping statistics ---
2 packets transmitted, 0 received, 100% packet loss, time 1004ms

 

root@devops:~# ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
    inet 218.236.22.86/24 brd 218.236.22.255 scope global ens18
       valid_lft forever preferred_lft forever
    inet6 fe80::905c:dff:fe39:99b2/64 scope link
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
8: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::f0ee:73ff:fe58:ab13/64 scope link
       valid_lft forever preferred_lft forever
10: br1@if9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff link-netns uengine1
    inet6 fe80::54d0:71ff:fe6b:d7b6/64 scope link
       valid_lft forever preferred_lft forever
12: br2@if11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP group default qlen 1000
    link/ether 8a:2e:81:f4:1d:94 brd ff:ff:ff:ff:ff:ff link-netns uengine2
    inet6 fe80::882e:81ff:fef4:1d94/64 scope link
       valid_lft forever preferred_lft forever

 

root@devops:~# ping 192.168.0.101

PING 192.168.0.101 (192.168.0.101) 56(84) bytes of data.
^C
--- 192.168.0.101 ping statistics ---
1 packets transmitted, 0 received, 100% packet loss, time 0ms

 

root@devops:~# ip route

default via 218.236.22.254 dev ens18 proto static
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
218.236.22.0/24 dev ens18 proto kernel scope link src 218.236.22.86

 

root@devops:~# route

Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
default         _gateway        0.0.0.0         UG    0      0        0 ens18
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
218.236.22.0    0.0.0.0         255.255.255.0   U     0      0        0 ens18

 

# br0를 라우터로 활성화

root@devops:~# ip addr add 192.168.0.1/24 brd 192.168.0.255 dev br0

 

root@devops:~# ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
    inet 218.236.22.86/24 brd 218.236.22.255 scope global ens18
       valid_lft forever preferred_lft forever
    inet6 fe80::905c:dff:fe39:99b2/64 scope link
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
8: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.1/24 brd 192.168.0.255 scope global br0
       valid_lft forever preferred_lft forever
    inet6 fe80::f0ee:73ff:fe58:ab13/64 scope link
       valid_lft forever preferred_lft forever
10: br1@if9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff link-netns uengine1
    inet6 fe80::54d0:71ff:fe6b:d7b6/64 scope link
       valid_lft forever preferred_lft forever
12: br2@if11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP group default qlen 1000
    link/ether 8a:2e:81:f4:1d:94 brd ff:ff:ff:ff:ff:ff link-netns uengine2
    inet6 fe80::882e:81ff:fef4:1d94/64 scope link
       valid_lft forever preferred_lft forever

 

root@devops:~# ip addr show br0

8: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.1/24 brd 192.168.0.255 scope global br0
       valid_lft forever preferred_lft forever
    inet6 fe80::f0ee:73ff:fe58:ab13/64 scope link
       valid_lft forever preferred_lft forever

 

root@devops:~# route

Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
default         _gateway        0.0.0.0         UG    0      0        0 ens18
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
192.168.0.0     0.0.0.0         255.255.255.0   U     0      0        0 br0
218.236.22.0    0.0.0.0         255.255.255.0   U     0      0        0 ens18

 

root@devops:~# ping 192.168.0.101

PING 192.168.0.101 (192.168.0.101) 56(84) bytes of data.
64 bytes from 192.168.0.101: icmp_seq=1 ttl=64 time=0.341 ms
64 bytes from 192.168.0.101: icmp_seq=2 ttl=64 time=0.071 ms

 

root@devops:~# ping 192.168.0.102

PING 192.168.0.102 (192.168.0.102) 56(84) bytes of data.
64 bytes from 192.168.0.102: icmp_seq=1 ttl=64 time=0.197 ms
64 bytes from 192.168.0.102: icmp_seq=2 ttl=64 time=0.064 ms

 

root@devops:~# ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 92:5c:0d:39:99:b2 brd ff:ff:ff:ff:ff:ff
    inet 218.236.22.86/24 brd 218.236.22.255 scope global ens18
       valid_lft forever preferred_lft forever
    inet6 fe80::905c:dff:fe39:99b2/64 scope link
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:5b:86:7f:85 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
8: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.1/24 brd 192.168.0.255 scope global br0
       valid_lft forever preferred_lft forever
    inet6 fe80::f0ee:73ff:fe58:ab13/64 scope link
       valid_lft forever preferred_lft forever
10: br1@if9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP group default qlen 1000
    link/ether 56:d0:71:6b:d7:b6 brd ff:ff:ff:ff:ff:ff link-netns uengine1
    inet6 fe80::54d0:71ff:fe6b:d7b6/64 scope link
       valid_lft forever preferred_lft forever
12: br2@if11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UP group default qlen 1000
    link/ether 8a:2e:81:f4:1d:94 brd ff:ff:ff:ff:ff:ff link-netns uengine2
    inet6 fe80::882e:81ff:fef4:1d94/64 scope link
       valid_lft forever preferred_lft forever

 

root@devops:~# ip netns exec uengine1 ping 10.0.0.4

ping: connect: Network is unreachable

 

root@devops:~# ip netns exec uengine1 ping 192.168.0.1

PING 192.168.0.1 (192.168.0.1) 56(84) bytes of data.
64 bytes from 192.168.0.1: icmp_seq=1 ttl=64 time=0.063 ms
64 bytes from 192.168.0.1: icmp_seq=2 ttl=64 time=0.063 ms

 

root@devops:~# ip netns exec uengine1 route

Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
192.168.0.0     0.0.0.0         255.255.255.0   U     0      0        0 veth1

 

 

## 만들어진 2개의 namespace를 외부와 통신

root@devops:~# ip netns exec uengine1 ping 8.8.8.8

ping: connect: Network is unreachable

 

root@devops:~# ip addr show eth0

Device "eth0" does not exist.

 

root@devops:~# ip netns exec uengine1 ping 10.0.0.4

ping: connect: Network is unreachable

 

root@devops:~# ip route

default via 218.236.22.254 dev ens18 proto static
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
192.168.0.0/24 dev br0 proto kernel scope link src 192.168.0.1
218.236.22.0/24 dev ens18 proto kernel scope link src 218.236.22.86

 

root@devops:~# ip netns exec uengine1 ip route

192.168.0.0/24 dev veth1 proto kernel scope link src 192.168.0.101

 

# namespace에서 br0를 라우터로 등록

root@devops:~# ip netns exec uengine1 ip route add default via 192.168.0.1

root@devops:~# ip netns exec uengine2 ip route add default via 192.168.0.1

 

root@devops:~# ip netns exec uengine ip route

Cannot open network namespace "uengine": No such file or directory

 

root@devops:~# ip netns exec uengine1 ip route

default via 192.168.0.1 dev veth1
192.168.0.0/24 dev veth1 proto kernel scope link src 192.168.0.101

 

root@devops:~# ip netns exec uengine1 ping 10.0.0.4

PING 10.0.0.4 (10.0.0.4) 56(84) bytes of data.

 

root@devops:~# ip netns exec uengine1 ping 8.8.8.8

PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.

 

root@devops:~# sysctl -a|grep forward

net.ipv4.conf.all.bc_forwarding = 0
net.ipv4.conf.all.forwarding = 1
net.ipv4.conf.all.mc_forwarding = 0
net.ipv4.conf.br0.bc_forwarding = 0
net.ipv4.conf.br0.forwarding = 1
net.ipv4.conf.br0.mc_forwarding = 0
net.ipv4.conf.br1.bc_forwarding = 0
net.ipv4.conf.br1.forwarding = 1
net.ipv4.conf.br1.mc_forwarding = 0
net.ipv4.conf.br2.bc_forwarding = 0
net.ipv4.conf.br2.forwarding = 1
net.ipv4.conf.br2.mc_forwarding = 0
net.ipv4.conf.default.bc_forwarding = 0
net.ipv4.conf.default.forwarding = 1
net.ipv4.conf.default.mc_forwarding = 0
net.ipv4.conf.docker0.bc_forwarding = 0
net.ipv4.conf.docker0.forwarding = 1
net.ipv4.conf.docker0.mc_forwarding = 0
net.ipv4.conf.ens18.bc_forwarding = 0
net.ipv4.conf.ens18.forwarding = 1
net.ipv4.conf.ens18.mc_forwarding = 0
net.ipv4.conf.lo.bc_forwarding = 0
net.ipv4.conf.lo.forwarding = 1
net.ipv4.conf.lo.mc_forwarding = 0
net.ipv4.ip_forward = 1
net.ipv4.ip_forward_update_priority = 1
net.ipv4.ip_forward_use_pmtu = 0
net.ipv6.conf.all.forwarding = 0
net.ipv6.conf.all.mc_forwarding = 0
net.ipv6.conf.br0.forwarding = 0
net.ipv6.conf.br0.mc_forwarding = 0
net.ipv6.conf.br1.forwarding = 0
net.ipv6.conf.br1.mc_forwarding = 0
net.ipv6.conf.br2.forwarding = 0
net.ipv6.conf.br2.mc_forwarding = 0
net.ipv6.conf.default.forwarding = 0
net.ipv6.conf.default.mc_forwarding = 0
net.ipv6.conf.docker0.forwarding = 0
net.ipv6.conf.docker0.mc_forwarding = 0
net.ipv6.conf.ens18.forwarding = 0
net.ipv6.conf.ens18.mc_forwarding = 0
net.ipv6.conf.lo.forwarding = 0
net.ipv6.conf.lo.mc_forwarding = 0

 

# namespace에서 외부IP 통신을 위한 namespace 대역 masquerade 적용 1/2

root@devops:~# sysctl -w net.ipv4.ip_forward=1

net.ipv4.ip_forward = 1

 

# namespace에서 외부IP 통신을 위한 namespace 대역 masquerade 적용 2/2

root@devops:~# iptables -t nat -A POSTROUTING -s 192.168.0.0/24 -j MASQUERADE

 

root@devops:~# iptables -L

Chain INPUT (policy ACCEPT)
target     prot opt source               destination

Chain FORWARD (policy ACCEPT)
target     prot opt source               destination        
DOCKER-USER  all  --  anywhere             anywhere           
DOCKER-ISOLATION-STAGE-1  all  --  anywhere             anywhere           
ACCEPT     all  --  anywhere             anywhere             ctstate RELATED,ESTABLISHED
DOCKER     all  --  anywhere             anywhere           
ACCEPT     all  --  anywhere             anywhere            
ACCEPT     all  --  anywhere             anywhere    

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination        
 
Chain DOCKER (1 references)
target     prot opt source               destination        
 
Chain DOCKER-ISOLATION-STAGE-1 (1 references)
target     prot opt source               destination        
DOCKER-ISOLATION-STAGE-2  all  --  anywhere             anywhere           
RETURN     all  --  anywhere             anywhere           
 
Chain DOCKER-ISOLATION-STAGE-2 (1 references)
target     prot opt source               destination        
DROP       all  --  anywhere             anywhere           
RETURN     all  --  anywhere             anywhere           
 
Chain DOCKER-USER (1 references)
target     prot opt source               destination        
RETURN     all  --  anywhere             anywhere

root@devops:~# ip netns exec uengine1 ping 8.8.8.8

PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
64 bytes from 8.8.8.8: icmp_seq=1 ttl=56 time=76.7 ms
64 bytes from 8.8.8.8: icmp_seq=2 ttl=56 time=76.5 ms

 

root@devops:~# ip netns exec uengine1 curl google.com

curl: (6) Could not resolve host: google.com

 

root@devops:~# mkdir -p /etc/netns/uengine1/

 

# namespace 외부 DNS 통신 확인

root@devops:~# echo 'nameserver 8.8.8.8' > /etc/netns/uengine1/resolv.conf

 

root@devops:~# ip netns exec uengine1 curl google.com

<HTML><HEAD><meta http-equiv="content-type" content="text/html;charset=utf-8">
<TITLE>301 Moved</TITLE></HEAD><BODY>
<H1>301 Moved</H1>
The document has moved
&lt;A HREF="http://www.google.com/"&gt;here&lt;/A&gt;.
</BODY></HTML>

 

root@devops:~# mkdir -p /etc/netns/uengine2

 

root@devops:~# echo 'nameserver 8.8.8.8' > /etc/netns/uengine2/resolv.conf

 

root@devops:~# ip netns exec uengine2 curl google.com

<HTML><HEAD><meta http-equiv="content-type" content="text/html;charset=utf-8">
<TITLE>301 Moved</TITLE></HEAD><BODY>
<H1>301 Moved</H1>
The document has moved
&lt;A HREF="http://www.google.com/"&gt;here&lt;/A&gt;.
</BODY></HTML>

 

## 지우는 명령어

ip link delete br0

# uengine1은 link가 아니라 namespace이므로 아래의 'ip netns delete'로 삭제

 

# 네임스페이스 지우는 명령어

ip netns delete uengine1

728x90
728x90

root@devops:~# docker login

Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.
Username: 아이디 입력
Password: 패스워드 입력
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
 
Login Succeeded

 

# Image가 도커에 탑재됐을 때 Container가 된다.

# Immutable 이미지를 만들고 그거 위에 새로운 걸 얹는 느낌

# Immutable 이미지에 IP 같은 것들은 들어가면 안됨

 

#Private host url

 

#Basic image

#Dockerfile … Docker build까지만 해주고, 나머지는 쿠버네티스 이용할 예정

 

 

root@labs--1234:/home/project/lab-env# pwd

/home/project/lab-env

root@labs--1234:/home/project/lab-env# mkdir DockerLab

root@labs--1234:/home/project/lab-env# cd DockerLab

 

root@labs--1234:/home/project/lab-env/DockerLab# vi index.html

Hi~ My name is Hong Gil Dong

 

root@labs--1234:/home/project/lab-env/DockerLab# vi Dockerfile

FROM nginx
COPY index.html /usr/share/nginx/html/

 

#Dockerfile로 이미지 생성

root@labs--1234:/home/project/lab-env/DockerLab# docker build -t 도커아이디/my-nginx:v1 .

Sending build context to Docker daemon 3.072 kB
Step 1/2 : FROM nginx
latest: Pulling from library/nginx
33847f680f63: Pull complete
dbb907d5159d: Pull complete
8a268f30c42a: Pull complete
b10cf527a02d: Pull complete
c90b090c213b: Pull complete
1f41b2f2bf94: Pull complete
Digest: sha256:8f335768880da6baf72b70c701002b45f4932acae8d574dedfddaf967fc3ac90
Status: Downloaded newer image for nginx:latest
 ---> 08b152afcfae
Step 2/2 : COPY index.html /usr/share/nginx/html/
 ---> 9a72d6ff453a
Successfully built 9a72d6ff453a
Successfully tagged 도커아이디/my-nginx:v1

 

#도커 이미지 목록 확인

root@labs--1234:/home/project/lab-env/DockerLab# docker images

REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
도커아이디/my-nginx    v1                  9a72d6ff453a        27 seconds ago      133 MB
nginx               latest              08b152afcfae        13 days ago         133 MB

 

# 도커 이미지 Push 1)

root@labs--1234:/home/project/lab-env/DockerLab# docker login


Login Succeeded

# 도커 이미지 Push 2)

root@labs--1234:/home/project/lab-env/DockerLab# docker push 도커아이디/my-nginx:v1

The push refers to repository [docker.io/도커아이디/my-nginx]
deea81dd057c: Pushed
e3135447ca3e: Mounted from library/nginx
b85734705991: Mounted from library/nginx
988d9a3509bb: Mounted from library/nginx
59b01b87c9e7: Mounted from library/nginx
7c0b223167b9: Mounted from library/nginx
814bff734324: Mounted from library/nginx
v1: digest: sha256:ad4511eda68b63ab92dc2de4bc7bea38a06af47af376ce96c23e14ab1566a1a9 size: 1777

 

#컨테이너 실행

root@labs--1234:/home/project/lab-env/DockerLab# docker run --name=my-nginx -d -p 8072:80 도커아이디/my-nginx:v1

3e9bac040be41b7daa5ee1d5bde74e526db79ab052482593f7ced7aadb32916e

 

root@labs--1234:/home/project/lab-env/DockerLab# curl localhost:8072

Hi~ My name is Hong Gil Dong

 

#모든 컨테이너 한번에 삭제 (중지 후 삭제)

root@labs--1234:/home/project/lab-env/DockerLab# docker container rm $(docker ps -a -q)

Error response from daemon: You cannot remove a running container 3e9bac040be41b7daa5ee1d5bde74e526db79ab052482593f7ced7aadb32916e. Stop the container before attempting removal or force remove

 

# 컨테이너 종료

root@labs--1234:/home/project/lab-env/DockerLab# docker stop 3e9bac040be41b7daa5ee1d5bde74e526db79ab052482593f7ced7aadb32916e

3e9bac040be41b7daa5ee1d5bde74e526db79ab052482593f7ced7aadb32916e

 

#모든 컨테이너 한번에 삭제 (중지 후 삭제)

root@labs--1234:/home/project/lab-env/DockerLab# docker container rm $(docker ps -a -q)

3e9bac040be4

 

# 실행중인 컨테이너 확인

root@labs--1234:/home/project/lab-env/DockerLab# docker ps

CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES

 

root@labs--1234:/home/project/lab-env/DockerLab# docker run --name=my-nginx -d -p 8087:80 도커아이디/my-nginx:v1

9b5bf23d92f3581c67e1164873612ce7e149a5e12793011d734639cef4a730ff

 

 

 

 

 

728x90

+ Recent posts