클라우드 네이티브 라우터 운영자 서비스 모듈: 호스트 기반 라우팅 예제 구성 파일
이 섹션에는 서비스 모듈 호스트 기반 라우팅 배치를 작성하는 데 사용할 수 있는 예제 스크립트 및 구성 파일이 포함되어 있습니다.
호스트 기반 라우팅: cRPD를 설치하기 위한 예제 스크립트 및 구성 파일
cRPD 설치 스크립트 예
다음 예제 스크립트는 스크립트를 실행하는 노드에 cRPD를 설치합니다. cRPD가 노드에서 이미 실행 중인 경우 스크립트는 실행 중인 cRPD 인스턴스를 제거하고 새 인스턴스를 설치합니다. 스크립트가 기존 cRPD 구성 파일을 찾으면 해당 구성 파일을 재사용합니다. 그렇지 않으면 스크립트의 CONFIG_TEMPLATE 변수로 지정된 구성 파일을 사용합니다.
클러스터의 모든 노드에서 적절한 CONFIG_TEMPLATE 구성 파일을 사용하여 이 스크립트를 실행합니다.
컨트롤 플레인 노드 구성 파일 예시 및 작업자 노드 구성 파일 예시에서 샘플 CONFIG_TEMPLATE 구성 파일을 제공합니다.
install-crpd.sh:
#!/usr/bin/env bash
#
# install-crpd.sh — install (or reinstall) cRPD on the node this script runs on.
#
# If a cRPD pod is already running, it is stopped and removed first. An
# existing config file under /etc/crpd/config is reused; otherwise the
# CONFIG_TEMPLATE file is expanded with envsubst and installed as the
# initial configuration.
#
# Requirements: podman (tested with 4.6.2), envsubst, sudo privileges.
# The node-specific variables referenced by the template (LO0_IP,
# VETH_CRPD, ...) must be exported in the environment before running.

set -o nounset
set -o errexit
set -o pipefail

SCRIPT_DIR=$(cd -P "$(dirname "$0")" && pwd)
NETWORK_NS="ns:/run/netns/crpd"

# Specify the config file. For example:
# ctl_plane_crpd_connectivity_template_5_node.conf or worker_crpd_connectivity_template_5_node.conf
CONFIG_TEMPLATE=ctl_plane_crpd_connectivity_template_5_node.conf
POD_NAME=crpd
CONTAINER_NAME=crpd01

# Remove existing pod: stop and delete all containers in pod ${POD_NAME}.
POD_ID=$(sudo podman pod ls -fname="${POD_NAME}" -q)
if [ -n "$POD_ID" ]; then
  sudo podman pod stop "${POD_ID}"
  sudo podman pod rm "${POD_ID}"
fi

# Create the pod inside the crpd network namespace (tested with podman 4.6.2).
sudo podman pod create --name "${POD_NAME}" --network "${NETWORK_NS}"

# Create the config dir. Do NOT wipe it first: an existing juniper.conf must
# be detected and reused so a reinstalled cRPD keeps its prior configuration.
# (The original script ran 'rm -rf' here, which made the reuse check below
# always fail.)
CRPD_CONFIG_DIR=/etc/crpd/config
sudo mkdir -p "${CRPD_CONFIG_DIR}"
if [[ -f ${CRPD_CONFIG_DIR}/juniper.conf || -f ${CRPD_CONFIG_DIR}/juniper.conf.gz ]]; then
  echo "conf file exists"
else
  echo "initialize with base config"
  # Substitute the exported node-specific variables into the template.
  envsubst < "${CONFIG_TEMPLATE}" > crpd_base_connectivity.conf
  sudo cp "${SCRIPT_DIR}/crpd_base_connectivity.conf" "${CRPD_CONFIG_DIR}/juniper.conf"
fi

sudo podman volume create crpd01-varlog --ignore
# NOTE: the stray ':' after the registry host in the original image reference
# ("enterprise-hub.juniper.net:/jcnr-...") made it invalid; removed here.
sudo podman run --rm -d --name "${CONTAINER_NAME}" --pod "${POD_NAME}" --privileged \
  -v /etc/crpd/config:/config:Z -v crpd01-varlog:/var/log -it \
  enterprise-hub.juniper.net/jcnr-container-prod/crpd:24.4R1.9

# List pod status.
sudo podman pod ps --ctr-status --ctr-names --ctr-ids
컨트롤 플레인 노드 구성 파일 예
이 구성 파일은 cRPD 설치 스크립트의 CONFIG_TEMPLATE 변수에서 참조됩니다. 컨트롤 플레인 노드당 하나의 컨트롤 플레인 노드 구성 파일이 있습니다. 각 컨트롤 플레인 노드에 대해 설정할 변수 값은 표 1에서 표 3까지를 참조하십시오.
ctl_plane_crpd_connectivity_template_5_node.conf:
groups {
base {
apply-flags omit;
system {
root-authentication {
encrypted-password "<encrypted_password>"
}
commit {
xpath;
constraints {
direct-access;
}
notification {
configuration-diff-format xml;
}
}
scripts {
action {
max-datasize 256m;
}
language python3;
}
services {
netconf {
ssh;
}
ssh {
root-login allow;
port 24;
}
}
license {
keys {
key "<crpd-license-key>";
}
}
}
}
connectivity {
interfaces {
lo0 {
mtu 9216;
unit 0 {
family inet {
address ${LO0_IP}/32;
}
}
}
veth-crpd {
mtu 9216;
unit 0 {
family inet {
address ${VETH_CRPD}/30;
}
# *** uncomment below if running dual stack ***
#family inet6 {
# address ${VETH6_CRPD}/126;
#}
}
}
}
}
policy-options {
policy-statement accept-podcidr {
term accept {
from {
route-filter ${POD_CIDR} orlonger;
}
then accept;
}
then reject;
}
policy-statement export-direct {
term 1 {
from {
route-filter ${LO0_IP_POOL} orlonger;
}
then accept;
}
then reject;
}
policy-statement export-evpn {
term 1 {
from protocol evpn;
then accept;
}
then reject;
}
policy-statement export-veth {
term 1 {
from {
protocol direct;
route-filter ${VETH_PREFIX}/30 exact;
}
then accept;
}
term 2 {
from protocol bgp;
then accept;
}
then {
# *** uncomment below if running dual stack ***
#next policy;
reject;
}
}
# *** uncomment below if running dual stack ***
#policy-statement export-veth-v6 {
# term 1 {
# from {
# protocol direct;
# route-filter ${VETH6_PREFIX}/126 exact;
# }
# then accept;
# }
# term 2 {
# from protocol bgp;
# then accept;
# }
# then reject;
#}
}
routing-instances {
master-calico-ri {
instance-type vrf;
protocols {
bgp {
group calico-bgprtrgrp-master {
multihop;
local-address ${VETH_CRPD};
import accept-podcidr;
export export-evpn;
remove-private no-peer-loop-check;
peer-as 64512;
local-as 64600;
neighbor ${VETH_K8S};
}
# *** uncomment below if running dual stack ***
#group calico-bgprtrgrp-master6 {
# multihop;
# local-address ${VETH6_CRPD};
# export export-evpn;
# remove-private no-peer-loop-check;
# peer-as 64512;
# local-as 64600;
# neighbor ${VETH6_K8S};
#}
}
evpn {
ip-prefix-routes {
advertise direct-nexthop;
encapsulation vxlan;
vni 4096;
# ***Include below line when running IPv4 only. Comment out if running dual stack.***
export export-veth;
# ***Include below line when running dual stack. Comment out if running IPv4 only.***
#export [ export-veth export-veth-v6 ];
route-attributes {
community {
import-action allow;
export-action allow;
}
}
}
}
}
interface veth-crpd;
vrf-target target:1:4;
}
}
routing-options {
route-distinguisher-id ${LO0_IP};
router-id ${LO0_IP};
}
protocols {
bgp {
group crpd-master-bgprtrgrp {
export export-direct;
peer-as 64500;
local-as 64500;
neighbor ${MASTER1_PEER_ENS4_IP};
neighbor ${MASTER2_PEER_ENS4_IP};
}
group crpd-worker-bgprtrgrp {
multihop;
export export-direct;
peer-as 64500;
local-as 64500;
neighbor ${WORKER1_PEER_ENS4_IP};
neighbor ${WORKER2_PEER_ENS4_IP};
}
group crpd-master-lo-bgprtrgrp {
local-address ${LO0_IP};
family evpn {
signaling;
}
peer-as 64600;
local-as 64600;
neighbor ${MASTER1_EVPN_PEER_IP};
neighbor ${MASTER2_EVPN_PEER_IP};
}
group crpd-worker-lo-bgprtrgrp {
local-address ${LO0_IP};
family evpn {
signaling;
}
peer-as 64600;
local-as 64600;
neighbor ${WORKER1_EVPN_PEER_IP};
neighbor ${WORKER2_EVPN_PEER_IP};
}
cluster ${LO0_IP};
}
}
}
}
apply-groups base;
apply-groups connectivity;
| 변수 |
설정 |
|---|---|
| LO0_IP_POOL |
10.12.0.0/24 |
| LO0_IP |
10.12.0.1 |
| VETH_CRPD |
10.1.1.2 |
| VETH6_CRPD |
2001:db8:1::2 |
| VETH_PREFIX |
10.1.1.0 |
| VETH6_PREFIX |
2001:db8:1::0 |
| VETH_K8S |
10.1.1.1 |
| VETH6_K8S |
2001:db8:1::1 |
| POD_CIDR |
192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP |
10.12.0.2 |
| MASTER2_EVPN_PEER_IP |
10.12.0.3 |
| WORKER1_EVPN_PEER_IP |
10.12.0.4 |
| WORKER2_EVPN_PEER_IP |
10.12.0.5 |
| MASTER1_PEER_ENS4_IP |
192.168.1.102 |
| MASTER2_PEER_ENS4_IP |
192.168.1.103 |
| WORKER1_PEER_ENS4_IP |
192.168.1.104 |
| WORKER2_PEER_ENS4_IP |
192.168.1.105 |
| 변수 |
설정 |
|---|---|
| LO0_IP_POOL |
10.12.0.0/24 |
| LO0_IP |
10.12.0.2 |
| VETH_CRPD |
10.1.2.2 |
| VETH6_CRPD |
2001:db8:2::2 |
| VETH_PREFIX |
10.1.2.0 |
| VETH6_PREFIX |
2001:db8:2::0 |
| VETH_K8S |
10.1.2.1 |
| VETH6_K8S |
2001:db8:2::1 |
| POD_CIDR |
192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP |
10.12.0.1 |
| MASTER2_EVPN_PEER_IP |
10.12.0.3 |
| WORKER1_EVPN_PEER_IP |
10.12.0.4 |
| WORKER2_EVPN_PEER_IP |
10.12.0.5 |
| MASTER1_PEER_ENS4_IP |
192.168.1.1 |
| MASTER2_PEER_ENS4_IP |
192.168.1.3 |
| WORKER1_PEER_ENS4_IP |
192.168.1.4 |
| WORKER2_PEER_ENS4_IP |
192.168.1.5 |
| 변수 |
설정 |
|---|---|
| LO0_IP_POOL |
10.12.0.0/24 |
| LO0_IP |
10.12.0.3 |
| VETH_CRPD |
10.1.3.2 |
| VETH6_CRPD |
2001:db8:3::2 |
| VETH_PREFIX |
10.1.3.0 |
| VETH6_PREFIX |
2001:db8:3::0 |
| VETH_K8S |
10.1.3.1 |
| VETH6_K8S |
2001:db8:3::1 |
| POD_CIDR |
192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP |
10.12.0.1 |
| MASTER2_EVPN_PEER_IP |
10.12.0.2 |
| WORKER1_EVPN_PEER_IP |
10.12.0.4 |
| WORKER2_EVPN_PEER_IP |
10.12.0.5 |
| MASTER1_PEER_ENS4_IP |
192.168.1.1 |
| MASTER2_PEER_ENS4_IP |
192.168.1.2 |
| WORKER1_PEER_ENS4_IP |
192.168.1.4 |
| WORKER2_PEER_ENS4_IP |
192.168.1.5 |
작업자 노드 구성 파일 예
이 구성 파일은 cRPD 설치 스크립트의 CONFIG_TEMPLATE 변수에서 참조됩니다. 작업자 노드당 하나의 작업자 노드 구성 파일이 있습니다. 각 작업자 노드에 대해 설정할 변수 값은 표 4와 표 5를 참조하십시오.
worker_crpd_connectivity_template_5_node.conf:
groups {
base {
apply-flags omit;
system {
root-authentication {
encrypted-password "<encrypted_password>"
}
commit {
xpath;
constraints {
direct-access;
}
notification {
configuration-diff-format xml;
}
}
scripts {
action {
max-datasize 256m;
}
language python3;
}
services {
netconf {
ssh;
}
ssh {
root-login allow;
port 24;
}
}
license {
keys {
key "<crpd_license_key>";
}
}
}
}
connectivity {
interfaces {
lo0 {
mtu 9216;
unit 0 {
family inet {
address ${LO0_IP}/32;
}
}
}
veth-crpd {
mtu 9216;
unit 0 {
family inet {
address ${VETH_CRPD}/30;
}
# *** uncomment below if running dual stack ***
#family inet6 {
# address ${VETH6_CRPD}/126;
#}
}
}
}
policy-options {
policy-statement accept-podcidr {
term accept {
from {
route-filter ${POD_CIDR} orlonger;
}
then accept;
}
then reject;
}
policy-statement export-direct {
term 1 {
from {
route-filter ${LO0_IP_POOL} orlonger;
}
then accept;
}
then reject;
}
policy-statement export-evpn {
term 1 {
from protocol evpn;
then accept;
}
then reject;
}
policy-statement export-veth {
term 1 {
from {
protocol direct;
route-filter ${VETH_PREFIX}/30 exact;
}
then accept;
}
term 2 {
from protocol bgp;
then accept;
}
then {
# *** uncomment below if running dual stack ***
#next policy;
reject;
}
}
# *** uncomment below if running dual stack ***
#policy-statement export-veth-v6 {
# term 1 {
# from {
# protocol direct;
# route-filter ${VETH6_PREFIX}/126 exact;
# }
# then accept;
# }
# term 2 {
# from protocol bgp;
# then accept;
# }
# then reject;
#}
}
routing-instances {
worker-calico-ri {
instance-type vrf;
protocols {
bgp {
group calico-bgprtrgrp-worker {
multihop;
local-address ${VETH_CRPD};
import accept-podcidr;
export export-evpn;
remove-private no-peer-loop-check;
peer-as 64512;
local-as 64600;
neighbor ${VETH_K8S};
}
# *** uncomment below if running dual stack ***
#group calico-bgprtrgrp-worker6 {
# multihop;
# local-address ${VETH6_CRPD};
# export export-evpn;
# remove-private no-peer-loop-check;
# peer-as 64512;
# local-as 64600;
# neighbor ${VETH6_K8S};
#}
}
evpn {
ip-prefix-routes {
advertise direct-nexthop;
encapsulation vxlan;
vni 4300;
# ***Include below line when running IPv4 only. Comment out if running dual stack.***
export export-veth;
# ***Include below line when running dual stack. Comment out if running IPv4 only.***
#export [ export-veth export-veth-v6 ];
route-attributes {
community {
import-action allow;
export-action allow;
}
}
}
}
}
interface veth-crpd;
vrf-target target:1:4;
}
}
routing-options {
route-distinguisher-id ${LO0_IP};
router-id ${LO0_IP};
}
protocols {
bgp {
group crpd-master-bgprtrgrp {
multihop;
export export-direct;
peer-as 64500;
local-as 64500;
neighbor ${MASTER1_PEER_ENS4_IP};
neighbor ${MASTER2_PEER_ENS4_IP};
neighbor ${MASTER3_PEER_ENS4_IP};
}
group crpd-master-lo-bgprtrgrp {
local-address ${LO0_IP};
family evpn {
signaling;
}
peer-as 64600;
local-as 64600;
neighbor ${MASTER1_EVPN_PEER_IP};
neighbor ${MASTER2_EVPN_PEER_IP};
neighbor ${MASTER3_EVPN_PEER_IP};
}
}
}
}
}
apply-groups base;
apply-groups connectivity;
| 변수 |
설정 |
|---|---|
| LO0_IP_POOL |
10.12.0.0/24 |
| LO0_IP |
10.12.0.4 |
| VETH_CRPD |
10.1.4.2 |
| VETH6_CRPD |
2001:db8:4::2 |
| VETH_PREFIX |
10.1.4.0 |
| VETH6_PREFIX |
2001:db8:4::0 |
| VETH_K8S |
10.1.4.1 |
| VETH6_K8S |
2001:db8:4::1 |
| POD_CIDR |
192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP |
10.12.0.1 |
| MASTER2_EVPN_PEER_IP |
10.12.0.2 |
| MASTER3_EVPN_PEER_IP |
10.12.0.3 |
| MASTER1_PEER_ENS4_IP |
192.168.1.101 |
| MASTER2_PEER_ENS4_IP |
192.168.1.102 |
| MASTER3_PEER_ENS4_IP |
192.168.1.103 |
| 변수 |
설정 |
|---|---|
| LO0_IP_POOL |
10.12.0.0/24 |
| LO0_IP |
10.12.0.5 |
| VETH_CRPD |
10.1.5.2 |
| VETH6_CRPD |
2001:db8:5::2 |
| VETH_PREFIX |
10.1.5.0 |
| VETH6_PREFIX |
2001:db8:5::0 |
| VETH_K8S |
10.1.5.1 |
| VETH6_K8S |
2001:db8:5::1 |
| POD_CIDR |
192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP |
10.12.0.1 |
| MASTER2_EVPN_PEER_IP |
10.12.0.2 |
| MASTER3_EVPN_PEER_IP |
10.12.0.3 |
| MASTER1_PEER_ENS4_IP |
192.168.1.101 |
| MASTER2_PEER_ENS4_IP |
192.168.1.102 |
| MASTER3_PEER_ENS4_IP |
192.168.1.103 |
호스트 기반 라우팅: Calico 구성 예
BGP 구성 예
bgpconfig.yaml:
# Calico global BGP configuration: disable the node-to-node mesh and have
# each Calico node listen for BGP on port 1179 (cRPD owns port 179).
apiVersion: crd.projectcalico.org/v1
kind: BGPConfiguration
metadata:
  name: default
spec:
  asNumber: 64512
  listenPort: 1179
  logSeverityScreen: Debug
  nodeToNodeMeshEnabled: false
IP 풀 구성 예
ippool-v4.yaml에 추가합니다.
# Calico IPv4 workload pool. No IPIP/VXLAN encapsulation: routing between
# nodes is handled by cRPD (host-based routing).
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
  name: default-ipv4-ippool
spec:
  allowedUses:
    - Workload
  blockSize: 26
  cidr: 192.168.7.0/24
  ipipMode: Never
  natOutgoing: true
  nodeSelector: all()
  vxlanMode: Never
ippool-v6.yaml에 추가합니다.
# Calico IPv6 workload pool (dual-stack deployments only).
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
  name: default-ipv6-ippool
spec:
  allowedUses:
    - Workload
  blockSize: 122
  cidr: 2001:db8:42:0::/56
  ipipMode: Never
  natOutgoing: true
  nodeSelector: all()
  vxlanMode: Never
BGP 피어 구성 예
bgppeers-v4.yaml:
# One BGPPeer per node: each Calico node peers with the local cRPD instance
# over the per-node veth (10.1.<node>.2), cRPD side AS 64600, port 179.
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node1
spec:
  sourceAddress: None
  asNumber: 64600
  node: node1
  peerIP: 10.1.1.2:179
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node2
spec:
  sourceAddress: None
  asNumber: 64600
  node: node2
  peerIP: 10.1.2.2:179
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node3
spec:
  sourceAddress: None
  asNumber: 64600
  node: node3
  peerIP: 10.1.3.2:179
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node4
spec:
  sourceAddress: None
  asNumber: 64600
  node: node4
  peerIP: 10.1.4.2:179
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node5
spec:
  sourceAddress: None
  asNumber: 64600
  node: node5
  peerIP: 10.1.5.2:179
bgppeers-v6.yaml:
# IPv6 BGPPeers for dual-stack: bracketed IPv6 literal plus port per peer.
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node1-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node1
  peerIP: '[2001:db8:1::2]:179'
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node2-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node2
  peerIP: '[2001:db8:2::2]:179'
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node3-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node3
  peerIP: '[2001:db8:3::2]:179'
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node4-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node4
  peerIP: '[2001:db8:4::2]:179'
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node5-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node5
  peerIP: '[2001:db8:5::2]:179'
호스트 기반 라우팅: VxLAN 및 경로 대상 풀의 예
VxLAN 풀 예
vxlan-pool.yaml:
apiVersion: core.svcmodule.juniper.net/v1
kind: Pool
metadata:
name: default-vni
namespace: svcmodule-system
spec:
vxlanId:
start: 4096
end: 16777215
경로 대상 풀 예시
rt-pool.yaml:
apiVersion: core.svcmodule.juniper.net/v1
kind: Pool
metadata:
name: default-route-target-number
namespace: svcmodule-system
spec:
routeTarget:
start: 8000000
size: 2048
호스트 기반 라우팅: JCNR 구성 예
JCNR 구성
jcnr-config.yaml:
apiVersion: configplane.juniper.net/v1
kind: Jcnr
metadata:
name: crpd-master
namespace: hbn
spec:
replicas: 3
jcnrTemplate:
externallyInitialized: true
loopbackAddressInitialized: true
nodeSelector:
master: ""
---
apiVersion: configplane.juniper.net/v1
kind: Jcnr
metadata:
name: crpd-worker
namespace: hbn
spec:
replicas: 2
jcnrTemplate:
externallyInitialized: true
loopbackAddressInitialized: true
nodeSelector:
worker: ""
호스트 기반 라우팅: 보조 CNI 구성 파일의 예
MACVLAN 사용자 정의 리소스 예
macvlan-cr.yaml:
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingInstance
metadata:
name: macvlan-ri-master
namespace: hbn
spec:
crpdGroupReference:
name: crpd-master
instanceType: mac-vrf
vrfTarget:
importExport:
name: target:64512:8000000
routingOptions:
routeDistinguisherId: 192.168.100.2:11
bridgeDomains:
- name: test-domain
interface: vrf-end
vLanId: 100
vni: 4200
---
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingInstance
metadata:
name: macvlan-ri-worker
namespace: hbn
spec:
crpdGroupReference:
name: crpd-worker
instanceType: mac-vrf
vrfTarget:
importExport:
name: target:64512:8000000
routingOptions:
routeDistinguisherId: 192.168.100.2:11
bridgeDomains:
- name: test-domain
interface: vrf-end
vLanId: 100
vni: 4200
---
apiVersion: core.svcmodule.juniper.net/v1
kind: EVPN
metadata:
name: macvlan-evpn-master
namespace: hbn
spec:
encapsulation: vxlan
defaultGateway: no-gateway-community
routingInstanceParent:
name: macvlan-ri-master
---
apiVersion: core.svcmodule.juniper.net/v1
kind: EVPN
metadata:
name: macvlan-evpn-worker
namespace: hbn
spec:
encapsulation: vxlan
defaultGateway: no-gateway-community
routingInstanceParent:
name: macvlan-ri-worker
---
apiVersion: core.svcmodule.juniper.net/v1
kind: InterfaceGroup
metadata:
name: jcnr-macvlan-master
namespace: hbn
spec:
instanceParent:
parentType: jcnr
reference:
name: crpd-master
interfaceName: vrf-end
interfaceTemplate:
encapsulation: vlan-bridge
families:
- addressFamily: bridge
---
apiVersion: core.svcmodule.juniper.net/v1
kind: InterfaceGroup
metadata:
name: jcnr-macvlan-worker
namespace: hbn
spec:
instanceParent:
parentType: jcnr
reference:
name: crpd-worker
interfaceName: vrf-end
interfaceTemplate:
encapsulation: vlan-bridge
families:
- addressFamily: bridge
MACVLAN 포드 예
macvlan-pods.yaml:
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf
spec:
config: '{
"cniVersion": "0.3.1",
"plugins": [
{
"type": "macvlan",
"capabilities": { "ips": true },
"master": "host-end",
"mode": "bridge",
"ipam": {
"type": "static",
"routes": [
{
"dst": "0.0.0.0/0",
"gw": "10.9.1.1"
}
]
}
}, {
"capabilities": { "mac": true },
"type": "tuning"
}
]
}'
---
apiVersion: v1
kind: Pod
metadata:
name: l2-pod-1
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan-conf",
"ips": [ "10.9.1.101/24" ],
"mac": "00:53:57:49:47:aa",
"gateway": [ "10.9.1.1" ]
}]'
spec:
containers:
- name: l2-pod-1
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: google-containers/toolbox
ports:
- containerPort: 80
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
automountServiceAccountToken: false
nodeName: ${node-name}
---
apiVersion: v1
kind: Pod
metadata:
name: l2-pod-2
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan-conf",
"ips": [ "10.9.1.102/24" ],
"mac": "00:53:57:49:47:bb",
"gateway": [ "10.9.1.1" ]
}]'
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: google-containers/toolbox
ports:
- containerPort: 80
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
automountServiceAccountToken: false
nodeName: ${node-name}
IPVLAN 사용자 정의 리소스 예
ipvlan-cr.yaml:
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingPolicy
metadata:
name: static-rt
namespace: hbn
spec:
terms:
- name: learned-from-static
from:
protocol: static
then:
accept: true
default:
accept: false
---
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingInstance
metadata:
name: ipvlan-ri-master
namespace: hbn
spec:
crpdGroupReference:
name: crpd-master
instanceType: vrf
interfaces:
- ipvlan-vrf
vrfTarget:
importExport:
name: target:11:11
routingOptions:
routeDistinguisherId: 11:11
---
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingInstance
metadata:
name: ipvlan-ri-worker
namespace: hbn
spec:
crpdGroupReference:
name: crpd-worker
instanceType: vrf
interfaces:
- ipvlan-vrf
vrfTarget:
importExport:
name: target:11:11
routingOptions:
routeDistinguisherId: 11:11
---
apiVersion: core.svcmodule.juniper.net/v1
kind: EVPN
metadata:
name: ipvlan-evpn-master
namespace: hbn
spec:
encapsulation: vxlan
exportPolicy:
name: static-rt
routingInstanceParent:
name: ipvlan-ri-master
---
apiVersion: core.svcmodule.juniper.net/v1
kind: EVPN
metadata:
name: ipvlan-evpn-worker
namespace: hbn
spec:
encapsulation: vxlan
exportPolicy:
name: static-rt
routingInstanceParent:
name: ipvlan-ri-worker
---
apiVersion: core.svcmodule.juniper.net/v1
kind: InterfaceGroup
metadata:
name: jcnr-ipvlan-master
namespace: hbn
spec:
instanceParent:
parentType: jcnr
reference:
name: crpd-master
interfaceName: ipvlan-vrf
interfaceTemplate:
families:
- addressFamily: inet
ipAddress: 10.19.19.1/24
---
apiVersion: core.svcmodule.juniper.net/v1
kind: InterfaceGroup
metadata:
name: jcnr-ipvlan-worker
namespace: hbn
spec:
instanceParent:
parentType: jcnr
reference:
name: crpd-worker
interfaceName: ipvlan-vrf
interfaceTemplate:
families:
- addressFamily: inet
ipAddress: 10.19.19.1/24
---
apiVersion: configplane.juniper.net/v1
kind: NodeConfiglet
metadata:
labels:
core.juniper.net/nodeName: <node-name where ipvlan-pod-1 will be scheduled>
name: ipvlan-addon-node-1
namespace: hbn
spec:
clis:
- set routing-instances <name of RI to which node belongs to> routing-options static route 10.19.19.101/32 nexthop 10.19.19.101
nodeName: <node-name where ipvlan-pod-1 will be scheduled>
---
apiVersion: configplane.juniper.net/v1
kind: NodeConfiglet
metadata:
labels:
core.juniper.net/nodeName: <node-name where ipvlan-pod-2 will be scheduled>
name: ipvlan-addon-node-2
namespace: hbn
spec:
clis:
- set routing-instances <name of RI to which node belongs to> routing-options static route 10.19.19.102/32 nexthop 10.19.19.102
nodeName: <node-name where ipvlan-pod-2 will be scheduled>
IPVLAN 포드 예
ipvlan-pods.yaml:
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: ipvlan-conf
spec:
config: '{
"cniVersion": "0.3.1",
"name": "ipvlan-conf",
"type": "ipvlan",
"master": "ipvlan-host",
"mode": "l2",
"ipam": {
"type": "static"
}
}'
---
apiVersion: v1
kind: Pod
metadata:
name: ipvlan-pod-1
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "ipvlan-conf",
"ips": [ "10.19.19.101/24" ]
}]'
spec:
containers:
- name: samplepod-1
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: google-containers/toolbox
ports:
- containerPort: 80
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
automountServiceAccountToken: false
nodeName: ${node-name}
---
apiVersion: v1
kind: Pod
metadata:
name: ipvlan-pod-2
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "ipvlan-conf",
"ips": [ "10.19.19.102/24" ]
}]'
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: google-containers/toolbox
ports:
- containerPort: 80
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
automountServiceAccountToken: false
nodeName: ${node-name}