Cloud-Native Router Operator Service Module: Host-Based Routing Example Configuration Files
This section contains example scripts and configuration files that you can use to create a service module host-based routing deployment.
Host-Based Routing: Example Scripts and Configuration Files to Install cRPD
- Example cRPD Installation Script
- Example Control Plane Node Configuration File
- Example Worker Node Configuration File
Example cRPD Installation Script
The following example script installs cRPD on the node where you run the script. If cRPD is already running on the node, the script removes the running cRPD instance and installs a new instance. If the script finds an existing cRPD configuration file, it reuses that file. Otherwise, it generates the initial configuration from the template file specified by the CONFIG_TEMPLATE variable that you set in the script.
Run this script with the proper CONFIG_TEMPLATE configuration file on every node in your cluster.
We provide sample CONFIG_TEMPLATE configuration files in Example Control Plane Node Configuration File and Example Worker Node Configuration File.
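The script expands the ${...} placeholders in the template with envsubst, so the corresponding variables must be exported in the environment before you run it. The wrapper below is a minimal illustrative sketch for control plane node 1, using the IPv4 values from Table 1 (export the VETH6_* values as well if you run dual stack):
# Illustrative wrapper: export the Table 1 values for control plane node 1,
# then run the installation script. envsubst substitutes only variables
# that are present in the environment.
export LO0_IP_POOL=10.12.0.0/24 LO0_IP=10.12.0.1
export VETH_CRPD=10.1.1.2 VETH_PREFIX=10.1.1.0 VETH_K8S=10.1.1.1
export POD_CIDR=192.168.0.0/24
export MASTER1_EVPN_PEER_IP=10.12.0.2 MASTER2_EVPN_PEER_IP=10.12.0.3
export WORKER1_EVPN_PEER_IP=10.12.0.4 WORKER2_EVPN_PEER_IP=10.12.0.5
export MASTER1_PEER_ENS4_IP=192.168.1.102 MASTER2_PEER_ENS4_IP=192.168.1.103
export WORKER1_PEER_ENS4_IP=192.168.1.104 WORKER2_PEER_ENS4_IP=192.168.1.105
bash install-crpd.sh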
install-crpd.sh:
#!/bin/bash
set -o nounset
set -o errexit
# Directory containing this script and the configuration templates
SCRIPT_DIR=$(cd -P "$(dirname "$0")"; pwd)
# Join an existing network namespace named crpd (must already exist)
NETWORK_NS="ns:/run/netns/crpd"
# Specify the config file. For example:
# ctl_plane_crpd_connectivity_template_5_node.conf or worker_crpd_connectivity_template_5_node.conf
CONFIG_TEMPLATE=ctl_plane_crpd_connectivity_template_5_node.conf
POD_NAME=crpd
CONTAINER_NAME=crpd01
# Remove existing pod
## Stop all containers in pod crpd
POD_ID=$(sudo podman pod ls -fname=${POD_NAME} -q)
if [ -n "$POD_ID" ]; then
    sudo podman pod stop ${POD_ID}
    sudo podman pod rm ${POD_ID}
fi
# Create Pod in NS (Tested with podman 4.6.2)
#
sudo podman pod create --name ${POD_NAME} --network ${NETWORK_NS}
# Create the config dir if it does not already exist. Do not delete an
# existing directory: the script reuses an existing juniper.conf if found.
CRPD_CONFIG_DIR=/etc/crpd/config
sudo mkdir -p ${CRPD_CONFIG_DIR}
if [[ -f ${CRPD_CONFIG_DIR}/juniper.conf || -f ${CRPD_CONFIG_DIR}/juniper.conf.gz ]]; then
    echo "conf file exists"
else
    echo "initialize with base config"
    # envsubst expands only variables that are exported in the environment
    envsubst < ${SCRIPT_DIR}/${CONFIG_TEMPLATE} > ${SCRIPT_DIR}/crpd_base_connectivity.conf
    sudo cp ${SCRIPT_DIR}/crpd_base_connectivity.conf ${CRPD_CONFIG_DIR}/juniper.conf
fi
# Create a persistent volume for cRPD logs (--ignore: no error if it exists)
sudo podman volume create crpd01-varlog --ignore
sudo podman run --rm -d --name ${CONTAINER_NAME} --pod ${POD_NAME} --privileged -v /etc/crpd/config:/config:Z -v crpd01-varlog:/var/log -it enterprise-hub.juniper.net/jcnr-container-prod/crpd:24.4R1.9
# List
sudo podman pod ps --ctr-status --ctr-names --ctr-ids
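After the script completes, a quick illustrative check confirms that the container is running and that cRPD loaded the configuration (crpd01 is the container name set in the script):
# Confirm the cRPD container is up, then check BGP from the cRPD CLI.
sudo podman ps --filter name=crpd01
sudo podman exec -it crpd01 cli show bgp summary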
Example Control Plane Node Configuration File
This configuration file is referenced by CONFIG_TEMPLATE in the cRPD installation script. There is one control plane node configuration file per control plane node. See Table 1 through Table 3 for the variable values to set for each control plane node.
ctl_plane_crpd_connectivity_template_5_node.conf:
groups {
base {
apply-flags omit;
system {
root-authentication {
encrypted-password "<encrypted_password>"
}
commit {
xpath;
constraints {
direct-access;
}
notification {
configuration-diff-format xml;
}
}
scripts {
action {
max-datasize 256m;
}
language python3;
}
services {
netconf {
ssh;
}
ssh {
root-login allow;
port 24;
}
}
license {
keys {
key "<crpd-license-key>";
}
}
}
}
connectivity {
interfaces {
lo0 {
mtu 9216;
unit 0 {
family inet {
address ${LO0_IP}/32;
}
}
}
veth-crpd {
mtu 9216;
unit 0 {
family inet {
address ${VETH_CRPD}/30;
}
# *** uncomment below if running dual stack ***
#family inet6 {
# address ${VETH6_CRPD}/126;
#}
}
}
}
policy-options {
policy-statement accept-podcidr {
term accept {
from {
route-filter ${POD_CIDR} orlonger;
}
then accept;
}
then reject;
}
policy-statement export-direct {
term 1 {
from {
route-filter ${LO0_IP_POOL} orlonger;
}
then accept;
}
then reject;
}
policy-statement export-evpn {
term 1 {
from protocol evpn;
then accept;
}
then reject;
}
policy-statement export-veth {
term 1 {
from {
protocol direct;
route-filter ${VETH_PREFIX}/30 exact;
}
then accept;
}
term 2 {
from protocol bgp;
then accept;
}
then {
# *** uncomment below if running dual stack ***
#next policy;
reject;
}
}
# *** uncomment below if running dual stack ***
#policy-statement export-veth-v6 {
# term 1 {
# from {
# protocol direct;
# route-filter ${VETH6_PREFIX}/126 exact;
# }
# then accept;
# }
# term 2 {
# from protocol bgp;
# then accept;
# }
# then reject;
#}
}
routing-instances {
master-calico-ri {
instance-type vrf;
protocols {
bgp {
group calico-bgprtrgrp-master {
multihop;
local-address ${VETH_CRPD};
import accept-podcidr;
export export-evpn;
remove-private no-peer-loop-check;
peer-as 64512;
local-as 64600;
neighbor ${VETH_K8S};
}
# *** uncomment below if running dual stack ***
#group calico-bgprtrgrp-master6 {
# multihop;
# local-address ${VETH6_CRPD};
# export export-evpn;
# remove-private no-peer-loop-check;
# peer-as 64512;
# local-as 64600;
# neighbor ${VETH6_K8S};
#}
}
evpn {
ip-prefix-routes {
advertise direct-nexthop;
encapsulation vxlan;
vni 4096;
# ***Include below line when running IPv4 only. Comment out if running dual stack.***
export export-veth;
# ***Include below line when running dual stack. Comment out if running IPv4 only.***
#export [ export-veth export-veth-v6 ];
route-attributes {
community {
import-action allow;
export-action allow;
}
}
}
}
}
interface veth-crpd;
vrf-target target:1:4;
}
}
routing-options {
route-distinguisher-id ${LO0_IP};
router-id ${LO0_IP};
}
protocols {
bgp {
group crpd-master-bgprtrgrp {
export export-direct;
peer-as 64500;
local-as 64500;
neighbor ${MASTER1_PEER_ENS4_IP};
neighbor ${MASTER2_PEER_ENS4_IP};
}
group crpd-worker-bgprtrgrp {
multihop;
export export-direct;
peer-as 64500;
local-as 64500;
neighbor ${WORKER1_PEER_ENS4_IP};
neighbor ${WORKER2_PEER_ENS4_IP};
}
group crpd-master-lo-bgprtrgrp {
local-address ${LO0_IP};
family evpn {
signaling;
}
peer-as 64600;
local-as 64600;
neighbor ${MASTER1_EVPN_PEER_IP};
neighbor ${MASTER2_EVPN_PEER_IP};
}
group crpd-worker-lo-bgprtrgrp {
local-address ${LO0_IP};
family evpn {
signaling;
}
peer-as 64600;
local-as 64600;
neighbor ${WORKER1_EVPN_PEER_IP};
neighbor ${WORKER2_EVPN_PEER_IP};
}
cluster ${LO0_IP};
}
}
}
}
apply-groups base;
apply-groups connectivity;

Table 1: Control Plane Node 1 Configuration Variables

| Variable | Setting |
|---|---|
| LO0_IP_POOL | 10.12.0.0/24 |
| LO0_IP | 10.12.0.1 |
| VETH_CRPD | 10.1.1.2 |
| VETH6_CRPD | 2001:db8:1::2 |
| VETH_PREFIX | 10.1.1.0 |
| VETH6_PREFIX | 2001:db8:1::0 |
| VETH_K8S | 10.1.1.1 |
| VETH6_K8S | 2001:db8:1::1 |
| POD_CIDR | 192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP | 10.12.0.2 |
| MASTER2_EVPN_PEER_IP | 10.12.0.3 |
| WORKER1_EVPN_PEER_IP | 10.12.0.4 |
| WORKER2_EVPN_PEER_IP | 10.12.0.5 |
| MASTER1_PEER_ENS4_IP | 192.168.1.102 |
| MASTER2_PEER_ENS4_IP | 192.168.1.103 |
| WORKER1_PEER_ENS4_IP | 192.168.1.104 |
| WORKER2_PEER_ENS4_IP | 192.168.1.105 |

Table 2: Control Plane Node 2 Configuration Variables

| Variable | Setting |
|---|---|
| LO0_IP_POOL | 10.12.0.0/24 |
| LO0_IP | 10.12.0.2 |
| VETH_CRPD | 10.1.2.2 |
| VETH6_CRPD | 2001:db8:2::2 |
| VETH_PREFIX | 10.1.2.0 |
| VETH6_PREFIX | 2001:db8:2::0 |
| VETH_K8S | 10.1.2.1 |
| VETH6_K8S | 2001:db8:2::1 |
| POD_CIDR | 192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP | 10.12.0.1 |
| MASTER2_EVPN_PEER_IP | 10.12.0.3 |
| WORKER1_EVPN_PEER_IP | 10.12.0.4 |
| WORKER2_EVPN_PEER_IP | 10.12.0.5 |
| MASTER1_PEER_ENS4_IP | 192.168.1.1 |
| MASTER2_PEER_ENS4_IP | 192.168.1.3 |
| WORKER1_PEER_ENS4_IP | 192.168.1.4 |
| WORKER2_PEER_ENS4_IP | 192.168.1.5 |

Table 3: Control Plane Node 3 Configuration Variables

| Variable | Setting |
|---|---|
| LO0_IP_POOL | 10.12.0.0/24 |
| LO0_IP | 10.12.0.3 |
| VETH_CRPD | 10.1.3.2 |
| VETH6_CRPD | 2001:db8:3::2 |
| VETH_PREFIX | 10.1.3.0 |
| VETH6_PREFIX | 2001:db8:3::0 |
| VETH_K8S | 10.1.3.1 |
| VETH6_K8S | 2001:db8:3::1 |
| POD_CIDR | 192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP | 10.12.0.1 |
| MASTER2_EVPN_PEER_IP | 10.12.0.2 |
| WORKER1_EVPN_PEER_IP | 10.12.0.4 |
| WORKER2_EVPN_PEER_IP | 10.12.0.5 |
| MASTER1_PEER_ENS4_IP | 192.168.1.1 |
| MASTER2_PEER_ENS4_IP | 192.168.1.2 |
| WORKER1_PEER_ENS4_IP | 192.168.1.4 |
| WORKER2_PEER_ENS4_IP | 192.168.1.5 |
Example Worker Node Configuration File
This configuration file is referenced by CONFIG_TEMPLATE in the cRPD installation script. There is one worker node configuration file per worker node. See Table 4 and Table 5 for the variable values to set for each worker node.
worker_crpd_connectivity_template_5_node.conf:
groups {
base {
apply-flags omit;
system {
root-authentication {
encrypted-password "<encrypted_password>"
}
commit {
xpath;
constraints {
direct-access;
}
notification {
configuration-diff-format xml;
}
}
scripts {
action {
max-datasize 256m;
}
language python3;
}
services {
netconf {
ssh;
}
ssh {
root-login allow;
port 24;
}
}
license {
keys {
key "<crpd_license_key>";
}
}
}
}
connectivity {
interfaces {
lo0 {
mtu 9216;
unit 0 {
family inet {
address ${LO0_IP}/32;
}
}
}
veth-crpd {
mtu 9216;
unit 0 {
family inet {
address ${VETH_CRPD}/30;
}
# *** uncomment below if running dual stack ***
#family inet6 {
# address ${VETH6_CRPD}/126;
#}
}
}
}
policy-options {
policy-statement accept-podcidr {
term accept {
from {
route-filter ${POD_CIDR} orlonger;
}
then accept;
}
then reject;
}
policy-statement export-direct {
term 1 {
from {
route-filter ${LO0_IP_POOL} orlonger;
}
then accept;
}
then reject;
}
policy-statement export-evpn {
term 1 {
from protocol evpn;
then accept;
}
then reject;
}
policy-statement export-veth {
term 1 {
from {
protocol direct;
route-filter ${VETH_PREFIX}/30 exact;
}
then accept;
}
term 2 {
from protocol bgp;
then accept;
}
then {
# *** uncomment below if running dual stack ***
#next policy;
reject;
}
}
# *** uncomment below if running dual stack ***
#policy-statement export-veth-v6 {
# term 1 {
# from {
# protocol direct;
# route-filter ${VETH6_PREFIX}/126 exact;
# }
# then accept;
# }
# term 2 {
# from protocol bgp;
# then accept;
# }
# then reject;
#}
}
routing-instances {
worker-calico-ri {
instance-type vrf;
protocols {
bgp {
group calico-bgprtrgrp-worker {
multihop;
local-address ${VETH_CRPD};
import accept-podcidr;
export export-evpn;
remove-private no-peer-loop-check;
peer-as 64512;
local-as 64600;
neighbor ${VETH_K8S};
}
# *** uncomment below if running dual stack ***
#group calico-bgprtrgrp-worker6 {
# multihop;
# local-address ${VETH6_CRPD};
# export export-evpn;
# remove-private no-peer-loop-check;
# peer-as 64512;
# local-as 64600;
# neighbor ${VETH6_K8S};
#}
}
evpn {
ip-prefix-routes {
advertise direct-nexthop;
encapsulation vxlan;
vni 4300;
# ***Include below line when running IPv4 only. Comment out if running dual stack.***
export export-veth;
# ***Include below line when running dual stack. Comment out if running IPv4 only.***
#export [ export-veth export-veth-v6 ];
route-attributes {
community {
import-action allow;
export-action allow;
}
}
}
}
}
interface veth-crpd;
vrf-target target:1:4;
}
}
routing-options {
route-distinguisher-id ${LO0_IP};
router-id ${LO0_IP};
}
protocols {
bgp {
group crpd-master-bgprtrgrp {
multihop;
export export-direct;
peer-as 64500;
local-as 64500;
neighbor ${MASTER1_PEER_ENS4_IP};
neighbor ${MASTER2_PEER_ENS4_IP};
neighbor ${MASTER3_PEER_ENS4_IP};
}
group crpd-master-lo-bgprtrgrp {
local-address ${LO0_IP};
family evpn {
signaling;
}
peer-as 64600;
local-as 64600;
neighbor ${MASTER1_EVPN_PEER_IP};
neighbor ${MASTER2_EVPN_PEER_IP};
neighbor ${MASTER3_EVPN_PEER_IP};
}
}
}
}
}
apply-groups base;
apply-groups connectivity;

Table 4: Worker Node 1 Configuration Variables

| Variable | Setting |
|---|---|
| LO0_IP_POOL | 10.12.0.0/24 |
| LO0_IP | 10.12.0.4 |
| VETH_CRPD | 10.1.4.2 |
| VETH6_CRPD | 2001:db8:4::2 |
| VETH_PREFIX | 10.1.4.0 |
| VETH6_PREFIX | 2001:db8:4::0 |
| VETH_K8S | 10.1.4.1 |
| VETH6_K8S | 2001:db8:4::1 |
| POD_CIDR | 192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP | 10.12.0.1 |
| MASTER2_EVPN_PEER_IP | 10.12.0.2 |
| MASTER3_EVPN_PEER_IP | 10.12.0.3 |
| MASTER1_PEER_ENS4_IP | 192.168.1.101 |
| MASTER2_PEER_ENS4_IP | 192.168.1.102 |
| MASTER3_PEER_ENS4_IP | 192.168.1.103 |

Table 5: Worker Node 2 Configuration Variables

| Variable | Setting |
|---|---|
| LO0_IP_POOL | 10.12.0.0/24 |
| LO0_IP | 10.12.0.5 |
| VETH_CRPD | 10.1.5.2 |
| VETH6_CRPD | 2001:db8:5::2 |
| VETH_PREFIX | 10.1.5.0 |
| VETH6_PREFIX | 2001:db8:5::0 |
| VETH_K8S | 10.1.5.1 |
| VETH6_K8S | 2001:db8:5::1 |
| POD_CIDR | 192.168.0.0/24 |
| MASTER1_EVPN_PEER_IP | 10.12.0.1 |
| MASTER2_EVPN_PEER_IP | 10.12.0.2 |
| MASTER3_EVPN_PEER_IP | 10.12.0.3 |
| MASTER1_PEER_ENS4_IP | 192.168.1.101 |
| MASTER2_PEER_ENS4_IP | 192.168.1.102 |
| MASTER3_PEER_ENS4_IP | 192.168.1.103 |
Host-Based Routing: Example Calico Configuration
BGP Configuration Example
bgpconfig.yaml:
apiVersion: crd.projectcalico.org/v1
kind: BGPConfiguration
metadata:
  name: default
spec:
  asNumber: 64512
  listenPort: 1179
  logSeverityScreen: Debug
  nodeToNodeMeshEnabled: false
IP Pool Configuration Example
ippool-v4.yaml:
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
  name: default-ipv4-ippool
spec:
  allowedUses:
  - Workload
  blockSize: 26
  cidr: 192.168.7.0/24
  ipipMode: Never
  natOutgoing: true
  nodeSelector: all()
  vxlanMode: Never
ippool-v6.yaml:
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
  name: default-ipv6-ippool
spec:
  allowedUses:
  - Workload
  blockSize: 122
  cidr: 2001:db8:42:0::/56
  ipipMode: Never
  natOutgoing: true
  nodeSelector: all()
  vxlanMode: Never
BGP Peer Configuration Example
bgppeers-v4.yaml:
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node1
spec:
  sourceAddress: None
  asNumber: 64600
  node: node1
  peerIP: 10.1.1.2:179
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node2
spec:
  sourceAddress: None
  asNumber: 64600
  node: node2
  peerIP: 10.1.2.2:179
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node3
spec:
  sourceAddress: None
  asNumber: 64600
  node: node3
  peerIP: 10.1.3.2:179
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node4
spec:
  sourceAddress: None
  asNumber: 64600
  node: node4
  peerIP: 10.1.4.2:179
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: node5
spec:
  sourceAddress: None
  asNumber: 64600
  node: node5
  peerIP: 10.1.5.2:179
bgppeers-v6.yaml:
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node1-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node1
  peerIP: '[2001:db8:1::2]:179'
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node2-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node2
  peerIP: '[2001:db8:2::2]:179'
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node3-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node3
  peerIP: '[2001:db8:3::2]:179'
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node4-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node4
  peerIP: '[2001:db8:4::2]:179'
---
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  # Change for every node
  name: node5-ipv6
spec:
  sourceAddress: None
  asNumber: 64600
  node: node5
  peerIP: '[2001:db8:5::2]:179'
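Calico's BGP daemon listens on port 1179 here, presumably because cRPD already owns port 179 inside the shared network namespace; the BGPPeer entries correspondingly target cRPD's port 179. The apply-and-verify sequence below is illustrative and assumes calicoctl is installed on the nodes:
# Apply the BGP configuration, IP pools, and per-node BGP peers, then
# verify on each node that the sessions toward cRPD are Established.
kubectl apply -f bgpconfig.yaml
kubectl apply -f ippool-v4.yaml -f ippool-v6.yaml
kubectl apply -f bgppeers-v4.yaml -f bgppeers-v6.yaml
sudo calicoctl node status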
Host-Based Routing: Example VxLAN and Route Target Pools
VxLAN Pool Example
vxlan-pool.yaml:
apiVersion: core.svcmodule.juniper.net/v1
kind: Pool
metadata:
name: default-vni
namespace: svcmodule-system
spec:
vxlanId:
start: 4096
end: 16777215
Route Target Pool Example
rt-pool.yaml:
apiVersion: core.svcmodule.juniper.net/v1
kind: Pool
metadata:
name: default-route-target-number
namespace: svcmodule-system
spec:
routeTarget:
start: 8000000
size: 2048
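A minimal sketch of loading and listing the two pools follows; the fully qualified pools resource name is inferred from the apiVersion shown above, so treat it as an assumption.
# Create the VXLAN ID pool and the route-target pool, then list them.
kubectl apply -f vxlan-pool.yaml -f rt-pool.yaml
kubectl get pools.core.svcmodule.juniper.net -n svcmodule-system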
Host-Based Routing: Example JCNR Configuration
JCNR Configuration
jcnr-config.yaml:
apiVersion: configplane.juniper.net/v1
kind: Jcnr
metadata:
name: crpd-master
namespace: hbn
spec:
replicas: 3
jcnrTemplate:
externallyInitialized: true
loopbackAddressInitialized: true
nodeSelector:
master: ""
---
apiVersion: configplane.juniper.net/v1
kind: Jcnr
metadata:
name: crpd-worker
namespace: hbn
spec:
replicas: 2
jcnrTemplate:
externallyInitialized: true
loopbackAddressInitialized: true
nodeSelector:
worker: ""Host-Based Routing: Example Secondary CNI Configuration Files
Host-Based Routing: Example Secondary CNI Configuration Files
- Example MACVLAN Custom Resource
- Example MACVLAN Pods
- Example IPVLAN Custom Resource
- Example IPVLAN Pods
Example MACVLAN Custom Resource
macvlan-cr.yaml:
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingInstance
metadata:
name: macvlan-ri-master
namespace: hbn
spec:
crpdGroupReference:
name: crpd-master
instanceType: mac-vrf
vrfTarget:
importExport:
name: target:64512:8000000
routingOptions:
routeDistinguisherId: 192.168.100.2:11
bridgeDomains:
- name: test-domain
interface: vrf-end
vLanId: 100
vni: 4200
---
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingInstance
metadata:
name: macvlan-ri-worker
namespace: hbn
spec:
crpdGroupReference:
name: crpd-worker
instanceType: mac-vrf
vrfTarget:
importExport:
name: target:64512:8000000
routingOptions:
routeDistinguisherId: 192.168.100.2:11
bridgeDomains:
- name: test-domain
interface: vrf-end
vLanId: 100
vni: 4200
---
apiVersion: core.svcmodule.juniper.net/v1
kind: EVPN
metadata:
name: macvlan-evpn-master
namespace: hbn
spec:
encapsulation: vxlan
defaultGateway: no-gateway-community
routingInstanceParent:
name: macvlan-ri-master
---
apiVersion: core.svcmodule.juniper.net/v1
kind: EVPN
metadata:
name: macvlan-evpn-worker
namespace: hbn
spec:
encapsulation: vxlan
defaultGateway: no-gateway-community
routingInstanceParent:
name: macvlan-ri-worker
---
apiVersion: core.svcmodule.juniper.net/v1
kind: InterfaceGroup
metadata:
name: jcnr-macvlan-master
namespace: hbn
spec:
instanceParent:
parentType: jcnr
reference:
name: crpd-master
interfaceName: vrf-end
interfaceTemplate:
encapsulation: vlan-bridge
families:
- addressFamily: bridge
---
apiVersion: core.svcmodule.juniper.net/v1
kind: InterfaceGroup
metadata:
name: jcnr-macvlan-worker
namespace: hbn
spec:
instanceParent:
parentType: jcnr
reference:
name: crpd-worker
interfaceName: vrf-end
interfaceTemplate:
encapsulation: vlan-bridge
families:
- addressFamily: bridge
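The MACVLAN custom resources are applied like any other manifest; the verification below is illustrative, and the plural resource names are inferred from the kinds rather than confirmed.
# Apply the MACVLAN routing instances, EVPN settings, and interface groups,
# then confirm the objects were created in the hbn namespace.
kubectl apply -f macvlan-cr.yaml
kubectl get routinginstances,evpns,interfacegroups -n hbn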
Example MACVLAN Pods
macvlan-pods.yaml:
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: macvlan-conf
spec:
config: '{
"cniVersion": "0.3.1",
"plugins": [
{
"type": "macvlan",
"capabilities": { "ips": true },
"master": "host-end",
"mode": "bridge",
"ipam": {
"type": "static",
"routes": [
{
"dst": "0.0.0.0/0",
"gw": "10.9.1.1"
}
]
}
}, {
"capabilities": { "mac": true },
"type": "tuning"
}
]
}'
---
apiVersion: v1
kind: Pod
metadata:
name: l2-pod-1
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan-conf",
"ips": [ "10.9.1.101/24" ],
"mac": "00:53:57:49:47:aa",
"gateway": [ "10.9.1.1" ]
}]'
spec:
containers:
- name: l2-pod-1
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: google-containers/toolbox
ports:
- containerPort: 80
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
automountServiceAccountToken: false
nodeName: ${node-name}
---
apiVersion: v1
kind: Pod
metadata:
name: l2-pod-2
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "macvlan-conf",
"ips": [ "10.9.1.102/24" ],
"mac": "00:53:57:49:47:bb",
"gateway": [ "10.9.1.1" ]
}]'
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: google-containers/toolbox
ports:
- containerPort: 80
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
automountServiceAccountToken: false
nodeName: ${node-name}
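With both pods scheduled (after you replace the ${node-name} placeholders), a cross-pod ping is a quick, illustrative check of L2 reachability through the bridge domain:
# Apply the NetworkAttachmentDefinition and both pods, then ping from
# l2-pod-1 (10.9.1.101) to l2-pod-2 (10.9.1.102) over the macvlan interface.
kubectl apply -f macvlan-pods.yaml
kubectl exec l2-pod-1 -- ping -c 3 10.9.1.102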
Example IPVLAN Custom Resource
ipvlan-cr.yaml:
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingPolicy
metadata:
name: static-rt
namespace: hbn
spec:
terms:
- name: learned-from-static
from:
protocol: static
then:
accept: true
default:
accept: false
---
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingInstance
metadata:
name: ipvlan-ri-master
namespace: hbn
spec:
crpdGroupReference:
name: crpd-master
instanceType: vrf
interfaces:
- ipvlan-vrf
vrfTarget:
importExport:
name: target:11:11
routingOptions:
routeDistinguisherId: 11:11
---
apiVersion: core.svcmodule.juniper.net/v1
kind: RoutingInstance
metadata:
name: ipvlan-ri-worker
namespace: hbn
spec:
crpdGroupReference:
name: crpd-worker
instanceType: vrf
interfaces:
- ipvlan-vrf
vrfTarget:
importExport:
name: target:11:11
routingOptions:
routeDistinguisherId: 11:11
---
apiVersion: core.svcmodule.juniper.net/v1
kind: EVPN
metadata:
name: ipvlan-evpn-master
namespace: hbn
spec:
encapsulation: vxlan
exportPolicy:
name: static-rt
routingInstanceParent:
name: ipvlan-ri-master
---
apiVersion: core.svcmodule.juniper.net/v1
kind: EVPN
metadata:
name: ipvlan-evpn-worker
namespace: hbn
spec:
encapsulation: vxlan
exportPolicy:
name: static-rt
routingInstanceParent:
name: ipvlan-ri-worker
---
apiVersion: core.svcmodule.juniper.net/v1
kind: InterfaceGroup
metadata:
name: jcnr-ipvlan-master
namespace: hbn
spec:
instanceParent:
parentType: jcnr
reference:
name: crpd-master
interfaceName: ipvlan-vrf
interfaceTemplate:
families:
- addressFamily: inet
ipAddress: 10.19.19.1/24
---
apiVersion: core.svcmodule.juniper.net/v1
kind: InterfaceGroup
metadata:
name: jcnr-ipvlan-worker
namespace: hbn
spec:
instanceParent:
parentType: jcnr
reference:
name: crpd-worker
interfaceName: ipvlan-vrf
interfaceTemplate:
families:
- addressFamily: inet
ipAddress: 10.19.19.1/24
---
apiVersion: configplane.juniper.net/v1
kind: NodeConfiglet
metadata:
labels:
core.juniper.net/nodeName: <node-name where ipvlan-pod-1 will be scheduled>
name: ipvlan-addon-node-1
namespace: hbn
spec:
clis:
- set routing-instances <name of the RI to which the node belongs> routing-options static route 10.19.19.101/32 next-hop 10.19.19.101
nodeName: <node-name where ipvlan-pod-1 will be scheduled>
---
apiVersion: configplane.juniper.net/v1
kind: NodeConfiglet
metadata:
labels:
core.juniper.net/nodeName: <node-name where ipvlan-pod-2 will be scheduled>
name: ipvlan-addon-node-2
namespace: hbn
spec:
clis:
- set routing-instances <name of the RI to which the node belongs> routing-options static route 10.19.19.102/32 next-hop 10.19.19.102
nodeName: <node-name where ipvlan-pod-2 will be scheduled>
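As with the MACVLAN resources, the IPVLAN objects can be applied and inspected as sketched below; this assumes the placeholders in ipvlan-cr.yaml have been filled in, and the nodeconfiglets resource name is inferred from the NodeConfiglet kind.
# Apply the routing policy, routing instances, EVPN settings, interface
# groups, and per-node static-route configlets, then list the configlets.
kubectl apply -f ipvlan-cr.yaml
kubectl get nodeconfiglets -n hbn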
Example IPVLAN Pods
ipvlan-pods.yaml:
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: ipvlan-conf
spec:
config: '{
"cniVersion": "0.3.1",
"name": "ipvlan-conf",
"type": "ipvlan",
"master": "ipvlan-host",
"mode": "l2",
"ipam": {
"type": "static"
}
}'
---
apiVersion: v1
kind: Pod
metadata:
name: ipvlan-pod-1
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "ipvlan-conf",
"ips": [ "10.19.19.101/24" ]
}]'
spec:
containers:
- name: samplepod-1
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: google-containers/toolbox
ports:
- containerPort: 80
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
automountServiceAccountToken: false
nodeName: ${node-name}
---
apiVersion: v1
kind: Pod
metadata:
name: ipvlan-pod-2
annotations:
k8s.v1.cni.cncf.io/networks: '[
{ "name": "ipvlan-conf",
"ips": [ "10.19.19.102/24" ]
}]'
spec:
containers:
- name: samplepod
command: ["/bin/bash", "-c", "trap : TERM INT; sleep infinity & wait"]
image: google-containers/toolbox
ports:
- containerPort: 80
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
automountServiceAccountToken: false
nodeName: ${node-name}
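A final illustrative check mirrors the MACVLAN example: after the placeholders are filled in and both pods are running, ping between the two IPVLAN pods.
# Apply the NetworkAttachmentDefinition and both pods, then verify
# reachability from ipvlan-pod-1 (10.19.19.101) to ipvlan-pod-2 (10.19.19.102).
kubectl apply -f ipvlan-pods.yaml
kubectl exec ipvlan-pod-1 -- ping -c 3 10.19.19.102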