Appendix A: Sample Configuration Files
Sample site.yml Configuration File
# Copyright 2018 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#
global:
# List of DNS nameservers
dns:
# Google Public DNS
- "8.8.8.8"
- "8.8.4.4"
# List of NTP time servers
ntp:
# public pool.ntp.org
- "0.pool.ntp.org"
- "1.pool.ntp.org"
- "2.pool.ntp.org"
- "3.pool.ntp.org"
# Timezone for all servers
timezone: 'America/Los_Angeles'
rhel:
# Contrail Cloud Activation Key
# These details are provided when you request an activation key from
# contrail cloud subscriptions <contrail_cloud_subscriptions@juniper.net>
#
satellite:
#SATELLITE_KEY should be defined in vault-data.yml file
#SATELLITE_ORG
organization: "ContrailCloud"
#SATELLITE_FQDN
fqdn: contrail-cloud-satellite.juniper.net
# DNS domain information.
# Must be unique for every deployment to avoid name conflicts.
# Need not be a registered DNS domain.
domain: "my.unique.domain"
jumphost:
network:
# network used for provisioning (PXE booting) servers
provision:
# jumphost nic to be used for provisioning (PXE booting) servers
nic: eno1
control_hosts:
# Contains a list of label to disk mappings for roles
disk_mapping:
# the control host always uses the "baremetal" role
baremetal:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
#
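# For illustration, a hypothetical wwn-based mapping could look like the
# following (placeholder WWN values; see samples/features/label-disk/site.yml
# for a complete example):
#- label: spinning-0
#  wwn:
#    - "0x50014ee2b1234567"   # WWN of this disk on host1
#    - "0x50014ee2b89abcde"   # WWN of the matching disk on host2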
- label: spinning-0
name: /dev/sdb
- label: spinning-1
name: /dev/sdc
- label: spinning-2
name: /dev/sdd
- label: spinning-3
name: /dev/sde
- label: ssd-0
hctl: "0:2:3:0"
storage:
# Define a set of disk groups that can be referenced for VM virtual disk allocations
# These become virsh storage pools on the control host
# Each pool has:
# type: Either "dir" or "logical".
# "dir" resides on /var/lib/libvirt/images.
# "logical" is a LVM volume placed on the list of "disk".
# disk: List of disk devices to use for the pool
# There is a built-in storage type called "default_dir_pool" which resides on /var/lib/libvirt/images.
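# For illustration, a hypothetical directory-backed pool could be defined as
# below (no "disk" list should be needed, since "dir" pools reside on
# /var/lib/libvirt/images):
#  image_storage:
#    type: dir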
hdd_storage:
type: logical
disk:
- "/dev/disk/by-alias/spinning-0"
- "/dev/disk/by-alias/spinning-1"
- "/dev/disk/by-alias/spinning-2"
- "/dev/disk/by-alias/spinning-3"
ssd_storage:
type: logical
disk:
- "/dev/disk/by-alias/ssd-0"
vm:
# VM for the OpenStack Controller role
control:
disk:
# Root disk
vda:
# Virsh storage pool (see storage above)
pool: hdd_storage
# VMs for ContrailController role
contrail-controller:
disk:
# Root disk
vda:
# Virsh storage pool (see storage above)
pool: hdd_storage
# VM for ContrailTsn role
contrail-tsn:
disk:
# Root disk
vda:
# Virsh storage pool (see storage above)
pool: hdd_storage
# VM for ContrailAnalytics role
contrail-analytics:
disk:
# Root disk
vda:
# Virsh storage pool (see storage above)
pool: hdd_storage
# VM for ContrailAnalyticsDatabase role
contrail-analytics-database:
disk:
# Root disk
vda:
# Virsh storage pool (see storage above)
pool: hdd_storage
# Analytics database journal (ssd when possible)
vdb:
# Virsh storage pool (see storage above)
pool: ssd_storage
# Analytics data (large capacity)
vdc:
# Virsh storage pool (see storage above)
pool: hdd_storage
# VM for AppFormix controller role
appformix-controller:
disk:
# Root disk
vda:
# Virsh storage pool (see storage above)
pool: hdd_storage
compute_hosts:
sriov:
# Enable SR-IOV support
enabled: true
# Enable SR-IOV with DPDK
# Contrail vRouter mode:
# the supported value is "dpdk"; any other value means the kernel vRouter is used
mode: dpdk
# SR-IOV NumVFs ("<interface>:<count>"), separated by commas
num_vf:
- "ens2f1:7"
#NovaPCIPassthrough settings
pci_passthrough:
- devname: "ens2f1"
physical_network: "sriov1"
root_disk:
# Define root disk for the listed ironic profiles.
# The default of "/dev/sda" will be used if there is no
# specific profile definition
ComputeKernel0Hw0:
name: "/dev/sda"
ComputeKernel0Hw1:
name: "/dev/sda"
ComputeKernel1Hw1:
name: "/dev/sda"
ComputeKernel1Hw0:
name: "/dev/sda"
ComputeDpdk0Hw2:
name: "/dev/sda"
ComputeDpdk1Hw3:
name: "/dev/sda"
ComputeSriov0Hw4:
name: "/dev/sda"
ComputeSriov1Hw5:
name: "/dev/sda"
resource:
minimal_disk:
# This value will be used as the local_gb size for the listed ironic profiles
# If not defined for a profile then the default will be used
ComputeKernel0Hw0: 50
ComputeKernel0Hw1: 50
ComputeKernel1Hw1: 50
ComputeKernel1Hw0: 50
ComputeDpdk0Hw2: 50
ComputeDpdk1Hw3: 50
ComputeSriov0Hw4: 50
ComputeSriov1Hw5: 50
storage_hosts:
root_disk:
# Define root disk for the listed ironic profiles.
# The default of "/dev/sda" will be used if there is no
# specific profile definition
CephStorage0Hw6:
name: "/dev/sda"
CephStorage1Hw7:
name: "/dev/sda"
undercloud:
nova:
# Nova flavor definitions for roles
flavor:
CephStorage0Hw6:
cpu: 1
memory: 4
disk: 40
ephemeral: 0
CephStorage1Hw7:
cpu: 1
memory: 4
disk: 40
ephemeral: 0
ComputeKernel0Hw0:
cpu: 8
memory: 24
disk: 40
ephemeral: 0
ComputeKernel0Hw1:
cpu: 8
memory: 24
disk: 40
ephemeral: 0
ComputeKernel1Hw1:
cpu: 8
memory: 24
disk: 40
ephemeral: 0
ComputeKernel1Hw0:
cpu: 8
memory: 24
disk: 40
ephemeral: 0
ComputeDpdk0Hw2:
cpu: 8
memory: 24
disk: 40
ephemeral: 0
ComputeDpdk1Hw3:
cpu: 8
memory: 24
disk: 40
ephemeral: 0
ComputeSriov0Hw4:
cpu: 8
memory: 24
disk: 40
ephemeral: 0
ComputeSriov1Hw5:
cpu: 8
memory: 24
disk: 40
ephemeral: 0
overcloud:
# Contains a list of label to disk mappings for roles.
# When Ceph Storage is disabled, compute-related roles (Compute* and
# ComputeDpdk* roles) will use any disks labeled with
# "ephemeral-<digits>" for local Nova ephemeral storage.
disk_mapping:
ComputeKernel:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeKernel0Hw0:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeKernel1Hw0:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeKernel1Hw1:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeKernel0Hw1:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeDpdk:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeDpdk0Hw2:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeDpdk1Hw3:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeSriov:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeSriov0Hw4:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
ComputeSriov1Hw5:
# Mapping of labels to disk devices. The label is assigned to the disk
# device so that the disk can be referenced by the alias in other
# configurations, for example /dev/disk/by-alias/<label>
# Each list element contains:
# label: label to assign
# And one of the following elements:
# name: disk device path (e.g. /dev/sdb)
# hctl: alternative notation for disk paths specifying the SCSI address (Host, Channel, Target, and Lun). The HCTL can be found with the lsscsi (or lspci) command or in introspection data
# wwn: list of disk WWNs across hosts that should have the same label, e.g. host1_disk1_wwn, host2_disk1_wwn (the WWN can be found in introspection data)
# Example of wwn usage can be found in samples/features/label-disk/site.yml
- label: ephemeral-0
hctl: '5:0:0:0'
- label: ephemeral-1
hctl: '6:0:0:0'
- label: ephemeral-2
hctl: '7:0:0:0'
- label: ephemeral-3
hctl: '8:0:0:0'
network:
# The external network is used to reach the overcloud APIs from outside the infrastructure.
external:
# Network name used by TripleO Heat Templates
heat_name: External
# CIDR (IP/prefix) for the external network subnet
# Corresponds to the ExternalIpSubnet heat property
cidr: "10.10.10.64/26"
# Default route for the external network
# Corresponds to the ExternalInterfaceDefaultRoute heat property
gateway: "10.10.10.126"
# VLAN tag for the external network
# Corresponds to the ExternalNetworkVlanID heat property
vlan: 18
# Floating virtual IP for the OpenStack APIs on the external network
# Corresponds to the PublicVirtualFixedIPs heat property
vip: "10.10.10.100"
# DHCP pool for the external network
# Be sure that the range is large enough to accommodate all nodes in the external network
pool:
# Range start for the DHCP pool
start: "10.10.10.70"
# Range end for the DHCP pool
end: "10.10.10.99"
# MTU for external network
# Corresponds to the ExternalNetworkMtu heat property
mtu: 1500
# List of roles that can be on this network
role:
- Controller
- AppformixController
# The internal API network is used for control plane signalling and service API calls
internal_api:
# Network name used by TripleO Heat Templates
heat_name: InternalApi
# VLAN tag for the internal API network
# Corresponds to the InternalApiNetworkVlanID heat property
vlan: 226
# CIDR (IP/prefix) for the internal api supernet
# Corresponds to the InternalApiSupernet heat property
# The supernet is used in spine/leaf configurations.
# It accommodates all related leaf networks, e.g. internal_api0 and internal_api1,
# and is used to create the static routes between leafs.
# The supernet is defined only for the main network, not per leaf.
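# For illustration, a leaf 0 node would receive a static route to this supernet
# via its leaf-local gateway (values taken from the internal_api0 subnet below):
#   routes:
#     - ip_netmask: 172.16.0.0/16   # InternalApiSupernet
#       next_hop: 172.16.1.1        # InternalApi0InterfaceDefaultRoute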
supernet: "172.16.0.0/16"
# CIDR (IP/prefix) for the internal api network subnet
# Corresponds to the InternalApiIpSubnet heat property
cidr: "172.16.0.0/24"
# Default route for the internal api network
# Corresponds to the InternalApiInterfaceDefaultRoute heat property
gateway: 172.16.0.1
# MTU for internal api network
# Corresponds to the InternalApiNetworkMtu heat property
mtu: 1500
# DHCP pool for the internal api network
# Be sure that the range is large enough to accommodate all nodes in the internal api network
pool:
# Range start for the DHCP pool
start: 172.16.0.100
# Range end for the DHCP pool
end: 172.16.0.160
# Floating virtual IP for the OpenStack APIs on the internal api network
# Corresponds to the InternalApiVirtualFixedIPs heat property
vip: 172.16.0.90
# List of roles that can be on this network
role:
- Controller
- ContrailController
- ContrailAnalytics
- ContrailAnalyticsDatabase
- ContrailTsn
- AppformixController
# Leaf 0 subnet of the internal_api network
internal_api0:
# Network name used by TripleO Heat Templates
heat_name: InternalApi0
# VLAN tag for the internal API 0 network
# Corresponds to the InternalApi0NetworkVlanID heat property
vlan: 229
# CIDR (IP/prefix) for the internal api 0 network subnet
# Corresponds to the InternalApi0IpSubnet heat property
cidr: "172.16.1.0/24"
# Default route for the internal api 0 network
# Corresponds to the InternalApi0InterfaceDefaultRoute heat property
gateway: 172.16.1.1
# MTU for internal api 0 network
# Corresponds to the InternalApi0NetworkMtu heat property
mtu: 1500
# DHCP pool for the internal api 0 network
# Be sure that the range is large enough to accommodate all nodes in the internal api network
pool:
# Range start for the DHCP pool
start: 172.16.1.100
# Range end for the DHCP pool
end: 172.16.1.200
# List of roles that can be on this network
role:
- ComputeDpdk0Hw2
- ComputeSriov0Hw4
- ComputeKernel0Hw0
- ComputeKernel0Hw1
# Leaf 1 subnet of the internal_api network
internal_api1:
# Network name used by TripleO Heat Templates
heat_name: InternalApi1
# VLAN tag for the internal API 1 network
# Corresponds to the InternalApi1NetworkVlanID heat property
vlan: 449
# CIDR (IP/prefix) for the internal api 1 network subnet
# Corresponds to the InternalApi1IpSubnet heat property
cidr: "172.16.2.0/24"
# Default route for the internal api 1 network
# Corresponds to the InternalApi1InterfaceDefaultRoute heat property
gateway: 172.16.2.1
# MTU for internal api 1 network
# Corresponds to the InternalApi1NetworkMtu heat property
mtu: 1500
# DHCP pool for the internal api 1 network
# Be sure that the range is large enough to accommodate all nodes in the internal api network
pool:
# Range start for the DHCP pool
start: 172.16.2.100
# Range end for the DHCP pool
end: 172.16.2.200
# List of roles that can be on this network
role:
- ComputeDpdk1Hw3
- ComputeSriov1Hw5
- ComputeKernel1Hw1
- ComputeKernel1Hw0
# The management network is defined for backwards-compatibility in RHOSP and is not
# used by default by any roles.
management:
# Network name used by TripleO Heat Templates
heat_name: Management
# VLAN tag for the management network
# Corresponds to the ManagementNetworkVlanID heat property
vlan: 225
# CIDR (IP/prefix) for the network subnet
# Corresponds to the ManagementIpSubnet heat property
cidr: "192.168.1.0/24"
# MTU for the network
# Corresponds to the ManagementNetworkMtu heat property
mtu: 1500
# DHCP pool for the network
# Be sure that the range is large enough to accommodate all nodes in the network
pool:
# Range start for the DHCP pool
start: 192.168.1.100
# Range end for the DHCP pool
end: 192.168.1.200
# The storage network is used for Compute storage access
storage:
# Network name used by TripleO Heat Templates
heat_name: Storage
# VLAN tag for the storage network
# Corresponds to the StorageNetworkVlanID heat property
vlan: 227
supernet: "172.19.0.0/16"
cidr: "172.19.0.0/24"
gateway: 172.19.0.1
mtu: 1500
pool:
start: 172.19.0.100
end: 172.19.0.200
# List of roles that can be on this network
role:
- Controller
- ContrailTsn
# Leaf 0 subnet of the storage network
storage0:
# Network name used by TripleO Heat Templates
heat_name: Storage0
vlan: 223
cidr: "172.19.1.0/24"
gateway: 172.19.1.1
mtu: 1500
pool:
start: 172.19.1.100
end: 172.19.1.200
# List of roles that can be on this network
role:
- ComputeDpdk0Hw2
- ComputeSriov0Hw4
- CephStorage0Hw6
- ComputeKernel0Hw0
- ComputeKernel0Hw1
# Leaf 1 subnet of the storage network
storage1:
# Network name used by TripleO Heat Templates
heat_name: Storage1
vlan: 443
cidr: "172.19.2.0/24"
gateway: 172.19.2.1
mtu: 1500
pool:
start: 172.19.2.100
end: 172.19.2.200
# List of roles that can be on this network
role:
- ComputeDpdk1Hw3
- ComputeSriov1Hw5
- CephStorage1Hw7
- ComputeKernel1Hw1
- ComputeKernel1Hw0
# The storage management network is used for storage operations such as replication
storage_mgmt:
# Network name used by TripleO Heat Templates
heat_name: StorageMgmt
# VLAN tag for the storage management network
# Corresponds to the StorageMgmtNetworkVlanID heat property
vlan: 224
supernet: "172.20.0.0/16"
cidr: "172.20.0.0/24"
gateway: 172.20.0.1
mtu: 1500
pool:
start: 172.20.0.100
end: 172.20.0.200
# List of roles that can be on this network
role:
- Controller
# Leaf 0 subnet of the storage_mgmt network
storage_mgmt0:
# Network name used by TripleO Heat Templates
heat_name: StorageMgmt0
vlan: 221
cidr: "172.20.1.0/24"
gateway: 172.20.1.1
mtu: 1500
pool:
start: 172.20.1.100
end: 172.20.1.200
# List of roles that can be on this network
role:
- CephStorage0Hw6
# Leaf 1 subnet of the storage_mgmt network
storage_mgmt1:
# Network name used by TripleO Heat Templates
heat_name: StorageMgmt1
vlan: 444
cidr: "172.20.2.0/24"
gateway: 172.20.2.1
mtu: 1500
pool:
start: 172.20.2.100
end: 172.20.2.200
# List of roles that can be on this network
role:
- CephStorage1Hw7
# The tenant network is used for tenant workload data
tenant:
# Network name used by TripleO Heat Templates
heat_name: Tenant
# VLAN tag for the tenant network
# Corresponds to the TenantNetworkVlanID heat property
vlan: 228
supernet: "172.18.0.0/16"
cidr: "172.18.0.0/24"
gateway: 172.18.0.1
vrouter_gateway: 172.18.0.1
mtu: 1500
pool:
start: 172.18.0.100
end: 172.18.0.200
# List of roles that can be on this network
role:
- ContrailController
- ContrailAnalytics
- ContrailAnalyticsDatabase
- ContrailTsn
# Leaf 0 subnet of the tenant network
tenant0:
# Network name used by TripleO Heat Templates
heat_name: Tenant0
vlan: 222
cidr: "172.18.1.0/24"
gateway: 172.18.1.1
vrouter_gateway: 172.18.1.1
mtu: 1500
pool:
start: 172.18.1.100
end: 172.18.1.200
# List of roles that can be on this network
role:
- ComputeDpdk0Hw2
- ComputeSriov0Hw4
- ComputeKernel0Hw0
- ComputeKernel0Hw1
# Leaf 1 subnet of the tenant network
tenant1:
# Network name used by TripleO Heat Templates
heat_name: Tenant1
vlan: 442
cidr: "172.18.2.0/24"
gateway: 172.18.2.1
vrouter_gateway: 172.18.2.1
mtu: 1500
pool:
start: 172.18.2.100
end: 172.18.2.200
# List of roles that can be on this network
role:
- ComputeDpdk1Hw3
- ComputeSriov1Hw5
- ComputeKernel1Hw1
- ComputeKernel1Hw0
# Contrail-specific settings
#contrail:
# aaa_mode: cloud-admin
# vrouter:
# contrail_settings:
# # Settings per profile.
# # A profile's contrail_settings replace the default settings and should include
# # all keys and values that are intended to be exported for the given role.
# # When leafs are used, per-profile configuration is implied, as the
# # VROUTER_GATEWAY for a profile is determined by querying the node's tenant
# # network for its vrouter_gateway value.
# default:
# VROUTER_GATEWAY: 172.16.81.254
# BGP_ASN: 64512
# LACP_RATE: 1
# ComputeKernel1Hw0:
# LACP_RATE: 1
# Information used to generate the SSL certificates for the public OpenStack service APIs
tls:
#countryName_default
country: "US"
#stateOrProvinceName_default
state: "CA"
#localityName_default
city: "Sunnyvale"
#organizationalUnitName_default
organization: "JNPR"
#commonName_default - this is typically the external VIP
common_name: "10.10.10.90"
ceph:
# Choice to enable Ceph storage in the overcloud.
# "true" means that Ceph will be deployed as the backed for Cinder and Glance services.
# "false" false means that Ceph will not be deployed.
enabled: true
# Ceph OSD disk configuration
osd:
# Update the Ceph crush map when OSDs are started
crush_update_on_start: true
# Ceph OSD disk assignments. The named disks will be used exclusively by Ceph for persistence.
# LVM is the default scenario for Ceph deployment, with BlueStore as the backend.
# When all named disks are of the same type (spinning or solid state), all of them will be used
# as Ceph OSDs. When disks of mixed types are defined, the spinning disks will be used as OSDs
# and the Ceph DB will be created on the solid-state disks. For mixed disk types, the automatic pgp
# number calculation requires assigning the key 'contents' with value 'db' to the SSD disks.
# In the example below, disks sd[b-e] are spinning disks and sdf is a solid-state disk.
default:
disk:
'/dev/sdb':
'/dev/sdc':
'/dev/sdd':
'/dev/sde':
'/dev/sdf':
contents: db
CephStorage0Hw6:
disk:
'/dev/sdb':
'/dev/sdc':
'/dev/sdd':
'/dev/sde':
'/dev/sdf':
contents: db
CephStorage1Hw7:
disk:
'/dev/sdb':
'/dev/sdc':
'/dev/sdd':
'/dev/sde':
'/dev/sdf':
contents: db
# By default, the pgp number is calculated by Contrail Cloud. You can also set this parameter
# yourself; use the calculator at https://ceph.com/pgcalc/, which also takes
# pool utilization into account. A calculated pgp_num can be introduced in the configuration
# as below. It is defined per pool.
# pool:
# vms:
# pgp_num: 32
# rbd:
# pgp_num: 32
# images:
# pgp_num: 32
# volumes:
# pgp_num: 32
# backups:
# pgp_num: 32
#
# The Rados Gateway, when enabled (the default behaviour), creates its own Ceph pools
# that are not tracked by Contrail Cloud. Those pools can be predefined to better control
# their sizes. The pool definitions below are not exhaustive; please consult
# https://ceph.com/pgcalc/
# Pools should have an application enabled according to their use.
# Unless changed explicitly, pools are created with the 'rbd' application assigned.
# Available options are:
# - rbd for the Ceph Block Device
# - rgw for the Ceph Object Gateway
# - cephfs for the Ceph Filesystem
# or user defined value for custom application.
# More details can be found on
# https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/storage_strategies_guide/pools-1#enable-application
# .rgw.root:
# pgp_num: 16
# enabled: true
# replica: 3
# application: rgw
# default.rgw.control:
# pgp_num: 16
# enabled: true
# replica: 3
# application: rgw
# default.rgw.meta:
# pgp_num: 16
# enabled: true
# replica: 3
# application: rgw
# default.rgw.log:
# pgp_num: 16
# enabled: true
# replica: 3
# application: rgw
# default.rgw.buckets.index:
# pgp_num: 16
# enabled: true
# replica: 3
# application: rgw
# default.rgw.buckets.data:
# pgp_num: 16
# enabled: true
# replica: 3
# application: rgw
# default.rgw.buckets.non-ec:
# pgp_num: 16
# enabled: true
# replica: 3
# application: rgw
appformix:
# Set to true if you have multiple control hosts, which allows AppFormix to run in HA mode
enable_ha: true
# Floating virtual IP for the AppFormix APIs on the external network; used and required by HA mode.
vip: "10.10.10.101"
keepalived:
# Set which interface will be used for VRRP
vrrp_interface: "eth1"
Sample inventory.yml Configuration File
# Copyright 2018 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#
# Common values shared among group of nodes
ipmi_hardware1: &hardware1
pm_type: "ipmi"
pm_user: "{{ vault['inventory_nodes']['hardware1']['pm_user'] }}"
pm_password: "{{ vault['inventory_nodes']['hardware1']['pm_password'] }}"
capabilities: "boot_mode:uefi"
# List of baremetal server nodes that can be used for deploying the roles
# Each list item contains:
# name: logical name to assign this resource (string)
# pm_addr: IP address of the resource's IPMI interface (string)
# pm_type: Ironic driver to interface with this resource (typically ipmi) (string)
# pm_user: IPMI user account (string)
# pm_password: IPMI account user password (string)
# capabilities: string containing a comma-separated list of node capabilities.
# Capabilities 'profile' and 'boot_option' are managed
# by Contrail Cloud and will be omitted. (string)
# e.g. capabilities: "boot_mode:uefi" sets the boot mode to UEFI
#
# Values common to several nodes can be moved to a dedicated section such as ipmi_hardware1
# and referenced like this:
# <<: *hardware1
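# For instance, a second group could be defined the same way (hypothetical example,
# using the 'hardware2' credentials from the vault-data.yml sample):
#ipmi_hardware2: &hardware2
#  pm_type: "ipmi"
#  pm_user: "{{ vault['inventory_nodes']['hardware2']['pm_user'] }}"
#  pm_password: "{{ vault['inventory_nodes']['hardware2']['pm_password'] }}"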
inventory_nodes:
- name: "control-host1"
pm_addr: "10.10.11.58"
<<: *hardware1
- name: "control-host2"
pm_addr: "10.10.11.59"
<<: *hardware1
- name: "control-host3"
pm_addr: "10.10.11.60"
<<: *hardware1
- name: "storage1"
pm_addr: "10.10.11.61"
<<: *hardware1
- name: "storage2"
pm_addr: "10.10.11.62"
<<: *hardware1
- name: "storage3"
pm_addr: "10.10.11.63"
<<: *hardware1
- name: "computedpdk1"
pm_addr: "10.10.11.64"
<<: *hardware1
- name: "computedpdk2"
pm_addr: "10.10.11.65"
<<: *hardware1
- name: "compute1"
pm_addr: "10.10.11.66"
<<: *hardware1
- name: "compute2"
pm_addr: "10.10.11.67"
<<: *hardware1
- name: "compute3"
pm_addr: "10.10.11.68"
<<: *hardware1
- name: "compute4"
pm_addr: "10.10.11.69"
<<: *hardware1
- name: "computesriov1"
pm_addr: "10.10.11.70"
<<: *hardware1
- name: "computesriov2"
pm_addr: "10.10.11.71"
<<: *hardware1
Sample control-host-nodes.yml Configuration File
# Copyright 2018 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#
# List of nodes to use as control host role
# Each list item contains a set of variables which can be referenced
# with "{{ host.<variable> }}" in control_host_nodes_network_config below.
# Other ad-hoc variables can be added as needed.
# name: name of a node in the inventory (string)
# hostname: hostname to assign the node after it is imaged (string)
# control_ip_netmask: static CIDR address on Control Plane network.
# Choose a value outside the DHCP range. (string)
# dns_server1, dns_server2: DNS server addresses (string)
# max_mtu: The largest MTU supported by an interface
#
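# For example, a hypothetical ad-hoc variable could be added to each host entry
# below and referenced as "{{ host.ntp_server }}" in the network config template:
#   ntp_server: "10.10.10.2"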
control_host_nodes:
- name: "control-host1"
control_ip_netmask: "192.168.213.5/24"
dns_server1: "172.29.143.60"
dns_server2: "172.29.139.60"
max_mtu: 9216
- name: "control-host2"
control_ip_netmask: "192.168.213.6/24"
dns_server1: "172.29.143.60"
dns_server2: "172.29.139.60"
max_mtu: 9216
- name: "control-host3"
control_ip_netmask: "192.168.213.7/24"
dns_server1: "172.29.143.60"
dns_server2: "172.29.139.60"
max_mtu: 9216
# Template for network layout on all control host nodes
# This follows the os-net-config syntax
# See https://github.com/openstack/os-net-config/tree/stable/queens
# Variables from control_host_nodes can be referenced with "{{ host.<variable> }}"
control_host_nodes_network_config:
- type: ovs_bridge
name: br-eno1
use_dhcp: false
mtu: "{{ overcloud['network']['control']['mtu'] }}"
addresses:
-
ip_netmask: "{{ host.control_ip_netmask }}"
routes:
-
next_hop: "{{ overcloud['network']['control']['gateway'] }}"
default: true
dns_servers:
- "{{ host.dns_server1 }}"
- "{{ host.dns_server2 }}"
members:
- type: interface
name: eno1
use_dhcp: false
mtu: "{{ overcloud['network']['control']['mtu'] }}"
- type: ovs_bridge
name: br-eno2
use_dhcp: false
mtu: "{{ host.max_mtu }}"
members:
-
type: interface
name: eno2
use_dhcp: false
mtu: "{{ host.max_mtu }}"
- type: ovs_bridge
name: br-bond0
use_dhcp: false
mtu: "{{ host.max_mtu }}"
members:
-
type: linux_bond
name: bond0
use_dhcp: false
mtu: "{{ host.max_mtu }}"
bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast miimon=100"
members:
-
type: interface
name: ens7f0
use_dhcp: false
mtu: "{{ host.max_mtu }}"
primary: true
-
type: interface
name: ens7f1
use_dhcp: false
mtu: "{{ host.max_mtu }}"
control_hosts:
# The mapping from control host interfaces to the control VM interfaces
# The first interface (eth0) must always be the Control Plane network to allow the VM to PXE boot
# VM interface names must be sequential with no gaps (e.g. eth0, eth1, eth2,...)
vm_interfaces:
- interface: eth0
bridge: br-eno1
- interface: eth1
bridge: br-eno2
- interface: eth2
bridge: br-bond0
Sample overcloud-nics.yml Configuration File
Contrail_network_config:
- type: interface
name: eth0
dns_servers:
get_param: DnsServers
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
use_dhcp: false
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: eth0
vlan_id:
get_param: InternalApiNetworkVlanID
mtu:
get_param: InternalApiNetworkMtu
addresses:
- ip_netmask:
get_param: InternalApiIpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApiInterfaceDefaultRoute
- type: interface
name: eth1
use_dhcp: false
- type: interface
name: eth2
use_dhcp: false
mtu:
get_param: TenantNetworkMtu
- type: vlan
device: eth2
vlan_id:
get_param: TenantNetworkVlanID
mtu:
get_param: TenantNetworkMtu
addresses:
- ip_netmask:
get_param: TenantIpSubnet
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: TenantInterfaceDefaultRoute
Controller_network_config:
- type: interface
name: eth0
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
- type: vlan
device: eth0
vlan_id:
get_param: StorageNetworkVlanID
mtu:
get_param: StorageNetworkMtu
addresses:
- ip_netmask:
get_param: StorageIpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: StorageInterfaceDefaultRoute
- type: vlan
device: eth0
vlan_id:
get_param: StorageMgmtNetworkVlanID
mtu:
get_param: StorageMgmtNetworkMtu
addresses:
- ip_netmask:
get_param: StorageMgmtIpSubnet
routes:
-
ip_netmask:
get_param: StorageMgmtSupernet
next_hop:
get_param: StorageMgmtInterfaceDefaultRoute
- type: vlan
device: eth0
vlan_id:
get_param: InternalApiNetworkVlanID
mtu:
get_param: InternalApiNetworkMtu
addresses:
- ip_netmask:
get_param: InternalApiIpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApiInterfaceDefaultRoute
- type: interface
name: eth1
mtu:
get_param: ExternalNetworkMtu
addresses:
- ip_netmask:
get_param: ExternalIpSubnet
routes:
-
default: True
next_hop:
get_param: ExternalInterfaceDefaultRoute
- type: interface
name: eth2
use_dhcp: false
AppformixController_network_config:
- type: interface
name: eth0
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
- type: vlan
device: eth0
vlan_id:
get_param: InternalApiNetworkVlanID
mtu:
get_param: InternalApiNetworkMtu
addresses:
- ip_netmask:
get_param: InternalApiIpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApiInterfaceDefaultRoute
- type: interface
name: eth1
mtu:
get_param: ExternalNetworkMtu
addresses:
- ip_netmask:
get_param: ExternalIpSubnet
routes:
-
default: True
next_hop:
get_param: ExternalInterfaceDefaultRoute
- type: interface
name: eth2
use_dhcp: false
ContrailTsn_network_config:
- type: interface
name: eth0
dns_servers:
get_param: DnsServers
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
use_dhcp: false
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: eth0
vlan_id:
get_param: InternalApiNetworkVlanID
mtu:
get_param: InternalApiNetworkMtu
addresses:
- ip_netmask:
get_param: InternalApiIpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApiInterfaceDefaultRoute
- type: interface
name: eth1
use_dhcp: false
- type: interface
name: eth2
use_dhcp: false
mtu:
get_param: TenantNetworkMtu
- type: vlan
device: eth2
vlan_id:
get_param: TenantNetworkVlanID
mtu:
get_param: TenantNetworkMtu
use_dhcp: false
- type: contrail_vrouter
name: vhost0
members:
-
type: interface
name:
str_replace:
template: vlanVLANID
params:
VLANID: {get_param: TenantNetworkVlanID}
use_dhcp: false
mtu:
get_param: TenantNetworkMtu
addresses:
- ip_netmask:
get_param: TenantIpSubnet
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: TenantInterfaceDefaultRoute
ComputeKernel0Hw1_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage0NetworkVlanID
mtu:
get_param: Storage0NetworkMtu
addresses:
- ip_netmask:
get_param: Storage0IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage0InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: InternalApi0NetworkVlanID
mtu:
get_param: InternalApi0NetworkMtu
addresses:
- ip_netmask:
get_param: InternalApi0IpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApi0InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: linux_bond
name: bond0
use_dhcp: false
bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
mtu:
get_param: Tenant0NetworkMtu
members:
- type: interface
name: nic3
primary: true
mtu:
get_param: Tenant0NetworkMtu
- type: interface
name: nic4
mtu:
get_param: Tenant0NetworkMtu
- type: vlan
vlan_id:
get_param: Tenant0NetworkVlanID
device: bond0
- type: contrail_vrouter
name: vhost0
use_dhcp: false
members:
-
type: interface
name:
str_replace:
template: vlanVLANID
params:
VLANID: {get_param: Tenant0NetworkVlanID}
use_dhcp: false
addresses:
- ip_netmask:
get_param: Tenant0IpSubnet
mtu:
get_param: Tenant0NetworkMtu
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: Tenant0InterfaceDefaultRoute
ComputeKernel0Hw0_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage0NetworkVlanID
mtu:
get_param: Storage0NetworkMtu
addresses:
- ip_netmask:
get_param: Storage0IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage0InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: InternalApi0NetworkVlanID
mtu:
get_param: InternalApi0NetworkMtu
addresses:
- ip_netmask:
get_param: InternalApi0IpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApi0InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: linux_bond
name: bond0
use_dhcp: false
bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
mtu:
get_param: Tenant0NetworkMtu
members:
- type: interface
name: nic3
primary: true
mtu:
get_param: Tenant0NetworkMtu
- type: interface
name: nic4
mtu:
get_param: Tenant0NetworkMtu
- type: vlan
vlan_id:
get_param: Tenant0NetworkVlanID
device: bond0
- type: contrail_vrouter
name: vhost0
use_dhcp: false
members:
-
type: interface
name:
str_replace:
template: vlanVLANID
params:
VLANID: {get_param: Tenant0NetworkVlanID}
use_dhcp: false
addresses:
- ip_netmask:
get_param: Tenant0IpSubnet
mtu:
get_param: Tenant0NetworkMtu
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: Tenant0InterfaceDefaultRoute
ComputeKernel1Hw0_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage1NetworkVlanID
mtu:
get_param: Storage1NetworkMtu
addresses:
- ip_netmask:
get_param: Storage1IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage1InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: InternalApi1NetworkVlanID
mtu:
get_param: InternalApi1NetworkMtu
addresses:
- ip_netmask:
get_param: InternalApi1IpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApi1InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: linux_bond
name: bond0
use_dhcp: false
bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
mtu:
get_param: Tenant1NetworkMtu
members:
- type: interface
name: nic3
primary: true
mtu:
get_param: Tenant1NetworkMtu
- type: interface
name: nic4
mtu:
get_param: Tenant1NetworkMtu
- type: vlan
vlan_id:
get_param: Tenant1NetworkVlanID
device: bond0
- type: contrail_vrouter
name: vhost0
use_dhcp: false
members:
-
type: interface
name:
str_replace:
template: vlanVLANID
params:
VLANID: {get_param: Tenant1NetworkVlanID}
use_dhcp: false
addresses:
- ip_netmask:
get_param: Tenant1IpSubnet
mtu:
get_param: Tenant1NetworkMtu
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: Tenant1InterfaceDefaultRoute
ComputeKernel1Hw1_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage1NetworkVlanID
mtu:
get_param: Storage1NetworkMtu
addresses:
- ip_netmask:
get_param: Storage1IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage1InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: InternalApi1NetworkVlanID
mtu:
get_param: InternalApi1NetworkMtu
addresses:
- ip_netmask:
get_param: InternalApi1IpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApi1InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: linux_bond
name: bond0
use_dhcp: false
bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
mtu:
get_param: Tenant1NetworkMtu
members:
- type: interface
name: nic3
primary: true
mtu:
get_param: Tenant1NetworkMtu
- type: interface
name: nic4
mtu:
get_param: Tenant1NetworkMtu
- type: vlan
vlan_id:
get_param: Tenant1NetworkVlanID
device: bond0
- type: contrail_vrouter
name: vhost0
use_dhcp: false
members:
-
type: interface
name:
str_replace:
template: vlanVLANID
params:
VLANID: {get_param: Tenant1NetworkVlanID}
use_dhcp: false
addresses:
- ip_netmask:
get_param: Tenant1IpSubnet
mtu:
get_param: Tenant1NetworkMtu
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: Tenant1InterfaceDefaultRoute
ComputeSriov0Hw4_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage0NetworkVlanID
mtu:
get_param: Storage0NetworkMtu
addresses:
- ip_netmask:
get_param: Storage0IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage0InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: InternalApi0NetworkVlanID
mtu:
get_param: InternalApi0NetworkMtu
addresses:
- ip_netmask:
get_param: InternalApi0IpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApi0InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: linux_bond
name: bond0
use_dhcp: false
bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
mtu:
get_param: Tenant0NetworkMtu
members:
- type: interface
name: nic3
primary: true
mtu:
get_param: Tenant0NetworkMtu
- type: interface
name: nic4
mtu:
get_param: Tenant0NetworkMtu
- type: vlan
vlan_id:
get_param: Tenant0NetworkVlanID
device: bond0
- type: contrail_vrouter
name: vhost0
use_dhcp: false
members:
-
type: interface
name:
str_replace:
template: vlanVLANID
params:
VLANID: {get_param: Tenant0NetworkVlanID}
use_dhcp: false
addresses:
- ip_netmask:
get_param: Tenant0IpSubnet
mtu:
get_param: Tenant0NetworkMtu
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: Tenant0InterfaceDefaultRoute
ComputeSriov1Hw5_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage1NetworkVlanID
mtu:
get_param: Storage1NetworkMtu
addresses:
- ip_netmask:
get_param: Storage1IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage1InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: InternalApi1NetworkVlanID
mtu:
get_param: InternalApi1NetworkMtu
addresses:
- ip_netmask:
get_param: InternalApi1IpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApi1InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: linux_bond
name: bond0
use_dhcp: false
bonding_options: "mode=802.3ad xmit_hash_policy=layer3+4 lacp_rate=fast updelay=1000 miimon=100"
mtu:
get_param: Tenant1NetworkMtu
members:
- type: interface
name: nic3
primary: true
mtu:
get_param: Tenant1NetworkMtu
- type: interface
name: nic4
mtu:
get_param: Tenant1NetworkMtu
- type: vlan
vlan_id:
get_param: Tenant1NetworkVlanID
device: bond0
- type: contrail_vrouter
name: vhost0
use_dhcp: false
members:
-
type: interface
name:
str_replace:
template: vlanVLANID
params:
VLANID: {get_param: Tenant1NetworkVlanID}
use_dhcp: false
addresses:
- ip_netmask:
get_param: Tenant1IpSubnet
mtu:
get_param: Tenant1NetworkMtu
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: Tenant1InterfaceDefaultRoute
ComputeDpdk0Hw2_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage0NetworkVlanID
mtu:
get_param: Storage0NetworkMtu
addresses:
- ip_netmask:
get_param: Storage0IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage0InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: InternalApi0NetworkVlanID
mtu:
get_param: InternalApi0NetworkMtu
addresses:
- ip_netmask:
get_param: InternalApi0IpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApi0InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: contrail_vrouter_dpdk
name: vhost0
vlan_id:
get_param: Tenant0NetworkVlanID
driver: "{{ overcloud['contrail']['vrouter']['dpdk']['driver'] }}"
bond_mode: 4
bond_policy: layer2+3
cpu_list: 1,2
members:
- type: interface
name: nic3
- type: interface
name: nic4
addresses:
- ip_netmask:
get_param: Tenant0IpSubnet
mtu:
get_param: Tenant0NetworkMtu
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: Tenant0InterfaceDefaultRoute
ComputeDpdk1Hw3_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage1NetworkVlanID
mtu:
get_param: Storage1NetworkMtu
addresses:
- ip_netmask:
get_param: Storage1IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage1InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: InternalApi1NetworkVlanID
mtu:
get_param: InternalApi1NetworkMtu
addresses:
- ip_netmask:
get_param: InternalApi1IpSubnet
routes:
-
ip_netmask:
get_param: InternalApiSupernet
next_hop:
get_param: InternalApi1InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: contrail_vrouter_dpdk
name: vhost0
vlan_id:
get_param: Tenant1NetworkVlanID
driver: "{{ overcloud['contrail']['vrouter']['dpdk']['driver'] }}"
bond_mode: 4
bond_policy: layer2+3
cpu_list: 1,2
members:
- type: interface
name: nic3
- type: interface
name: nic4
addresses:
- ip_netmask:
get_param: Tenant1IpSubnet
mtu:
get_param: Tenant1NetworkMtu
routes:
-
ip_netmask:
get_param: TenantSupernet
next_hop:
get_param: Tenant1InterfaceDefaultRoute
CephStorage0Hw6_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage0NetworkVlanID
mtu:
get_param: Storage0NetworkMtu
addresses:
- ip_netmask:
get_param: Storage0IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage0InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: StorageMgmt0NetworkVlanID
mtu:
get_param: StorageMgmt0NetworkMtu
addresses:
- ip_netmask:
get_param: StorageMgmt0IpSubnet
routes:
-
ip_netmask:
get_param: StorageMgmtSupernet
next_hop:
get_param: StorageMgmt0InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: interface
name: nic3
use_dhcp: false
- type: interface
name: nic4
use_dhcp: false
CephStorage1Hw7_network_config:
- type: interface
name: nic1
dns_servers:
get_param: DnsServers
use_dhcp: false
mtu:
get_param: ControlPlaneNetworkMtu
addresses:
- ip_netmask:
list_join:
- '/'
- - get_param: ControlPlaneIp
- get_param: ControlPlaneSubnetCidr
routes:
-
ip_netmask: 169.254.169.254/32
next_hop:
get_param: EC2MetadataIp
-
default: True
next_hop:
get_param: ControlPlaneDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: Storage1NetworkVlanID
mtu:
get_param: Storage1NetworkMtu
addresses:
- ip_netmask:
get_param: Storage1IpSubnet
routes:
-
ip_netmask:
get_param: StorageSupernet
next_hop:
get_param: Storage1InterfaceDefaultRoute
- type: vlan
device: nic1
vlan_id:
get_param: StorageMgmt1NetworkVlanID
mtu:
get_param: StorageMgmt1NetworkMtu
addresses:
- ip_netmask:
get_param: StorageMgmt1IpSubnet
routes:
-
ip_netmask:
get_param: StorageMgmtSupernet
next_hop:
get_param: StorageMgmt1InterfaceDefaultRoute
- type: interface
name: nic2
use_dhcp: false
- type: interface
name: nic3
use_dhcp: false
- type: interface
name: nic4
use_dhcp: false
Sample compute-nodes.yml Configuration File
# Copyright 2018 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#
# Each list item contains:
# name: name of a node in the inventory (string)
# profile: name of the hardware profile, i.e. a group of servers (optional, string)
# leaf: leaf name (optional, string)
# List of nodes to use as compute role using Contrail DPDK vRouter
compute_nodes_dpdk:
- name: computedpdk1
leaf: '0'
profile: hw2
- name: computedpdk2
leaf: '1'
profile: hw3
# List of nodes to use as compute role using Sriov
compute_nodes_sriov:
- name: computesriov1
leaf: '0'
profile: hw4
- name: computesriov2
leaf: '1'
profile: hw5
# List of nodes to use as compute role using Contrail kernel vRouter
compute_nodes_kernel:
- name: compute1
leaf: '0'
profile: hw0
- name: compute2
leaf: '0'
profile: hw1
- name: compute3
leaf: '1'
profile: hw1
- name: compute4
leaf: '1'
profile: hw0
# Sample list of host aggregates, containing:
# - name of aggregate
# - (optional) Availability Zone
# - (optional) metadata: a list of key:values
# - hosts: list of hosts assigned to aggregate
aggregates:
rack1:
az: "az1"
metadata:
- location: "DC1A3R1"
- dc: "eng-prod1"
hosts:
- compute1
- compute4
- computedpdk1
rack2:
az: "az2"
metadata:
- location: "DC1A3R2"
hosts:
- compute2
- computesriov1
- computedpdk2
rack3:
az: "az3"
metadata:
- location: "DC1A3R3"
hosts:
- compute3
- computesriov2
sriov:
metadata:
- capabilities: "sriov"
hosts:
- computesriov1
- computesriov2
dpdk:
metadata:
- capabilities: "dpdk"
hosts:
- computedpdk1
- computedpdk2
kernel:
metadata:
- capabilities: "kernel"
hosts:
- compute1
- compute2
- compute3
- compute4
Sample storage-nodes.yml Configuration File
# Copyright 2018 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
#
#
# List of nodes to use as storage host role
# List item contains:
# name: name of a node in the inventory (string)
# profile: name of the hardware profile, i.e. a group of servers (optional, string)
# leaf: leaf name (optional, string)
storage_nodes:
- name: storage1
leaf: '0'
profile: hw6
- name: storage2
leaf: '0'
profile: hw6
- name: storage3
leaf: '1'
profile: hw7
Sample vault-data.yml Configuration File
# This config structure can be used to hold information that needs to be encrypted for privacy
# If there is a password stored in /var/lib/contrail_cloud/config/.vault_password then it will be used
# Otherwise the password can be entered interactively
#
# This file can be edited with the "ansible-vault edit" command
# This file can be re-encrypted with a new password with the "ansible-vault rekey" command
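# Example usage (file path assumed for illustration):
#   ansible-vault edit vault-data.yml
#   ansible-vault rekey vault-data.yml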
vault:
global:
rhel:
# Contrail Cloud Activation Key
satellite:
#SATELLITE_KEY
key: "PUT_YOUR_KEY_HERE"
# User account used for all Contrail Cloud automation
# This account will be created on:
# - jumphost
# - control hosts
# - all overcloud roles
# - appformix controllers
service_user:
# Account Name
name: "contrail"
# Account Password
password: "c0ntrail123"
# Passphrase used to encrypt the ssh key of the service user.
# If not defined, the ssh private key will not be encrypted.
# ssh_key_passphrase: "c0ntrail123"
rhvm:
vm:
# rhvm user name
user: "contrail"
# password for the rhvm vm user
password: "c0ntrail123"
# root password for the rhvm VM
root_password: "c0ntrail123"
# keystone admin password
admin_password: "c0ntrail123"
# Passphrase used to encrypt the ssh key of the rhvm user.
# If not defined, the ssh private key will not be encrypted.
# ssh_key_passphrase: "c0ntrail123"
vnc:
# VNC console password for the rhvm VM
password: "contrail123"
undercloud:
#Administrator password - default is randomly generated
#admin_password: "c0ntrail123"
vm:
# undercloud user name
user: "stack"
# password for the undercloud vm user
password: "contrail123"
# root password for the undercloud VM
root_password: "contrail123"
# Passphrase used to encrypt the ssh key of the undercloud user.
# If not defined, the ssh private key will not be encrypted.
# ssh_key_passphrase: "c0ntrail123"
vnc:
# VNC console password for the undercloud VM
password: "contrail123"
overcloud:
#Administrator password
admin_password: "c0ntrail123"
# Root password used for local login to overcloud nodes through console
# root_password: "contrail123"
contrail:
rabbitmq:
# contrail rabbitmq user name
user: "contrail_rabbitmq"
# contrail rabbitmq user password
password: "c0ntrail123"
control_hosts:
vm:
vnc:
# VNC console password for all control VMs
password: "contrail123"
appformix:
mysql:
# AppFormix MySQL user account
user: "appformix"
# AppFormix MySQL user password
password: "c0ntrail123"
rabbitmq:
# AppFormix RabbitMQ user account
user: "appformix"
# AppFormix RabbitMQ user password
password: "c0ntrail123"
# Credentials used to connect external ceph cluster
#ceph_external:
# client_key: "CLIENT_KEY"
# client_user: "openstack"
# List of inventory hardware types that can hold hardware-specific properties
# You can create similar configurations to allow reference from inventory-nodes.yml
inventory_nodes:
# A sample configuration for a hardware type
hardware1:
# IPMI user account for Ironic inventory resources
pm_user: "ADMIN"
# IPMI user password for Ironic inventory resources
pm_password: "ADMIN"
# A sample configuration for a hardware type
hardware2:
# IPMI user account for Ironic inventory resources
pm_user: "admin"
# IPMI user password for Ironic inventory resources
pm_password: "admin"
# User defined sensitive data can be stored under 'other' key.
# Schema validation will only check if key,value format is used.
#other:
# mykey: myvalue
