    Sample Testbed.py Files for Contrail vCenter

    Sample Testbed.py File for vCenter-Only Mode

    from fabric.api import env

    #Management ip addresses of hosts in the cluster
    host1 = 'user@10.xx.xx.xx'   #Contrail Controller
    host2 = 'user@10.xx.xx.xx'   #ContrailVM on ESXi
    
    #External routers if any
    ext_routers = []
    
    #Autonomous system number
    router_asn = 64512
    
    #Host from which the fab commands are triggered to install and provision
    host_build = 'user@10.xx.xx.xx'
    
    minimum_diskGB=32
    
    env.ntp_server = 'ntp.juniper.net'
    
    #Role definition of the hosts.
    env.roledefs = {
        'all': [host1, host2],
        'cfgm': [host1],
        'control': [host1],
        'compute': [host2],
        'collector': [host1],
        'webui': [host1],
        'database': [host1],
        'build': [host_build],
    }
    
    #Openstack admin password
    env.openstack_admin_password = 'secret123'
    env.orchestrator = 'vcenter'   #other value is 'openstack'; default: 'openstack'
    
    #Disable multi-tenancy feature
    multi_tenancy = False
    
    env.password = 'secret'
    #Passwords of each host
    env.passwords = {
        host1: '<password>',
        host2: '<password>',
        host_build: '<password>',
    }
    
    #For reimage purpose
    env.ostypes = {
        host1: 'ubuntu',
        host2: 'ubuntu',
    }
    
    ######################################################
    #vcenter provisioning
    #server is the vcenter server ip
    #port is the port on which vcenter is listening for connection
    #username is the vcenter username credentials
    #password is the vcenter password credentials
    #auth is the authentication type used to talk to vcenter, http or https
    #datacenter is the datacenter name we are operating on
    #datacenter_mtu is the mtu size across the datacenter
    #            optional, defaults to 1500
    #cluster is the clustername we are operating on
    #dv_switch section contains distributed switch related parameters
    #       dv_switch_name is the name of the dvswitch
    #dv_port_group section contains the distributed port group info
    #       dv_portgroup_name and the number of ports the group has
    #######################################################
    env.vcenter_servers = {
           'vcenter1': {
               'server':'10.xx.xx.xx',
               'port': '443',
               'username': 'administrator@vsphere.local',
               'password': '<password>',
               'auth': 'https',
               'datacenter': 'test_dc',
               'datacenter_mtu': '1500',
               'cluster': ['test_cluster'],
               'dv_switch': { 'dv_switch_name': 'test_dvswitch',},
               'dv_port_group': { 'dv_portgroup_name': 'test_dvportgroup',
                                  'number_of_ports': '3',
                            },
           },
    }
    
    ###################################################################
    # The compute VM provisioning on the ESXi host
    # This section is used to copy a vmdk onto the ESXi host and bring it up.
    # The ContrailVM that comes up will be set up as a compute node with only
    # vrouter running on it. Each compute host has an associated ESXi host.
    #
    # esxi_host information:
    #    ip: the esxi ip on which the contrailvm(host/compute) runs
    #    username: username used to login to esxi
    #    password: password for esxi
    #    fabric_vswitch: the name of the underlay vswitch that runs on esxi
    #                    optional, defaults to 'vswitch0'
    #    fabric_port_group: the name of the underlay port group for esxi
    #                       optional, defaults to 'contrail-fab-pg'
    #    uplink_nic: the nic used for underlay
    #                 optional, defaults to None
    #    datastore: the datastore on esxi where the vmdk is copied to
    #    vcenter_server: the vcenter_server name which manages this esxi
    #    cluster: name of the cluster to which this esxi is added
    #    contrail_vm information:
    #        mac: the virtual mac address for the contrail vm
    #        host: the contrail_vm ip in the form of 'user@contrailvm_ip'
    #        mode: the mode is 'openstack' or 'vcenter'
    #               optional, defaults to env.orchestrator value
    #        pci_devices: pci_devices information
    #            nic: pci_id of the pass-through interfaces
    #        sr_iov_nics: virtual functions enabled physical interface's name
    #        vmdk: the absolute path of the contrail-vmdk used to spawn vm
    #              optional, if vmdk_download_path is specified
    #        vmdk_download_path: download path of the contrail-vmdk.vmdk used to spawn vm
    #                            optional, if vmdk is specified
    ##########################################################################
    esxi_hosts = {
           'b4s4': {
                 'ip': '10.xx.xx.xx',
                 'username': 'root',
                 'password': '<password>',
                 'datastore': "/vmfs/volumes/",
                 'vcenter_server': 'vcenter1',
                 'cluster': 'test_cluster',
                 'contrail_vm': {
                       'mac': "00:50:56:05:ba:ba",
                       'host': host2,
                       'mode': "vcenter",
                       'vmdk': "/tmp/ContrailVM-disk1.vmdk",
                 }
           },
    }
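
    The pieces of this file reference one another: every host listed in env.roledefs should also appear in env.passwords and env.ostypes, and every esxi_hosts entry should name a vCenter server defined in env.vcenter_servers. The following is a minimal, hypothetical sanity-check script (not part of the Contrail installer) that imports the sample above, assuming it is saved as testbed.py on the build host, and warns about missing cross-references.

    # check_testbed.py -- hypothetical helper, not shipped with Contrail.
    # Assumes the sample above is saved as testbed.py in the same directory
    # and that Fabric is installed (testbed.py imports fabric.api).
    import testbed

    def check_testbed(tb):
        """Warn about hosts that appear in roledefs but lack credentials or an OS type."""
        problems = []
        for host in tb.env.roledefs.get('all', []):
            if host not in tb.env.passwords:
                problems.append('no password defined for %s' % host)
            if host not in tb.env.ostypes:
                problems.append('no ostype defined for %s' % host)
        # Every ESXi entry should point at a vCenter server defined in env.vcenter_servers.
        for name, esxi in getattr(tb, 'esxi_hosts', {}).items():
            if esxi.get('vcenter_server') not in tb.env.vcenter_servers:
                problems.append('%s references an unknown vcenter_server' % name)
        return problems

    if __name__ == '__main__':
        for problem in check_testbed(testbed):
            print('WARNING: ' + problem)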
    
    

    Sample Testbed.py File for vCenter-as-Compute Mode

    from fabric.api import env

    #Management ip addresses of hosts in the cluster
    host1 = 'user@10.xx.xx.xx'   #Contrail Controller
    host2 = 'user@10.xx.xx.xx'   #ContrailVM on ESXi
    host3 = 'user@10.xx.xx.xx'   #vcenter-compute
    host4 = 'user@10.xx.xx.xx'   #KVM Compute
    
    #External routers if any
    ext_routers = []
    
    #Autonomous system number
    router_asn = 64512
    
    #Host from which the fab commands are triggered to install and provision
    host_build = 'user@10.xx.xx.xx'
    
    minimum_diskGB=32
    
    env.ntp_server = 'ntp.juniper.net'
    
    #Role definition of the hosts.
    env.roledefs = {
        'all': [host1, host2, host3, host4],
        'cfgm': [host1],
        'control': [host1],
        'compute': [host2, host4],
        'collector': [host1],
        'webui': [host1],
        'database': [host1],
        'openstack': [host1],
        'vcenter_compute': [host3],
        'build': [host_build],
    }
    
    
    #Openstack admin password
    env.openstack_admin_password = 'secret123'
    
    env.password = 'secret'
    #Passwords of each host
    env.passwords = {
        host1: '<password>',
        host2: '<password>',
        host3: '<password>',
        host4: '<password>',
        host_build: '<password>',
    }
    
    #For reimage purpose
    env.ostypes = {
        host1: 'ubuntu',
        host2: 'ubuntu',
        host3: 'ubuntu',
        host4: 'ubuntu',
    }
    
    #To enable multi-tenancy feature
    multi_tenancy = True
    
    ########################################################
    #vcenter provisioning
    #server is the vcenter server ip
    #port is the port on which vcenter is listening for connection
    #username is the vcenter username credentials
    #password is the vcenter password credentials
    #auth is the authentication type used to talk to vcenter, http or https
    #datacenter is the datacenter name we are operating on
    #datacenter_mtu is the mtu size across the datacenter
    #            optional, defaults to 1500
    #cluster is the clustername we are operating on
    #vcenter_compute is the nova-compute node for this vcenter server
    #dv_switch section contains distributed switch related parameters
    #       dv_switch_name is the name of the dvswitch
    #dv_port_group section contains the distributed port group info
    #       dv_portgroup_name and the number of ports the group has
    #########################################################
    env.vcenter_servers = {
           'vcenter1': {
               'server':'10.xx.xx.xx',
               'port': '443',
               'username': 'administrator@vsphere.local',
               'password': '<password>',
               'auth': 'https',
               'datacenter': 'test_dc',
               'datacenter_mtu': '1500',
               'cluster': ['test_cluster'],
               'vcenter_compute':'10.xx.xx.xx',
               'dv_switch': { 'dv_switch_name': 'test_dvswitch',},
               'dv_port_group': { 'dv_portgroup_name': 'test_dvportgroup',
                                  'number_of_ports': '3',
                            },
           },
     }
    
    ########################################################################
    # The compute VM provisioning on the ESXi host
    # This section is used to copy a vmdk onto the ESXi host and bring it up.
    # The ContrailVM that comes up will be set up as a compute node with only
    # vrouter running on it. Each compute host has an associated ESXi host.
    #
    # esxi_host information:
    #    ip: the esxi ip on which the contrailvm(host/compute) runs
    #    username: username used to login to esxi
    #    password: password for esxi
    #    fabric_vswitch: the name of the underlay vswitch that runs on esxi
    #                    optional, defaults to 'vswitch0'
    #    fabric_port_group: the name of the underlay port group for esxi
    #                       optional, defaults to 'contrail-fab-pg'
    #    uplink_nic: the nic used for underlay
    #                 optional, defaults to None
    #    datastore: the datastore on esxi where the vmdk is copied to
    #    vcenter_server: the vcenter_server name which manages this esxi
    #    cluster: name of the cluster to which this esxi is added
    #    contrail_vm information:
    #        mac: the virtual mac address for the contrail vm
    #        host: the contrail_vm ip in the form of 'user@contrailvm_ip'
    #        mode: the mode is 'openstack' or 'vcenter'
    #               optional, defaults to env.orchestrator value
    #        pci_devices: pci_devices information
    #            nic: pci_id of the pass-through interfaces
    #        sr_iov_nics: virtual functions enabled physical interface's name
    #        vmdk: the absolute path of the contrail-vmdk used to spawn vm
    #              optional, if vmdk_download_path is specified
    #        vmdk_download_path: download path of the contrail-vmdk.vmdk used to spawn vm
    #                            optional, if vmdk is specified
    ##########################################################################
    esxi_hosts = {
           'b4s4': {
                 'ip': '10.xx.xx.xx',
                 'username': 'user',
                 'password': '<password>',
                 'datastore': "/vmfs/volumes/",
                 'vcenter_server': 'vcenter1',
                 'cluster': 'test_cluster',
                 'contrail_vm': {
                       'mac': "00:50:56:05:ba:ba",
                       'host': host2,
                       'mode': "vcenter",
                       'vmdk': "/tmp/ContrailVM-disk1.vmdk",
                 }
           },
    }
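
    Before running provisioning with either of these files, you may want to confirm that the credentials and datacenter name in env.vcenter_servers are valid. The sketch below uses the pyVmomi library for such a pre-flight check; pyVmomi, the script name, and the unverified-SSL context are assumptions made for illustration and are not part of the Contrail installer.

    # check_vcenter.py -- hypothetical pre-flight check, not part of Contrail.
    # Requires pyVmomi (pip install pyvmomi) and assumes the sample above is
    # saved as testbed.py in the same directory.
    import ssl

    from pyVim.connect import SmartConnect, Disconnect

    import testbed

    def check_vcenter(name, cfg):
        """Log in to one vCenter entry and confirm the configured datacenter exists."""
        # Skipping certificate verification is acceptable only for lab setups.
        ctx = ssl._create_unverified_context()
        si = SmartConnect(host=cfg['server'],
                          user=cfg['username'],
                          pwd=cfg['password'],
                          port=int(cfg['port']),
                          sslContext=ctx)
        try:
            datacenters = [dc.name for dc in si.RetrieveContent().rootFolder.childEntity]
            if cfg['datacenter'] in datacenters:
                print('%s: login OK, datacenter %s present' % (name, cfg['datacenter']))
            else:
                print('%s: datacenter %s not found (saw %s)' % (name, cfg['datacenter'], datacenters))
        finally:
            Disconnect(si)

    if __name__ == '__main__':
        for name, cfg in testbed.env.vcenter_servers.items():
            check_vcenter(name, cfg)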
    
    

    Modified: 2016-08-30