From 1c4b1054943453f2a0137b15493d12e951af9922 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 18 Oct 2013 11:07:43 -0400 Subject: [PATCH 01/23] add stable/havana branch Change-Id: I984759a81e577bffb44a1b6762b1bcb96754dd00 --- .gitreview | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitreview b/.gitreview index 570d31a987..0a2ab59429 100644 --- a/.gitreview +++ b/.gitreview @@ -2,3 +2,4 @@ host=review.openstack.org port=29418 project=openstack-dev/devstack.git +defaultbranch=stable/havana From 0540a3ec8e940d9096053af6f8737bcf32d4c467 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 21 Oct 2013 11:07:47 -0500 Subject: [PATCH 02/23] Set default branches to to stable/havana For services only, not including clients, external repos and projects without stable/havana branches Change-Id: I0ed88877b70499c7ff805be611b1bf708d2196eb --- stackrc | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/stackrc b/stackrc index 3f740b5678..5cdad63632 100644 --- a/stackrc +++ b/stackrc @@ -66,7 +66,7 @@ GIT_BASE=${GIT_BASE:-https://github.com} # metering service CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git} -CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master} +CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-stable/havana} # ceilometer client library CEILOMETERCLIENT_REPO=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} @@ -74,7 +74,7 @@ CEILOMETERCLIENT_BRANCH=${CEILOMETERCLIENT_BRANCH:-master} # volume service CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git} -CINDER_BRANCH=${CINDER_BRANCH:-master} +CINDER_BRANCH=${CINDER_BRANCH:-stable/havana} # volume client CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} @@ -82,7 +82,7 @@ CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master} # image catalog service GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git} -GLANCE_BRANCH=${GLANCE_BRANCH:-master} 
+GLANCE_BRANCH=${GLANCE_BRANCH:-stable/havana} # python glance client library GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} @@ -90,7 +90,7 @@ GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master} # heat service HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git} -HEAT_BRANCH=${HEAT_BRANCH:-master} +HEAT_BRANCH=${HEAT_BRANCH:-stable/havana} # python heat client library HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git} @@ -98,7 +98,7 @@ HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master} # django powered web control panel for openstack HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} -HORIZON_BRANCH=${HORIZON_BRANCH:-master} +HORIZON_BRANCH=${HORIZON_BRANCH:-stable/havana} # baremetal provisionint service IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git} @@ -106,7 +106,7 @@ IRONIC_BRANCH=${IRONIC_BRANCH:-master} # unified auth system (manages accounts/tokens) KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} -KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} +KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-stable/havana} # python keystone client library to nova that horizon uses KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} @@ -114,7 +114,7 @@ KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master} # compute service NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} -NOVA_BRANCH=${NOVA_BRANCH:-master} +NOVA_BRANCH=${NOVA_BRANCH:-stable/havana} # python client library to nova that horizon (and others) use NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} @@ -126,7 +126,7 @@ OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master} # oslo.config OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} -OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} +OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-stable/havana} # oslo.messaging OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} @@ -138,7 +138,7 
@@ PBR_BRANCH=${PBR_BRANCH:-master} # neutron service NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git} -NEUTRON_BRANCH=${NEUTRON_BRANCH:-master} +NEUTRON_BRANCH=${NEUTRON_BRANCH:-stable/havana} # neutron client NEUTRONCLIENT_REPO=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git} @@ -146,11 +146,11 @@ NEUTRONCLIENT_BRANCH=${NEUTRONCLIENT_BRANCH:-master} # consolidated openstack requirements REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git} -REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master} +REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-stable/havana} # storage service SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} -SWIFT_BRANCH=${SWIFT_BRANCH:-master} +SWIFT_BRANCH=${SWIFT_BRANCH:-stable/havana} SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git} SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} @@ -160,7 +160,7 @@ SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master} # Tempest test suite TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} -TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} +TEMPEST_BRANCH=${TEMPEST_BRANCH:-stable/havana} # diskimage-builder @@ -187,7 +187,7 @@ SPICE_BRANCH=${SPICE_BRANCH:-master} # trove service TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git} -TROVE_BRANCH=${TROVE_BRANCH:-master} +TROVE_BRANCH=${TROVE_BRANCH:-stable/havana} # trove client library test TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git} From 763a3f9122654ba0c85736c327f5797b27591d5d Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 16 Oct 2013 17:48:16 -0400 Subject: [PATCH 03/23] Use nova.conf for auth_token configs. Updates lib/nova so that we use the application config file (nova.conf) instead of the Nova api-paste.ini config file. 
Related-Bug #1240753 Change-Id: I393a67f1f005e775928130c9241aa7e25c391ae3 --- lib/nova | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/lib/nova b/lib/nova index 8deb3a01a9..5ff5099c6d 100644 --- a/lib/nova +++ b/lib/nova @@ -212,26 +212,24 @@ function configure_nova() { configure_nova_rootwrap if is_service_enabled n-api; then - # Use the sample http middleware configuration supplied in the - # Nova sources. This paste config adds the configuration required - # for Nova to validate Keystone tokens. - # Remove legacy paste config if present rm -f $NOVA_DIR/bin/nova-api-paste.ini # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + # Comment out the keystone configs in Nova's api-paste.ini. + # We are using nova.conf to configure this instead. + inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host if is_service_enabled tls-proxy; then - iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol fi - iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova - iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password fi - iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR + inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir if is_service_enabled n-cpu; then # Force IP forwarding on, just on case @@ -394,7 +392,20 @@ function create_nova_conf() { # Set the service port for a proxy to take the original iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" fi + 
+ # Add keystone authtoken configuration + + iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + if is_service_enabled tls-proxy; then + iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + fi + iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $NOVA_CONF keystone_authtoken admin_user nova + iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD fi + + iniset $NOVA_CONF keystone_authtoken signing_dir $NOVA_AUTH_CACHE_DIR + if is_service_enabled cinder; then iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API" fi From f5d35b71e51a44223ccc012f344038e1a43d6d0a Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 16 Oct 2013 18:57:15 -0400 Subject: [PATCH 04/23] Use cinder.conf for auth_token configs. Updates lib/cinder so that we use the application config file (cinder.conf) instead of the Cinder api-paste.ini config file. Related-Bug #1240753 Change-Id: I6636d33ee522757145ac97fc354324a8b9379700 --- lib/cinder | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 220488a07e..f6f137cabd 100644 --- a/lib/cinder +++ b/lib/cinder @@ -202,15 +202,25 @@ function configure_cinder() { sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI - iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $CINDER_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder - iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR + + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host 
+ inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password + inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF + + iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CINDER_CONF keystone_authtoken admin_user cinder + iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $CINDER_CONF keystone_authtoken signing_dir $CINDER_AUTH_CACHE_DIR + iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CINDER_CONF DEFAULT verbose True From 7f9bdc47c8318a8c6d681a3fd229bf30348ecbce Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 17 Oct 2013 12:03:55 -0500 Subject: [PATCH 05/23] Fix typos and thinkos in docs Updates for the new major features and some clarification Partial-Bug: #1235626 Change-Id: If2da63e62a14894e498b4163b5052d9b2b2069ed --- HACKING.rst | 28 ++++---- README.md | 165 ++++++++++++++++++++++++++++++--------------- extras.d/README.md | 7 +- stack.sh | 2 +- 4 files changed, 131 insertions(+), 71 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 5f33d770f8..3c08e679d9 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -5,10 +5,10 @@ Contributing to DevStack General ------- -DevStack is written in POSIX shell script. This choice was made because -it best illustrates the configuration steps that this implementation takes -on setting up and interacting with OpenStack components. 
DevStack specifically -uses Bash and is compatible with Bash 3. +DevStack is written in UNIX shell script. It uses a number of bash-isms +and so is limited to Bash (version 3 and up) and compatible shells. +Shell script was chosen because it best illustrates the steps used to +set up and interact with OpenStack components. DevStack's official repository is located on GitHub at https://github.com/openstack-dev/devstack.git. Besides the master branch that @@ -54,14 +54,14 @@ Sometimes the script needs to know the location of the DevStack install director ``TOP_DIR`` should always point there, even if the script itself is located in a subdirectory:: - # Keep track of the current devstack directory. + # Keep track of the current DevStack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) Many scripts will utilize shared functions from the ``functions`` file. There are also rc files (``stackrc`` and ``openrc``) that are often included to set the primary configuration of the user environment:: - # Keep track of the current devstack directory. + # Keep track of the current DevStack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions @@ -100,13 +100,14 @@ stackrc ------- ``stackrc`` is the global configuration file for DevStack. It is responsible for -calling ``localrc`` if it exists so configuration can be overridden by the user. +calling ``local.conf`` (or ``localrc`` if it exists) so local user configuration +is recognized. The criteria for what belongs in ``stackrc`` can be vaguely summarized as follows: -* All project respositories and branches (for historical reasons) -* Global configuration that may be referenced in ``localrc``, i.e. ``DEST``, ``DATA_DIR`` +* All project repositories and branches handled directly in ``stack.sh`` +* Global configuration that may be referenced in ``local.conf``, i.e. 
``DEST``, ``DATA_DIR`` * Global service configuration like ``ENABLED_SERVICES`` * Variables used by multiple services that do not have a clear owner, i.e. ``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME`` @@ -116,8 +117,9 @@ follows: not be changed for other reasons but the earlier file needs to dereference a variable set in the later file. This should be rare. -Also, variable declarations in ``stackrc`` do NOT allow overriding (the form -``FOO=${FOO:-baz}``); if they did then they can already be changed in ``localrc`` +Also, variable declarations in ``stackrc`` before ``local.conf`` is sourced +do NOT allow overriding (the form +``FOO=${FOO:-baz}``); if they did then they can already be changed in ``local.conf`` and can stay in the project file. @@ -139,7 +141,9 @@ verbose in the comments _ABOVE_ the code they pertain to. Shocco also supports Markdown formatting in the comments; use it sparingly. Specifically, ``stack.sh`` uses Markdown headers to divide the script into logical sections. -.. _shocco: http://rtomayko.github.com/shocco/ +.. _shocco: https://github.com/dtroyer/shocco/tree/rst_support + +The script used to drive shocco is tools/build_docs.sh. Exercises diff --git a/README.md b/README.md index 514786c60f..640fab65f9 100644 --- a/README.md +++ b/README.md @@ -6,35 +6,39 @@ DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) 
* To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once * To make it easy to prototype cross-project features -* To sanity-check OpenStack builds (used in gating commits to the primary repos) +* To provide an environment for the OpenStack CI testing on every commit to the projects -Read more at http://devstack.org (built from the gh-pages branch) +Read more at http://devstack.org. -IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. - -# DevStack on Xenserver - -If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. - -# DevStack on Docker - -If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. +IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you +execute before you run them, as they install software and will alter your +networking configuration. We strongly recommend that you run `stack.sh` +in a clean and disposable vm when you are first getting started. # Versions -The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the following to create a diablo OpenStack cloud: +The DevStack master branch generally points to trunk versions of OpenStack +components. For older, stable versions, look for branches named +stable/[release] in the DevStack repo. 
For example, you can do the +following to create a grizzly OpenStack cloud: - git checkout stable/diablo + git checkout stable/grizzly ./stack.sh -You can also pick specific OpenStack project releases by setting the appropriate `*_BRANCH` variables in `localrc` (look in `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:: +You can also pick specific OpenStack project releases by setting the appropriate +`*_BRANCH` variables in the ``localrc`` section of `local.conf` (look in +`stackrc` for the default set). Usually just before a release there will be +milestone-proposed branches that need to be tested:: GLANCE_REPO=https://github.com/openstack/glance.git GLANCE_BRANCH=milestone-proposed # Start A Dev Cloud -Installing in a dedicated disposable vm is safer than installing on your dev machine! Plus you can pick one of the supported Linux distros for your VM. To start a dev cloud run the following NOT AS ROOT (see below for more): +Installing in a dedicated disposable VM is safer than installing on your +dev machine! Plus you can pick one of the supported Linux distros for +your VM. To start a dev cloud run the following NOT AS ROOT (see +**DevStack Execution Environment** below for more on user accounts): ./stack.sh @@ -45,7 +49,7 @@ When the script finishes executing, you should be able to access OpenStack endpo We also provide an environment file that you can use to interact with your cloud via CLI: - # source openrc file to load your environment with osapi and ec2 creds + # source openrc file to load your environment with OpenStack CLI creds . openrc # list instances nova list @@ -61,16 +65,37 @@ If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: DevStack runs rampant over the system it runs on, installing things and uninstalling other things. Running this on a system you care about is a recipe for disappointment, or worse. 
Alas, we're all in the virtualization business here, so run it in a VM. And take advantage of the snapshot capabilities of your hypervisor of choice to reduce testing cycle times. You might even save enough time to write one more feature before the next feature freeze... -``stack.sh`` needs to have root access for a lot of tasks, but it also needs to have not-root permissions for most of its work and for all of the OpenStack services. So ``stack.sh`` specifically does not run if you are root. This is a recent change (Oct 2013) from the previous behaviour of automatically creating a ``stack`` user. Automatically creating a user account is not always the right response to running as root, so that bit is now an explicit step using ``tools/create-stack-user.sh``. Run that (as root!) if you do not want to just use your normal login here, which works perfectly fine. +``stack.sh`` needs to have root access for a lot of tasks, but uses ``sudo`` +for all of those tasks. However, it needs to be not-root for most of its +work and for all of the OpenStack services. ``stack.sh`` specifically +does not run if started as root. + +This is a recent change (Oct 2013) from the previous behaviour of +automatically creating a ``stack`` user. Automatically creating +user accounts is not the right response to running as root, so +that bit is now an explicit step using ``tools/create-stack-user.sh``. +Run that (as root!) or just check it out to see what DevStack's +expectations are for the account it runs under. Many people simply +use their usual login (the default 'ubuntu' login on a UEC image +for example). # Customizing -You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. +You can override environment variables used in `stack.sh` by creating file +name `local.conf` with a ``locarc`` section as shown below. 
It is likely +that you will need to do this to tweak your networking configuration should +you need to access your cloud from a different host. + + [[local|localrc]] + VARIABLE=value + +See the **Local Configuration** section below for more details. # Database Backend Multiple database backends are available. The available databases are defined in the lib/databases directory. -`mysql` is the default database, choose a different one by putting the following in `localrc`: +`mysql` is the default database, choose a different one by putting the +following in the `localrc` section: disable_service mysql enable_service postgresql @@ -81,7 +106,7 @@ Multiple database backends are available. The available databases are defined in Multiple RPC backends are available. Currently, this includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of -choice may be selected via the `localrc`. +choice may be selected via the `localrc` section. Note that selecting more than one RPC backend will result in a failure. @@ -95,9 +120,10 @@ Example (Qpid): # Apache Frontend -Apache web server is enabled for wsgi services by setting `APACHE_ENABLED_SERVICES` in your localrc. But remember to enable these services at first as above. +Apache web server is enabled for wsgi services by setting +`APACHE_ENABLED_SERVICES` in your ``localrc`` section. Remember to +enable these services at first as above. -Example: APACHE_ENABLED_SERVICES+=keystone,swift # Swift @@ -108,23 +134,23 @@ vm. When running with only one replica the account, container and object services will run directly in screen. The others services like replicator, updaters or auditor runs in background. 
-If you would like to enable Swift you can add this to your `localrc` : +If you would like to enable Swift you can add this to your `localrc` section: enable_service s-proxy s-object s-container s-account If you want a minimal Swift install with only Swift and Keystone you -can have this instead in your `localrc`: +can have this instead in your `localrc` section: disable_all_services enable_service key mysql s-proxy s-object s-container s-account If you only want to do some testing of a real normal swift cluster with multiple replicas you can do so by customizing the variable -`SWIFT_REPLICAS` in your `localrc` (usually to 3). +`SWIFT_REPLICAS` in your `localrc` section (usually to 3). # Swift S3 -If you are enabling `swift3` in `ENABLED_SERVICES` devstack will +If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. @@ -137,7 +163,7 @@ services are started in background and managed by `swift-init` tool. Basic Setup In order to enable Neutron a single node setup, you'll need the -following settings in your `localrc` : +following settings in your `localrc` section: disable_service n-net enable_service q-svc @@ -146,12 +172,15 @@ following settings in your `localrc` : enable_service q-l3 enable_service q-meta enable_service neutron - # Optional, to enable tempest configuration as part of devstack + # Optional, to enable tempest configuration as part of DevStack enable_service tempest Then run `stack.sh` as normal. -devstack supports adding specific Neutron configuration flags to the service, Open vSwitch plugin and LinuxBridge plugin configuration files. To make use of this feature, the following variables are defined and can be configured in your `localrc` file: +DevStack supports setting specific Neutron configuration flags to the +service, Open vSwitch plugin and LinuxBridge plugin configuration files. 
+To make use of this feature, the following variables are defined and can +be configured in your `localrc` section: Variable Name Config File Section Modified ------------------------------------------------------------------------------------- @@ -160,12 +189,14 @@ devstack supports adding specific Neutron configuration flags to the service, Op Q_AGENT_EXTRA_SRV_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) Q_SRV_EXTRA_DEFAULT_OPTS Service DEFAULT -An example of using the variables in your `localrc` is below: +An example of using the variables in your `localrc` section is below: Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) -devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A simple way to configure the ml2 plugin is shown below: +DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin +can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A +simple way to configure the ml2 plugin is shown below: # VLAN configuration Q_PLUGIN=ml2 @@ -179,7 +210,9 @@ devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can ru Q_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan -The above will default in devstack to using the OVS on each compute host. To change this, set the `Q_AGENT` variable to the agent you want to run (e.g. linuxbridge). +The above will default in DevStack to using the OVS on each compute host. +To change this, set the `Q_AGENT` variable to the agent you want to run +(e.g. linuxbridge). Variable Name Notes ------------------------------------------------------------------------------------- @@ -194,13 +227,13 @@ The above will default in devstack to using the OVS on each compute host. To cha # Heat Heat is disabled by default. 
To enable it you'll need the following settings -in your `localrc` : +in your `localrc` section: enable_service heat h-api h-api-cfn h-api-cw h-eng Heat can also run in standalone mode, and be configured to orchestrate on an external OpenStack cloud. To launch only Heat in standalone mode -you'll need the following settings in your `localrc` : +you'll need the following settings in your `localrc` section: disable_all_services enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng @@ -215,9 +248,23 @@ If tempest has been successfully configured, a basic set of smoke tests can be r $ cd /opt/stack/tempest $ nosetests tempest/scenario/test_network_basic_ops.py +# DevStack on Xenserver + +If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. + +# DevStack on Docker + +If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. + # Additional Projects -DevStack has a hook mechanism to call out to a dispatch script at specific points in the execution if `stack.sh`, `unstack.sh` and `clean.sh`. This allows higher-level projects, especially those that the lower level projects have no dependency on, to be added to DevStack without modifying the scripts. Tempest is built this way as an example of how to structure the dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` for more information. +DevStack has a hook mechanism to call out to a dispatch script at specific +points in the execution of `stack.sh`, `unstack.sh` and `clean.sh`. This +allows upper-layer projects, especially those that the lower layer projects +have no dependency on, to be added to DevStack without modifying the core +scripts. Tempest is built this way as an example of how to structure the +dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` +for more information. 
# Multi-Node Setup @@ -232,7 +279,8 @@ You should run at least one "controller node", which should have a `stackrc` tha enable_service q-meta enable_service neutron -You likely want to change your `localrc` to run a scheduler that will balance VMs across hosts: +You likely want to change your `localrc` section to run a scheduler that +will balance VMs across hosts: SCHEDULER=nova.scheduler.simple.SimpleScheduler @@ -249,7 +297,7 @@ You can then run many compute nodes, each of which should have a `stackrc` which Cells is a new scaling option with a full spec at http://wiki.openstack.org/blueprint-nova-compute-cells. -To setup a cells environment add the following to your `localrc`: +To setup a cells environment add the following to your `localrc` section: enable_service n-cell @@ -264,32 +312,41 @@ Historically DevStack has used ``localrc`` to contain all local configuration an The new config file ``local.conf`` is an extended-INI format that introduces a new meta-section header that provides some additional information such as a phase name and destination config filename: - [[ | ]] + [[ | ]] -where is one of a set of phase names defined by ``stack.sh`` and is the project config filename. The filename is eval'ed in the stack.sh context so all environment variables are available and may be used. Using the project config file variables in the header is strongly suggested (see example of NOVA_CONF below). If the path of the config file does not exist it is skipped. +where ```` is one of a set of phase names defined by ``stack.sh`` +and ```` is the configuration filename. The filename is +eval'ed in the ``stack.sh`` context so all environment variables are +available and may be used. Using the project config file variables in +the header is strongly suggested (see the ``NOVA_CONF`` example below). +If the path of the config file does not exist it is skipped. 
The defined phases are: -* local - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced -* post-config - runs after the layer 2 services are configured and before they are started -* extra - runs after services are started and before any files in ``extra.d`` are executes +* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced +* **post-config** - runs after the layer 2 services are configured and before they are started +* **extra** - runs after services are started and before any files in ``extra.d`` are executed The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used. - [[post-config|$NOVA_CONF]] - [DEFAULT] - use_syslog = True + [[post-config|$NOVA_CONF]] + [DEFAULT] + use_syslog = True - [osapi_v3] - enabled = False + [osapi_v3] + enabled = False -A specific meta-section ``local:localrc`` is used to provide a default localrc file. This allows all custom settings for DevStack to be contained in a single file. ``localrc`` is not overwritten if it exists to preserve compatability. +A specific meta-section ``local|localrc`` is used to provide a default +``localrc`` file (actually ``.localrc.auto``). This allows all custom +settings for DevStack to be contained in a single file. If ``localrc`` +exists it will be used instead to preserve backward-compatibility. - [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 - ADMIN_PASSWORD=speciale - LOGFILE=$DEST/logs/stack.sh.log + [[local|localrc]] + FIXED_RANGE=10.254.1.0/24 + ADMIN_PASSWORD=speciale + LOGFILE=$DEST/logs/stack.sh.log -Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to _NOT_ start with a ``/`` (slash) character. A slash will need to be added: +Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to *NOT* +start with a ``/`` (slash) character. 
A slash will need to be added: - [[post-config|/$Q_PLUGIN_CONF_FILE]] + [[post-config|/$Q_PLUGIN_CONF_FILE]] diff --git a/extras.d/README.md b/extras.d/README.md index 591e438b02..88e4265ced 100644 --- a/extras.d/README.md +++ b/extras.d/README.md @@ -10,12 +10,11 @@ that end with `.sh`. To control the order that the scripts are sourced their names start with a two digit sequence number. DevStack reserves the sequence numbers 00 through 09 and 90 through 99 for its own use. -The scripts are sourced at each hook point so they should not declare anything -at the top level that would cause a problem, specifically, functions. This does -allow the entire `stack.sh` variable space to be available. The scripts are +The scripts are sourced at the beginning of each script that calls them. The +entire `stack.sh` variable space is available. The scripts are sourced with one or more arguments, the first of which defines the hook phase: -arg 1: source | stack | unstack | clean + source | stack | unstack | clean source: always called first in any of the scripts, used to set the initial defaults in a lib/* script or similar diff --git a/stack.sh b/stack.sh index aa0efea487..b3380a8775 100755 --- a/stack.sh +++ b/stack.sh @@ -53,7 +53,7 @@ if [[ -r $TOP_DIR/local.conf ]]; then if [[ -r $TOP_DIR/localrc ]]; then warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" else - echo "# Generated file, do not exit" >$TOP_DIR/.localrc.auto + echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto fi fi From 0624eabf5e325fa2dc8d0bc33e1b017375d8a46f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 07:43:22 -0400 Subject: [PATCH 06/23] whitespace cleanup on functions & lib/config fix some of the bash8 issues found in functions and lib/config, part of the long march towards fixing all the bash8 issues. 
Change-Id: Ia131f64870acb0f9d196fe1a9a45d633abb6fc4d --- functions | 50 +++++++++++++++++++++++++------------------------- lib/config | 14 +++++++------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/functions b/functions index d969677fc5..9e5659b684 100644 --- a/functions +++ b/functions @@ -1371,9 +1371,9 @@ function upload_image() { IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}" glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ --name "$IMAGE_NAME" --is-public=True \ --container-format=tgz --disk-format=raw \ --property vm_mode=xen < "${IMAGE}" @@ -1396,11 +1396,11 @@ function upload_image() { mkdir "$xdir" tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) if [[ -z "$IMAGE_NAME" ]]; then IMAGE_NAME=$(basename "$IMAGE" ".img") fi @@ -1691,23 +1691,23 @@ function check_path_perm_sanity() { # # _vercmp_r sep ver1 ver2 function _vercmp_r { - typeset sep - typeset -a ver1=() ver2=() - sep=$1; shift - ver1=("${@:1:sep}") - ver2=("${@:sep+1}") + typeset sep + typeset -a ver1=() ver2=() + sep=$1; shift + ver1=("${@:1:sep}") + ver2=("${@:sep+1}") - if ((ver1 > ver2)); then - echo 1; return 0 - elif ((ver2 > ver1)); then - echo -1; return 0 - fi + if ((ver1 > ver2)); then + echo 1; return 0 + elif ((ver2 > ver1)); then + echo -1; return 0 + fi - if ((sep <= 1)); then - echo 0; return 0 - fi + if ((sep <= 1)); then + echo 0; return 0 + fi - 
_vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" + _vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" } @@ -1729,13 +1729,13 @@ function _vercmp_r { # # vercmp_numbers ver1 ver2 vercmp_numbers() { - typeset v1=$1 v2=$2 sep - typeset -a ver1 ver2 + typeset v1=$1 v2=$2 sep + typeset -a ver1 ver2 - IFS=. read -ra ver1 <<< "$v1" - IFS=. read -ra ver2 <<< "$v2" + IFS=. read -ra ver1 <<< "$v1" + IFS=. read -ra ver2 <<< "$v2" - _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" + _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" } diff --git a/lib/config b/lib/config index 6f686e9b5d..91cefe48cc 100644 --- a/lib/config +++ b/lib/config @@ -10,7 +10,7 @@ # [[group-name|file-name]] # # group-name refers to the group of configuration file changes to be processed -# at a particular time. These are called phases in ``stack.sh`` but +# at a particular time. These are called phases in ``stack.sh`` but # group here as these functions are not DevStack-specific. # # file-name is the destination of the config file @@ -64,12 +64,12 @@ function get_meta_section_files() { [[ -r $file ]] || return 0 $CONFIG_AWK_CMD -v matchgroup=$matchgroup ' - /^\[\[.+\|.*\]\]/ { - gsub("[][]", "", $1); - split($1, a, "|"); - if (a[1] == matchgroup) - print a[2] - } + /^\[\[.+\|.*\]\]/ { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup) + print a[2] + } ' $file } From ad0abc7f4b2c13d231bbb6bd416302060e5c811d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 08:36:16 -0400 Subject: [PATCH 07/23] fix bash8 indent problems in lib/neutron and friends Change-Id: Ia83ce84b792494800fbfe7baa6423c8de9260014 --- lib/neutron | 26 +++++++++++++------------- lib/neutron_plugins/midonet | 4 ++-- lib/neutron_plugins/nec | 18 +++++++++--------- lib/neutron_plugins/nicira | 8 ++++---- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/lib/neutron b/lib/neutron index 778717d7a9..44fb9e1005 100644 --- a/lib/neutron +++ b/lib/neutron @@ -79,8 +79,8 @@ 
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # Support entry points installation of console scripts if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then NEUTRON_BIN_DIR=$NEUTRON_DIR/bin - else -NEUTRON_BIN_DIR=$(get_python_exec_prefix) +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) fi NEUTRON_CONF_DIR=/etc/neutron @@ -373,7 +373,7 @@ function create_neutron_initial_network() { iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID fi fi - fi + fi } # init_neutron() - Initialize databases, etc. @@ -404,7 +404,7 @@ function install_neutron_agent_packages() { fi if is_service_enabled q-lbaas; then - neutron_agent_lbaas_install_agent_packages + neutron_agent_lbaas_install_agent_packages fi } @@ -414,13 +414,13 @@ function start_neutron_service_and_check() { local cfg_file local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - CFG_FILE_OPTIONS+=" --config-file /$cfg_file" + CFG_FILE_OPTIONS+=" --config-file /$cfg_file" done # Start the Neutron service screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS" echo "Waiting for Neutron to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then - die $LINENO "Neutron did not start" + die $LINENO "Neutron did not start" fi } @@ -712,9 +712,9 @@ function _neutron_setup_rootwrap() { # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` # location moved in newer versions, prefer new location if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE + sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE sudo chown root:root $Q_RR_CONF_FILE @@ -848,11 +848,11 @@ function _ssh_check_neutron() { # please refer to ``lib/neutron_thirdparty/README.md`` for details NEUTRON_THIRD_PARTIES="" for f in $TOP_DIR/lib/neutron_thirdparty/*; do - third_party=$(basename $f) - if is_service_enabled $third_party; then - source $TOP_DIR/lib/neutron_thirdparty/$third_party - NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" - fi + third_party=$(basename $f) + if is_service_enabled $third_party; then + source $TOP_DIR/lib/neutron_thirdparty/$third_party + NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" + fi done function _neutron_third_party_do() { diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 193055f7db..42415b1934 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -40,11 +40,11 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - die $LINENO "q-l3 must not be executed with MidoNet plugin!" + die $LINENO "q-l3 must not be executed with MidoNet plugin!" } function neutron_plugin_configure_plugin_agent() { - die $LINENO "q-agt must not be executed with MidoNet plugin!" 
+ die $LINENO "q-agt must not be executed with MidoNet plugin!" } function neutron_plugin_configure_service() { diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 79d41dbf77..3806c32c75 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -101,15 +101,15 @@ function _neutron_setup_ovs_tunnels() { local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} if [ -n "$GRE_REMOTE_IPS" ]; then - for ip in ${GRE_REMOTE_IPS//:/ } - do - if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then - continue - fi - sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ - set Interface gre$id type=gre options:remote_ip=$ip - id=`expr $id + 1` - done + for ip in ${GRE_REMOTE_IPS//:/ } + do + if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then + continue + fi + sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ + set Interface gre$id type=gre options:remote_ip=$ip + id=`expr $id + 1` + done fi } diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index 082c84674d..7c99b692d6 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -58,13 +58,13 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - # Nicira plugin does not run L3 agent - die $LINENO "q-l3 should must not be executed with Nicira plugin!" + # Nicira plugin does not run L3 agent + die $LINENO "q-l3 should must not be executed with Nicira plugin!" } function neutron_plugin_configure_plugin_agent() { - # Nicira plugin does not run L2 agent - die $LINENO "q-agt must not be executed with Nicira plugin!" + # Nicira plugin does not run L2 agent + die $LINENO "q-agt must not be executed with Nicira plugin!" 
} function neutron_plugin_configure_service() { From 7ba57152c6c1b035854a30b5b89e92bf7da754de Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 08:47:11 -0400 Subject: [PATCH 08/23] fix whitespace in the rest of lib/* this brings this in line with bash8 checker Change-Id: Ib34a2292dd5bc259069457461041ec9cd4fd2957 --- lib/baremetal | 100 +++++++++++++------------- lib/glance | 2 +- lib/ironic | 2 +- lib/keystone | 2 +- lib/neutron_thirdparty/trema | 4 +- lib/nova | 41 ++++++----- lib/nova_plugins/hypervisor-baremetal | 4 +- lib/nova_plugins/hypervisor-libvirt | 8 +-- lib/rpc_backend | 6 +- lib/swift | 64 ++++++++--------- lib/tempest | 20 +++--- lib/trove | 15 ++-- 12 files changed, 134 insertions(+), 134 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index f4d8589628..141c28d15f 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -256,19 +256,19 @@ function upload_baremetal_deploy() { # load them into glance BM_DEPLOY_KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $BM_DEPLOY_KERNEL \ - --is-public True --disk-format=aki \ - < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_KERNEL \ + --is-public True --disk-format=aki \ + < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) BM_DEPLOY_RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $BM_DEPLOY_RAMDISK \ - --is-public True --disk-format=ari \ - < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_RAMDISK \ + --is-public True --disk-format=ari \ + < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) } # create a basic baremetal flavor, associated with deploy kernel & ramdisk @@ -278,11 +278,11 @@ 
function create_baremetal_flavor() { aki=$1 ari=$2 nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ - $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU + $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU nova flavor-key $BM_FLAVOR_NAME set \ - "cpu_arch"="$BM_FLAVOR_ARCH" \ - "baremetal:deploy_kernel_id"="$aki" \ - "baremetal:deploy_ramdisk_id"="$ari" + "cpu_arch"="$BM_FLAVOR_ARCH" \ + "baremetal:deploy_kernel_id"="$aki" \ + "baremetal:deploy_ramdisk_id"="$ari" } @@ -311,19 +311,19 @@ function extract_and_upload_k_and_r_from_image() { # load them into glance KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $image_name-kernel \ - --is-public True --disk-format=aki \ - < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-kernel \ + --is-public True --disk-format=aki \ + < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $image_name-initrd \ - --is-public True --disk-format=ari \ - < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-initrd \ + --is-public True --disk-format=ari \ + < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) } @@ -365,11 +365,11 @@ function upload_baremetal_image() { mkdir "$xdir" tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do - [ -f "$f" ] && echo "$f" && 
break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) if [[ -z "$IMAGE_NAME" ]]; then IMAGE_NAME=$(basename "$IMAGE" ".img") fi @@ -403,19 +403,19 @@ function upload_baremetal_image() { --container-format ari \ --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) else - # TODO(deva): add support for other image types - return + # TODO(deva): add support for other image types + return fi glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name "${IMAGE_NAME%.img}" --is-public True \ - --container-format $CONTAINER_FORMAT \ - --disk-format $DISK_FORMAT \ - ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ - ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "${IMAGE_NAME%.img}" --is-public True \ + --container-format $CONTAINER_FORMAT \ + --disk-format $DISK_FORMAT \ + ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ + ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" # override DEFAULT_IMAGE_NAME so that tempest can find the image # that we just uploaded in glance @@ -439,15 +439,15 @@ function add_baremetal_node() { mac_2=${2:-$BM_SECOND_MAC} id=$(nova baremetal-node-create \ - --pm_address="$BM_PM_ADDR" \ - --pm_user="$BM_PM_USER" \ - --pm_password="$BM_PM_PASS" \ - "$BM_HOSTNAME" \ - "$BM_FLAVOR_CPU" \ - "$BM_FLAVOR_RAM" \ - "$BM_FLAVOR_ROOT_DISK" \ - "$mac_1" \ - | grep ' id ' | get_field 2 ) + --pm_address="$BM_PM_ADDR" \ + --pm_user="$BM_PM_USER" \ + --pm_password="$BM_PM_PASS" \ + "$BM_HOSTNAME" \ + "$BM_FLAVOR_CPU" \ + "$BM_FLAVOR_RAM" \ + "$BM_FLAVOR_ROOT_DISK" \ + "$mac_1" \ + | grep ' id ' | get_field 2 ) [ $? 
-eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node" if [ -n "$mac_2" ]; then id2=$(nova baremetal-interface-add "$id" "$mac_2" ) diff --git a/lib/glance b/lib/glance index c6f11d06da..75e3dd053d 100644 --- a/lib/glance +++ b/lib/glance @@ -194,7 +194,7 @@ function start_glance() { screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then - die $LINENO "g-api did not start" + die $LINENO "g-api did not start" fi } diff --git a/lib/ironic b/lib/ironic index f3b4a72f66..431587d1ea 100644 --- a/lib/ironic +++ b/lib/ironic @@ -195,7 +195,7 @@ function start_ironic_api() { screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then - die $LINENO "ir-api did not start" + die $LINENO "ir-api did not start" fi } diff --git a/lib/keystone b/lib/keystone index c93a4367d2..beddb1cd75 100755 --- a/lib/keystone +++ b/lib/keystone @@ -373,7 +373,7 @@ function start_keystone() { echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then - die $LINENO "keystone did not start" + die $LINENO "keystone did not start" fi # Start proxies if enabled diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index 09dc46bd83..5b5c4590c3 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -66,8 +66,8 @@ function init_trema() { cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \ - -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ - $TREMA_SS_CONFIG + -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ + $TREMA_SS_CONFIG } function gem_install() { diff --git a/lib/nova b/lib/nova index 8deb3a01a9..974b0b421d 100644 --- a/lib/nova +++ b/lib/nova @@ -453,27 +453,27 @@ function create_nova_conf() { fi if is_service_enabled n-novnc || is_service_enabled n-xvnc; then - # Address on which instance vncservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - iniset $NOVA_CONF DEFAULT vnc_enabled true - iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. 
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + iniset $NOVA_CONF DEFAULT vnc_enabled true + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" else - iniset $NOVA_CONF DEFAULT vnc_enabled false + iniset $NOVA_CONF DEFAULT vnc_enabled false fi if is_service_enabled n-spice; then - # Address on which instance spiceservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} - iniset $NOVA_CONF spice enabled true - iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" - iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF spice enabled true + iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" else - iniset $NOVA_CONF spice enabled false + iniset $NOVA_CONF spice enabled false fi iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" @@ -590,7 +590,7 @@ function start_nova_api() { screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" echo "Waiting for nova-api to start..." if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then - die $LINENO "nova-api did not start" + die $LINENO "nova-api did not start" fi # Start proxies if enabled @@ -618,10 +618,9 @@ function start_nova() { # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. 
screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" elif [[ "$VIRT_DRIVER" = 'fake' ]]; then - for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE` - do - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" - done + for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal index 4e7c1734d1..660c977bde 100644 --- a/lib/nova_plugins/hypervisor-baremetal +++ b/lib/nova_plugins/hypervisor-baremetal @@ -61,8 +61,8 @@ function configure_nova_hypervisor() { # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do - # Attempt to convert flags to options - iniset $NOVA_CONF baremetal ${I/=/ } + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I/=/ } done } diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index caf0296ad2..6fae0b17d0 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -82,10 +82,10 @@ EOF" sudo mkdir -p $rules_dir sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules polkit.addRule(function(action, subject) { - if (action.id == 'org.libvirt.unix.manage' && - subject.user == '"$STACK_USER"') { - return polkit.Result.YES; - } + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } }); EOF" unset rules_dir diff --git a/lib/rpc_backend b/lib/rpc_backend index 44c1e44817..a323d649a7 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -102,9 +102,9 @@ function install_rpc_backend() { if is_fedora; then install_package qpid-cpp-server if [[ $DISTRO 
=~ (rhel6) ]]; then - # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to - # be no or you get GSS authentication errors as it - # attempts to default to this. + # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to + # be no or you get GSS authentication errors as it + # attempts to default to this. sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf fi elif is_ubuntu; then diff --git a/lib/swift b/lib/swift index 6ab43c420f..8726f1e7fc 100644 --- a/lib/swift +++ b/lib/swift @@ -104,17 +104,17 @@ ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} # cleanup_swift() - Remove residual data files function cleanup_swift() { - rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - rm ${SWIFT_DISK_IMAGE} - fi - rm -rf ${SWIFT_DATA_DIR}/run/ - if is_apache_enabled_service swift; then - _cleanup_swift_apache_wsgi - fi + rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + if [[ -e ${SWIFT_DISK_IMAGE} ]]; then + rm ${SWIFT_DISK_IMAGE} + fi + rm -rf ${SWIFT_DATA_DIR}/run/ + if is_apache_enabled_service swift; then + _cleanup_swift_apache_wsgi + fi } # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file @@ -192,7 +192,7 @@ function _config_swift_apache_wsgi() { sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template ${apache_vhost_dir}/account-server-${node_number} sudo sed -e " - /^#/d;/^$/d; + /^#/d;/^$/d; s/%PORT%/$account_port/g; s/%SERVICENAME%/account-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; @@ -202,7 +202,7 @@ function _config_swift_apache_wsgi() { sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi sudo sed -e " - 
/^#/d;/^$/d; + /^#/d;/^$/d; s/%SERVICECONF%/account-server\/${node_number}.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi done @@ -577,26 +577,26 @@ function start_swift() { return 0 fi - # By default with only one replica we are launching the proxy, - # container, account and object server in screen in foreground and - # other services in background. If we have SWIFT_REPLICAS set to something - # greater than one we first spawn all the swift services then kill the proxy - # service so we can run it in foreground in screen. ``swift-init ... - # {stop|restart}`` exits with '1' if no servers are running, ignore it just - # in case - swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true - if [[ ${SWIFT_REPLICAS} == 1 ]]; then + # By default with only one replica we are launching the proxy, + # container, account and object server in screen in foreground and + # other services in background. If we have SWIFT_REPLICAS set to something + # greater than one we first spawn all the swift services then kill the proxy + # service so we can run it in foreground in screen. ``swift-init ... 
+ # {stop|restart}`` exits with '1' if no servers are running, ignore it just + # in case + swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + if [[ ${SWIFT_REPLICAS} == 1 ]]; then todo="object container account" - fi - for type in proxy ${todo}; do - swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true - done - screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" - if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account; do - screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" - done - fi + fi + for type in proxy ${todo}; do + swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true + done + screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + if [[ ${SWIFT_REPLICAS} == 1 ]]; then + for type in object container account; do + screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + done + fi } # stop_swift() - Stop running processes (non-screen) diff --git a/lib/tempest b/lib/tempest index 9f41608187..8e4e5210ea 100644 --- a/lib/tempest +++ b/lib/tempest @@ -193,7 +193,7 @@ function configure_tempest() { # If namespaces are disabled, devstack will create a single # public router that tempest should be configured to use. 
public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \ - { print \$2 }") + { print \$2 }") fi fi @@ -328,15 +328,15 @@ function init_tempest() { local disk_image="$image_dir/${base_image_name}-blank.img" # if the cirros uec downloaded and the system is uec capable if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \ - -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then - echo "Prepare aki/ari/ami Images" - ( #new namespace - # tenant:demo ; user: demo - source $TOP_DIR/accrc/demo/demo - euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" - euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH" - euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH" - ) 2>&1 &1 Date: Tue, 22 Oct 2013 10:06:06 -0400 Subject: [PATCH 09/23] clean up whitespace issues on exercises and friends Change-Id: I812a73e46ddd4d5fed4d304d9ef92c1de243f497 --- exercises/boot_from_volume.sh | 2 +- exercises/docker.sh | 3 +- exercises/euca.sh | 52 +++++++++++++++++------------------ exercises/floating_ips.sh | 4 +-- exercises/neutron-adv-test.sh | 24 ++++++++-------- exercises/volumes.sh | 2 +- files/keystone_data.sh | 24 ++++++++-------- tests/functions.sh | 8 +++--- 8 files changed, 59 insertions(+), 60 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index fe27bd0956..634a6d526c 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -119,7 +119,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs diff --git a/exercises/docker.sh b/exercises/docker.sh index 0672bc0087..10c5436c35 100755 --- 
a/exercises/docker.sh +++ b/exercises/docker.sh @@ -62,7 +62,7 @@ die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME" INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs @@ -102,4 +102,3 @@ set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" echo "*********************************************************************" - diff --git a/exercises/euca.sh b/exercises/euca.sh index 64c0014236..ed521e4f7f 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -87,31 +87,31 @@ fi # Volumes # ------- if is_service_enabled c-vol && ! is_service_enabled n-cell; then - VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` - die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" - - VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` - die_if_not_set $LINENO VOLUME "Failure to create volume" - - # Test that volume has been created - VOLUME=`euca-describe-volumes $VOLUME | cut -f2` - die_if_not_set $LINENO VOLUME "Failure to get volume" - - # Test volume has become available - if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then - die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds" - fi - - # Attach volume to an instance - euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ - die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then - die $LINENO "Could not attach $VOLUME to $INSTANCE" - fi - - # Detach volume from an instance - euca-detach-volume $VOLUME || \ - die $LINENO "Failure detaching volume $VOLUME to $INSTANCE" + VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` + die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" + + VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` + die_if_not_set $LINENO VOLUME "Failure to create volume" + + # Test that volume has been created + VOLUME=`euca-describe-volumes $VOLUME | cut -f2` + die_if_not_set $LINENO VOLUME "Failure to get volume" + + # Test volume has become available + if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then + die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds" + fi + + # Attach volume to an instance + euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ + die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then + die $LINENO "Could not attach $VOLUME to $INSTANCE" + fi + + # Detach volume from an instance + euca-detach-volume $VOLUME || \ + die $LINENO "Failure detaching volume $VOLUME to $INSTANCE" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then die $LINENO "Could not detach $VOLUME to $INSTANCE" fi @@ -120,7 +120,7 @@ if is_service_enabled c-vol && ! is_service_enabled n-cell; then euca-delete-volume $VOLUME || \ die $LINENO "Failure to delete volume" if ! 
timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then - die $LINENO "Could not delete $VOLUME" + die $LINENO "Could not delete $VOLUME" fi else echo "Volume Tests Skipped" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 2833b650ba..1a1608c872 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -113,7 +113,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs @@ -168,7 +168,7 @@ if ! is_service_enabled neutron; then # list floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then die $LINENO "Floating IP not allocated" - fi + fi fi # Dis-allow icmp traffic (ping) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index abb29cf333..f8cfff7a7a 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -272,12 +272,12 @@ function create_vms { } function ping_ip { - # Test agent connection. Assumes namespaces are disabled, and - # that DHCP is in use, but not L3 - local VM_NAME=$1 - local NET_NAME=$2 - IP=$(get_instance_ip $VM_NAME $NET_NAME) - ping_check $NET_NAME $IP $BOOT_TIMEOUT + # Test agent connection. 
Assumes namespaces are disabled, and + # that DHCP is in use, but not L3 + local VM_NAME=$1 + local NET_NAME=$2 + IP=$(get_instance_ip $VM_NAME $NET_NAME) + ping_check $NET_NAME $IP $BOOT_TIMEOUT } function check_vm { @@ -329,12 +329,12 @@ function delete_network { } function delete_networks { - foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%' - #TODO(nati) add secuirty group check after it is implemented - # source $TOP_DIR/openrc demo1 demo1 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 - # source $TOP_DIR/openrc demo2 demo2 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 + foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%' + # TODO(nati) add secuirty group check after it is implemented + # source $TOP_DIR/openrc demo1 demo1 + # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 + # source $TOP_DIR/openrc demo2 demo2 + # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 } function create_all { diff --git a/exercises/volumes.sh b/exercises/volumes.sh index e536d16249..9ee9fa910a 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -117,7 +117,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 3f3137cb14..ea2d52d114 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -66,12 +66,12 @@ fi # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then HEAT_USER=$(get_id keystone user-create --name=heat \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=heat@example.com) + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=heat@example.com) keystone user-role-add 
--tenant-id $SERVICE_TENANT \ - --user-id $HEAT_USER \ - --role-id $SERVICE_ROLE + --user-id $HEAT_USER \ + --role-id $SERVICE_ROLE # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then @@ -126,16 +126,16 @@ fi # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com) + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ceilometer@example.com) keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + --user-id $CEILOMETER_USER \ + --role-id $ADMIN_ROLE # Ceilometer needs ResellerAdmin role to access swift account stats. keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $RESELLER_ROLE + --user-id $CEILOMETER_USER \ + --role-id $RESELLER_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ diff --git a/tests/functions.sh b/tests/functions.sh index 7d486d4cc5..40376aa63f 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -122,16 +122,16 @@ fi # test empty option if ini_has_option test.ini ddd empty; then - echo "OK: ddd.empty present" + echo "OK: ddd.empty present" else - echo "ini_has_option failed: ddd.empty not found" + echo "ini_has_option failed: ddd.empty not found" fi # test non-empty option if ini_has_option test.ini bbb handlers; then - echo "OK: bbb.handlers present" + echo "OK: bbb.handlers present" else - echo "ini_has_option failed: bbb.handlers not found" + echo "ini_has_option failed: bbb.handlers not found" fi # test changing empty option From 15f4b7abc1bf4f5b79987886937f175394b6acd0 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 10:08:04 -0400 Subject: [PATCH 10/23] clean up 
whitespace on stack.sh Change-Id: If73435968cfbd0dd3cc519f0a30e02bec5fcb386 --- stack.sh | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/stack.sh b/stack.sh index aa0efea487..9adb5c6908 100755 --- a/stack.sh +++ b/stack.sh @@ -1015,7 +1015,7 @@ if is_service_enabled nova && is_baremetal; then prepare_baremetal_toolchain configure_baremetal_nova_dirs if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - create_fake_baremetal_env + create_fake_baremetal_env fi fi @@ -1176,26 +1176,26 @@ if is_service_enabled g-reg; then TOKEN=$(keystone token-get | grep ' id ' | get_field 2) if is_baremetal; then - echo_summary "Creating and uploading baremetal images" + echo_summary "Creating and uploading baremetal images" - # build and upload separate deploy kernel & ramdisk - upload_baremetal_deploy $TOKEN + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN - # upload images, separating out the kernel & ramdisk for PXE boot - for image_url in ${IMAGE_URLS//,/ }; do - upload_baremetal_image $image_url $TOKEN - done + # upload images, separating out the kernel & ramdisk for PXE boot + for image_url in ${IMAGE_URLS//,/ }; do + upload_baremetal_image $image_url $TOKEN + done else - echo_summary "Uploading images" + echo_summary "Uploading images" - # Option to upload legacy ami-tty, which works with xenserver - if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" - fi + # Option to upload legacy ami-tty, which works with xenserver + if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" + fi - for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url $TOKEN - done + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url $TOKEN + done fi fi @@ -1207,7 +1207,7 @@ fi if is_service_enabled nova && 
is_baremetal; then # create special flavor for baremetal if we know what images to associate [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \ - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID + create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID # otherwise user can manually add it later by calling nova-baremetal-manage [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node @@ -1229,7 +1229,7 @@ fi CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ - SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do + SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do echo $i=${!i} >>$TOP_DIR/.stackenv done From c8b37ff1932fc023f581ff77b4cdab2a7135f78f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 11:31:21 -0400 Subject: [PATCH 11/23] add support for heredoc folding of lines this change in the parser allows for us to have heredocs folded into logical lines. 
Change-Id: I51ebe6cd7b89b5f7194e947896f20b6750e972e3 --- tools/bash8.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/tools/bash8.py b/tools/bash8.py index 82a10107e1..edf7da4645 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -55,10 +55,41 @@ def check_indents(line): print_error('E003: Indent not multiple of 4', line) +def starts_multiline(line): + m = re.search("[^<]<<\s*(?P\w+)", line) + if m: + return m.group('token') + else: + return False + + +def end_of_multiline(line, token): + if token: + return re.search("^%s\s*$" % token, line) is not None + return False + + def check_files(files): + in_multiline = False + logical_line = "" + token = False for line in fileinput.input(files): - check_no_trailing_whitespace(line) - check_indents(line) + # NOTE(sdague): multiline processing of heredocs is interesting + if not in_multiline: + logical_line = line + token = starts_multiline(line) + if token: + in_multiline = True + continue + else: + logical_line = logical_line + line + if not end_of_multiline(line, token): + continue + else: + in_multiline = False + + check_no_trailing_whitespace(logical_line) + check_indents(logical_line) def get_options(): From 2eac56d7c0156376e900195c52433f895851f44a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 11:37:35 -0400 Subject: [PATCH 12/23] final bash8 files for the rest of devstack With this devstack/master is bash8 clean, and ready for enforcement Change-Id: I03fc89b401e6b7a23224d71472122c1bfa3ad0bd --- tools/build_bm_multi.sh | 4 +- tools/build_uec.sh | 6 +- tools/create_userrc.sh | 8 +- tools/jenkins/jenkins_home/build_jenkins.sh | 16 +-- tools/xen/install_os_domU.sh | 10 +- tools/xen/scripts/install-os-vpx.sh | 114 ++++++++++---------- tools/xen/scripts/uninstall-os-vpx.sh | 58 +++++----- 7 files changed, 108 insertions(+), 108 deletions(-) diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh index 52b9b4ea32..328d5762fc 100755 --- 
a/tools/build_bm_multi.sh +++ b/tools/build_bm_multi.sh @@ -22,8 +22,8 @@ run_bm STACKMASTER $HEAD_HOST "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vn if [ ! "$TERMINATE" = "1" ]; then echo "Waiting for head node ($HEAD_HOST) to start..." if ! timeout 60 sh -c "while ! wget -q -O- http://$HEAD_HOST | grep -q username; do sleep 1; done"; then - echo "Head node did not start" - exit 1 + echo "Head node did not start" + exit 1 fi fi diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 6c4a26c2e3..bce051a0b7 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -229,8 +229,8 @@ EOF # (re)start a metadata service ( - pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` - [ -z "$pid" ] || kill -9 $pid + pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` + [ -z "$pid" ] || kill -9 $pid ) cd $vm_dir/uec python meta.py 192.168.$GUEST_NETWORK.1:4567 & @@ -268,7 +268,7 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then sleep 2 while [ ! -e "$vm_dir/console.log" ]; do - sleep 1 + sleep 1 done tail -F $vm_dir/console.log & diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 44b0f6bba0..8383fe7d77 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -105,15 +105,15 @@ if [ -z "$OS_PASSWORD" ]; then fi if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then - export OS_TENANT_NAME=admin + export OS_TENANT_NAME=admin fi if [ -z "$OS_USERNAME" ]; then - export OS_USERNAME=admin + export OS_USERNAME=admin fi if [ -z "$OS_AUTH_URL" ]; then - export OS_AUTH_URL=http://localhost:5000/v2.0/ + export OS_AUTH_URL=http://localhost:5000/v2.0/ fi USER_PASS=${USER_PASS:-$OS_PASSWORD} @@ -249,7 +249,7 @@ if [ $MODE != "create" ]; then for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'` if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then 
- continue; + continue; fi add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" done diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index e0e774ee9e..a556db0f1d 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -6,8 +6,8 @@ set -o errexit # Make sure only root can run our script if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" - exit 1 + echo "This script must be run as root" + exit 1 fi # This directory @@ -31,15 +31,15 @@ apt-get install -y --force-yes $DEPS # Install jenkins if [ ! -e /var/lib/jenkins ]; then - echo "Jenkins installation failed" - exit 1 + echo "Jenkins installation failed" + exit 1 fi # Make sure user has configured a jenkins ssh pubkey if [ ! -e /var/lib/jenkins/.ssh/id_rsa.pub ]; then - echo "Public key for jenkins is missing. This is used to ssh into your instances." - echo "Please run "su -c ssh-keygen jenkins" before proceeding" - exit 1 + echo "Public key for jenkins is missing. This is used to ssh into your instances." + echo "Please run "su -c ssh-keygen jenkins" before proceeding" + exit 1 fi # Setup sudo @@ -96,7 +96,7 @@ PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.h # Configure plugins for plugin in ${PLUGINS//,/ }; do - name=`basename $plugin` + name=`basename $plugin` dest=/var/lib/jenkins/plugins/$name if [ ! -e $dest ]; then curl -L $plugin -o $dest diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 0f314bfa9a..9a2f5a8c03 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -44,9 +44,9 @@ source $THIS_DIR/xenrc xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } # @@ -132,8 +132,8 @@ HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME") # Set up ip forwarding, but skip on xcp-xapi if [ -a /etc/sysconfig/network ]; then if ! 
grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then - # FIXME: This doesn't work on reboot! - echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network + # FIXME: This doesn't work on reboot! + echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network fi fi # Also, enable ip forwarding in rc.local, since the above trick isn't working diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 7469e0c10b..7b0d891493 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -42,69 +42,69 @@ EOF get_params() { - while getopts "hbn:r:l:t:" OPTION; - do - case $OPTION in - h) usage - exit 1 - ;; - n) - BRIDGE=$OPTARG - ;; - l) - NAME_LABEL=$OPTARG - ;; - t) - TEMPLATE_NAME=$OPTARG - ;; - ?) - usage - exit - ;; - esac - done - if [[ -z $BRIDGE ]] - then - BRIDGE=xenbr0 - fi - - if [[ -z $TEMPLATE_NAME ]]; then - echo "Please specify a template name" >&2 - exit 1 - fi - - if [[ -z $NAME_LABEL ]]; then - echo "Please specify a name-label for the new VM" >&2 - exit 1 - fi + while getopts "hbn:r:l:t:" OPTION; + do + case $OPTION in + h) usage + exit 1 + ;; + n) + BRIDGE=$OPTARG + ;; + l) + NAME_LABEL=$OPTARG + ;; + t) + TEMPLATE_NAME=$OPTARG + ;; + ?) 
+ usage + exit + ;; + esac + done + if [[ -z $BRIDGE ]] + then + BRIDGE=xenbr0 + fi + + if [[ -z $TEMPLATE_NAME ]]; then + echo "Please specify a template name" >&2 + exit 1 + fi + + if [[ -z $NAME_LABEL ]]; then + echo "Please specify a name-label for the new VM" >&2 + exit 1 + fi } xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } find_network() { - result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ] - then - result=$(xe_min network-list name-label="$1") - fi - echo "$result" + result=$(xe_min network-list bridge="$1") + if [ "$result" = "" ] + then + result=$(xe_min network-list name-label="$1") + fi + echo "$result" } create_vif() { - local v="$1" - echo "Installing VM interface on [$BRIDGE]" - local out_network_uuid=$(find_network "$BRIDGE") - xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" + local v="$1" + echo "Installing VM interface on [$BRIDGE]" + local out_network_uuid=$(find_network "$BRIDGE") + xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" } @@ -112,20 +112,20 @@ create_vif() # Make the VM auto-start on server boot. 
set_auto_start() { - local v="$1" - xe vm-param-set uuid="$v" other-config:auto_poweron=true + local v="$1" + xe vm-param-set uuid="$v" other-config:auto_poweron=true } destroy_vifs() { - local v="$1" - IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do - xe vif-destroy uuid="$vif" - done - unset IFS + local v="$1" + IFS=, + for vif in $(xe_min vif-list vm-uuid="$v") + do + xe vif-destroy uuid="$vif" + done + unset IFS } diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh index ac260949c4..1ed249433a 100755 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ b/tools/xen/scripts/uninstall-os-vpx.sh @@ -22,63 +22,63 @@ set -ex # By default, don't remove the templates REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"} if [ "$1" = "--remove-templates" ]; then - REMOVE_TEMPLATES=true + REMOVE_TEMPLATES=true fi xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } destroy_vdi() { - local vbd_uuid="$1" - local type=$(xe_min vbd-list uuid=$vbd_uuid params=type) - local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) - local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) + local vbd_uuid="$1" + local type=$(xe_min vbd-list uuid=$vbd_uuid params=type) + local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) + local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) - if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then - xe vdi-destroy uuid=$vdi_uuid - fi + if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then + xe vdi-destroy uuid=$vdi_uuid + fi } uninstall() { - local vm_uuid="$1" - local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) + local vm_uuid="$1" + local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) - if [ "$power_state" != "halted" ]; then - xe vm-shutdown vm=$vm_uuid force=true - fi + if [ "$power_state" != "halted" ]; then + xe vm-shutdown vm=$vm_uuid 
force=true + fi - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - xe vm-uninstall vm=$vm_uuid force=true >/dev/null + xe vm-uninstall vm=$vm_uuid force=true >/dev/null } uninstall_template() { - local vm_uuid="$1" + local vm_uuid="$1" - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null + xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null } # remove the VMs and their disks for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall "$u" + uninstall "$u" done # remove the templates if [ "$REMOVE_TEMPLATES" == "true" ]; then - for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall_template "$u" - done + for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do + uninstall_template "$u" + done fi From ac6c10253a66803f73678e5cbdcee5ee9801f8e2 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 19 Nov 2013 21:06:29 -0800 Subject: [PATCH 13/23] Set swift timeouts higher Devstack is commonly run in a small slow environment, so bump the timeouts up. 
node_timeout is how long between read operations a node takes to respond to the proxy server conn_timeout is all about how long it takes a connect() system call to return Change-Id: Ib437466a3fc9274b8aa49b19e4fe7fa26f553419 Co-Authored-By: Peter Portante Related-Bug: #1252514 (cherry picked from commit a2f2bba4eca9837a589acc1b5f22789915108b25) --- lib/swift | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/swift b/lib/swift index 8726f1e7fc..a9d8c9ace7 100644 --- a/lib/swift +++ b/lib/swift @@ -260,6 +260,15 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + # Devstack is commonly run in a small slow environment, so bump the + # timeouts up. + # node_timeout is how long between read operations a node takes to + # respond to the proxy server + # conn_timeout is all about how long it takes a connect() system call to + # return + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 + # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" From f7e7287c73187f0d4e2eebd79b5c77ab1ce68ac1 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Sun, 24 Nov 2013 14:08:01 +0000 Subject: [PATCH 14/23] Backport Havana: excercices: aggregates needs to be more flexible The actual regex checks a result in python format and because of the change in the bug 1132961, Jekins failed. I have update the regex to work with the old result and the new result. 
From: 75e851a Change-Id: Idb23ddd32f7f7bf0eeecba9794c705365e3b2122 Closes-Bug: 1239726 --- exercises/aggregates.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index e2baecdb11..e5fc7dec84 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -100,7 +100,7 @@ META_DATA_2_KEY=foo META_DATA_3_KEY=bar #ensure no additional metadata is set -nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" +nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY @@ -117,7 +117,7 @@ nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared" nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" +nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" # Test aggregate-add/remove-host From 1376e570946858c95e2ace0cf6c325c78232a71a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 16 Dec 2013 16:35:44 -0500 Subject: [PATCH 15/23] add libxslt1-dev to tempest required files tempest actually needs a pretty new lxml, which means it's going to build it from source. To do that it needs libxslt1-dev to compile. We should be good and let devstack do this out of the box so it works on minimal environments. 
Change-Id: Ia527905c1c15fb8c6793f0ce543ad05e25a88179 (cherry picked from commit db54311552d6c1efad7d9958a539848b3aeea775) --- files/apts/tempest | 1 + files/rpms/tempest | 1 + 2 files changed, 2 insertions(+) create mode 100644 files/apts/tempest create mode 100644 files/rpms/tempest diff --git a/files/apts/tempest b/files/apts/tempest new file mode 100644 index 0000000000..f244e4e783 --- /dev/null +++ b/files/apts/tempest @@ -0,0 +1 @@ +libxslt1-dev \ No newline at end of file diff --git a/files/rpms/tempest b/files/rpms/tempest new file mode 100644 index 0000000000..de32b81504 --- /dev/null +++ b/files/rpms/tempest @@ -0,0 +1 @@ +libxslt-dev \ No newline at end of file From 032c3445f9ae5df128cdf098bd6bd5ba9ce7a102 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Fri, 18 Oct 2013 15:33:26 +0100 Subject: [PATCH 16/23] Create-stack-user script should have execute permissions Currently running stack.sh as root advises you about this script, which is not executable This is needed in stable/havana because grenade says to run this version. Change-Id: I674af044b8f3c31bcc86be5c6552e8086453d5cd (cherry picked from commit 105c6e8718da2db50e48cb4a68be8522a80e101e) --- tools/create-stack-user.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 tools/create-stack-user.sh diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh old mode 100644 new mode 100755 From fc6c44edb2f57ba4fe74638e6b50c79db02290a4 Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Thu, 14 Nov 2013 20:02:47 +0000 Subject: [PATCH 17/23] Make tempest L3 capable plugin aware With this patch, the public network config in tempest.conf will be done for the plugins that support L3. 
This is required to enable third-party CI for stable/havana Conflicts: lib/neutron lib/tempest Change-Id: I820fe300fac45ff92d1281ff0c43ebc137783210 (cherry picked from commit afbc631cb8c89316bbecbf0f2c601103304e1994) --- lib/neutron | 9 +++++++++ lib/tempest | 3 ++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 44fb9e1005..5617cba23e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -110,6 +110,15 @@ Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} # The name of the default q-l3 router Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} + +# The next two variables are configured by plugin +# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* +# +# The plugin supports L3. +Q_L3_ENABLED=${Q_L3_ENABLED:-False} +# L3 routers exist per tenant +Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} + # List of config file names in addition to the main plugin config file # See _configure_neutron_common() for details about setting it up declare -a Q_PLUGIN_EXTRA_CONF_FILES diff --git a/lib/tempest b/lib/tempest index 8e4e5210ea..3ad04e7bc3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -14,6 +14,7 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``Q_USE_NAMESPACE`` # - ``Q_ROUTER_NAME`` +# - ``Q_L3_ENABLED`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` # - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone @@ -186,7 +187,7 @@ function configure_tempest() { tenant_networks_reachable=true fi - if is_service_enabled q-l3; then + if [ "$Q_L3_ENABLED" = "True" ]; then public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') if [ "$Q_USE_NAMESPACE" == "False" ]; then From b921bb386bb18ab310c7ac18dd6ffdfdb9fd354c Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Sun, 22 Dec 2013 07:59:37 -0800 Subject: [PATCH 18/23] Neutron: create network resources when agents are started Creating network resources before the agents start with the ml2 plugin 
might result in bnding failures for some resources such as DHCP ports because the resources are created before the agents report to the server. This patch should ensure all agents have started and reported their state to the server before creating network resources. Change-Id: Ifafb73bd3c5409a555a573ad9a94b96d79061c38 Related-Bug: #1253896 (cherry picked from commit 6fbb28d021d168271bb2a0643059e8c65c8ce74b) --- stack.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 3f2ed33f96..df2c0fb31b 100755 --- a/stack.sh +++ b/stack.sh @@ -1087,10 +1087,7 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Neutron" - start_neutron_service_and_check - create_neutron_initial_network - setup_neutron_debug elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then @@ -1110,6 +1107,12 @@ fi if is_service_enabled neutron; then start_neutron_agents fi +# Once neutron agents are started setup initial network elements +if is_service_enabled q-svc; then + echo_summary "Creating initial neutron network elements" + create_neutron_initial_network + setup_neutron_debug +fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova From ed079cc4c90721224c952bb71f2729c990530113 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 14 Jan 2014 18:52:51 +0100 Subject: [PATCH 19/23] Run neutron-debug with admin tenant in neutron-adv-test Because neutron-debug create-probe needs admin role only, demo tenants cannot create ports. neutron-debug is wrapped in order to run it only with admin tenant. 
Change-Id: I149d8e4e8fac6aa5c496b8f186aa0afb28bf81c2 Closes-Bug: #1269090 (cherry picked from commit 52a7b6ecbad11c08dcd77a6fcd8bfef6a20324a9) --- exercises/neutron-adv-test.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index f8cfff7a7a..7a13723bec 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -179,6 +179,14 @@ function confirm_server_active { fi } +function neutron_debug_admin { + local os_username=$OS_USERNAME + local os_tenant_id=$OS_TENANT_ID + source $TOP_DIR/openrc admin admin + neutron-debug $@ + source $TOP_DIR/openrc $os_username $os_tenant_id +} + function add_tenant { local TENANT=$1 local USER=$2 @@ -234,7 +242,7 @@ function create_network { source $TOP_DIR/openrc $TENANT $TENANT local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - neutron-debug probe-create --device-owner compute $NET_ID + neutron_debug_admin probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } From be904424c2a5437a4ea1930c7ffc3560bb9ac571 Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Thu, 16 Jan 2014 18:03:38 -0500 Subject: [PATCH 20/23] Fix stop_neutron metadata agent function Currently, stop_neutron fails in Jenkins because it kills itself. This patch ensure we kill only neutron metadata agent, and not the awk process in itself. Change-Id: I0f1f296b639096f08c7018cd1daebfcd9a054d01 Closes-bug: #1269982 (cherry picked from commit 04f6dc24a7845ee139977fa5b0c5e53aad8e99bd) --- lib/neutron | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 44fb9e1005..5dfed2511b 100644 --- a/lib/neutron +++ b/lib/neutron @@ -460,8 +460,7 @@ function stop_neutron() { [ ! 
-z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid + sudo pkill -9 neutron-ns-metadata-proxy || : fi } From 6c223b3204605f5df9dcf7c11891120126dc2ab0 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 28 Jan 2014 23:01:38 +0100 Subject: [PATCH 21/23] Stop all neutron-ns-metadata-proxy with stop_neutron Process name is actually python therefore neutron-ns-metadata-proxy pattern didn't match wanted process. Closes-bug: #1269982 Change-Id: Ib4439b0d32f103253b461841fa903c65763ff280 (cherry picked from commit 1f76328027bb5cee0b0ea7077f4c59c919f1c4ae) --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 8ced2ad84c..3abc13e786 100644 --- a/lib/neutron +++ b/lib/neutron @@ -469,7 +469,7 @@ function stop_neutron() { [ ! -z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - sudo pkill -9 neutron-ns-metadata-proxy || : + sudo pkill -9 -f neutron-ns-metadata-proxy || : fi } From 3059cc97878cbf11f346c4daa813166fa530d57f Mon Sep 17 00:00:00 2001 From: Mark McClain Date: Wed, 5 Feb 2014 17:43:08 -0500 Subject: [PATCH 22/23] Disable key injection by default. Change-Id: Ib618da1bd21da09f8855ec4691bff79c4c3b3d9c --- lib/nova | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/nova b/lib/nova index 99ded024b9..35675d4fa0 100644 --- a/lib/nova +++ b/lib/nova @@ -490,6 +490,12 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" + + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. 
+ iniset $NOVA_CONF libvirt inject_partition '-2' + fi } function init_nova_cells() { From 16891addddd15c14da33ec5e894325d58d7ef7e3 Mon Sep 17 00:00:00 2001 From: Scott Devoid Date: Thu, 17 Apr 2014 20:26:07 +0000 Subject: [PATCH 23/23] extras.d to use Sheepdog as devstack backing store. Change-Id: Iedd543323d2c1679b01dd565d1556e283f16d720 --- extras.d/40-sheepdog.sh | 27 ++++++++++++ files/apts/sheepdog | 1 + files/rpms-suse/sheepdog | 1 + files/rpms/sheepdog | 1 + lib/storage/sheepdog | 92 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 122 insertions(+) create mode 100644 extras.d/40-sheepdog.sh create mode 100644 files/apts/sheepdog create mode 100644 files/rpms-suse/sheepdog create mode 100644 files/rpms/sheepdog create mode 100644 lib/storage/sheepdog diff --git a/extras.d/40-sheepdog.sh b/extras.d/40-sheepdog.sh new file mode 100644 index 0000000000..755984fe36 --- /dev/null +++ b/extras.d/40-sheepdog.sh @@ -0,0 +1,27 @@ +# sheepdog.sh - Devstack extras script to enable sheepdog storage backend + +if is_service_enabled sheepdog; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/sheepdog + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Sheepdog" + install_sheepdog + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # NOTE (scott-devoid): we do everything here because we need to have + # sheepdog started before other openstack components, e.g. glance in + # order to upload images to the sheepdog cluster. 
+ echo_summary "Configuring Sheepdog cluster" + configure_sheepdog + echo_summary "Starting Sheepdog cluster" + start_sheepdog + fi + if [[ "$1" == "unstack" ]]; then + echo_summary "Stopping Sheepdog cluster" + stop_sheepdog + fi + if [[ "$1" == "clean" ]]; then + echo_summary "Clearing Sheepdog cluster of data" + cleanup_sheepdog + fi +fi diff --git a/files/apts/sheepdog b/files/apts/sheepdog new file mode 100644 index 0000000000..9676984071 --- /dev/null +++ b/files/apts/sheepdog @@ -0,0 +1 @@ +sheepdog # NOPRIME diff --git a/files/rpms-suse/sheepdog b/files/rpms-suse/sheepdog new file mode 100644 index 0000000000..9676984071 --- /dev/null +++ b/files/rpms-suse/sheepdog @@ -0,0 +1 @@ +sheepdog # NOPRIME diff --git a/files/rpms/sheepdog b/files/rpms/sheepdog new file mode 100644 index 0000000000..9676984071 --- /dev/null +++ b/files/rpms/sheepdog @@ -0,0 +1 @@ +sheepdog # NOPRIME diff --git a/lib/storage/sheepdog b/lib/storage/sheepdog new file mode 100644 index 0000000000..b4a4f81275 --- /dev/null +++ b/lib/storage/sheepdog @@ -0,0 +1,92 @@ +# lib/sheepdog +# Functions to control the configuration of sheepdog storage backend and +# enable it in cinder, glance and nova. + +# Dependencies +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# ``SERVICE_HOST`` +# ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_sheepdog +# configure_sheepdog +# start_sheepdog +# stop_sheepdog +# cleanup_sheepdog + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +# Set ``SHEEPDOG_DATA_DIR`` to the location of Sheepdog ``/meta`` and devices. 
+# Default is to place these in ``$DATA_DIR/sheepdog`` +SHEEPDOG_DATA_DIR=${SHEEPDOG_DATA_DIR:-${DATA_DIR}/sheepdog} +SHEEPDOG_WORKER_COUNT=${SHEEPDOG_WORKER_COUNT:-3} +SHEEPDOG_WORKER_STARTING_PORT=${SHEEPDOG_WORKER_STARTING_PORT:-7000} +SHEEPDOG_STORE_CHUNK_SIZE=${SHEEPDOG_STORE_CHUNK_SIZE:-64} +SHEEPDOG_CLIENT_COMMAND=${SHEEPDOG_CLIENT_COMMAND:-collie} + + +# Functions +# --------- + +# install_sheepdog() - Install Sheepdog dependencies +function install_sheepdog() { + install_package sheepdog +} + +# configure_sheepdog() - Make all the services use sheepdog! +function configure_sheepdog() { + if is_service_enabled glance; then + iniset $GLANCE_API_CONF DEFAULT default_store sheepdog + iniset $GLANCE_API_CONF DEFAULT sheepdog_store_port ${SHEEPDOG_WORKER_STARTING_PORT} + iniset $GLANCE_API_CONF DEFAULT sheepdog_store_chunk_size ${SHEEPDOG_STORE_CHUNK_SIZE} + fi + if is_service_enabled cinder; then + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" + fi + if is_service_enabled nova; then + iniset $NOVA_CONF DEFAULT images_sheepdog_port ${SHEEPDOG_WORKER_STARTING_PORT} + iniset $NOVA_CONF DEFAULT libvirt_images_type "sheepdog" + fi +} + + +# start_sheepdog() - Create Sheepdog cluster +function start_sheepdog() { + for i in $(seq 0 $(($SHEEPDOG_WORKER_COUNT-1))); do + if [[ ! -d $SHEEPDOG_DATA_DIR/$i ]]; then + mkdir -p $SHEEPDOG_DATA_DIR/$i; + fi + port=$((${SHEEPDOG_WORKER_STARTING_PORT} + $i)) + sheep -c local -d ${SHEEPDOG_DATA_DIR}/$i -z $i -p $port; + sleep 1; + done + status=$(${SHEEPDOG_CLIENT_COMMAND} cluster info | grep "Waiting for cluster to be formatted") + if [ ! 
-z "$status" ]; then + ${SHEEPDOG_CLIENT_COMMAND} cluster format + fi +} + +# stop_sheepdog() - Shutdown Sheepdog cluster +function stop_sheepdog() { + ${SHEEPDOG_CLIENT_COMMAND} cluster shutdown +} + +# cleanup_sheepdog() - Delete data from Sheepdog cluster +function cleanup_sheepdog() { + for i in $(seq 0 $(($SHEEPDOG_WORKER_COUNT-1))); do + if [[ -d ${SHEEPDOG_DATA_DIR}/$i ]]; then + rm -r ${SHEEPDOG_DATA_DIR}/$i; + fi + done +} + +# Restore xtrace +$XTRACE