Updating the OpenStack guide to be Dave Neary's content 71/17271/2
authorDave Neary <dneary@redhat.com>
Sat, 28 Mar 2015 02:04:23 +0000 (21:04 -0500)
committerMathieu Lemay <mlemay@inocybe.com>
Tue, 7 Apr 2015 17:04:14 +0000 (17:04 +0000)
Change-Id: I44b7a3bafc930073ceececbbb5b8988bd71f1fb4
Signed-off-by: Dave Neary <dneary@redhat.com>
Signed-off-by: Colin Dixon <colin@colindixon.com>
18 files changed:
manuals/howto-openstack/UserGuide.xpr [deleted file]
manuals/howto-openstack/bk-howto-openstack.xml [deleted file]
manuals/howto-openstack/ch_install.xml [deleted file]
manuals/howto-openstack/images/Horizon-OpenDaylight-e1392513990486.jpg [deleted file]
manuals/howto-openstack/images/OVSDB-Architecture.png [deleted file]
manuals/howto-openstack/images/Overlay-OpenDaylight-OVSDB-OpenFlow.png [deleted file]
manuals/howto-openstack/images/VirtualBox-HostOnly-Networks.png [deleted file]
manuals/howto-openstack/images/VirtualBox-HostOnly-Nics.png [deleted file]
manuals/howto-openstack/pom.xml
manuals/howto-openstack/section_configure_devstack.xml [deleted file]
manuals/howto-openstack/section_configure_fedora_images.xml [deleted file]
manuals/howto-openstack/section_configuring_openstack.xml [deleted file]
manuals/howto-openstack/section_create_multi_network.xml [deleted file]
manuals/howto-openstack/section_ovsdb_project.xml [deleted file]
manuals/howto-openstack/section_start_odl_controller.xml [deleted file]
manuals/howto-openstack/section_unstack_and_cleanup.xml [deleted file]
manuals/howto-openstack/section_verifying_openstack.xml [deleted file]
manuals/howto-openstack/src/main/asciidoc/openstack.adoc [new file with mode: 0644]

diff --git a/manuals/howto-openstack/UserGuide.xpr b/manuals/howto-openstack/UserGuide.xpr
deleted file mode 100644 (file)
index fab6175..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="15.1">
-    <meta>
-        <filters directoryPatterns="" filePatterns="" positiveFilePatterns="" showHiddenFiles="false"/>
-        <options>
-            <serialized version="15.1" xml:space="preserve">
-                <map>
-                    <entry>
-                        <String>validation.scenarios</String>
-                        <validationScenario-array/>
-                    </entry>
-                </map>
-            </serialized>
-        </options>
-    </meta>
-    <projectTree name="UserGuide.xpr">
-        <folder path="."/>
-    </projectTree>
-</project>
\ No newline at end of file
diff --git a/manuals/howto-openstack/bk-howto-openstack.xml b/manuals/howto-openstack/bk-howto-openstack.xml
deleted file mode 100644 (file)
index 4cac127..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book>
-<book version="5.0" xml:id="os-user-guide" xmlns="http://docbook.org/ns/docbook"
-  xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xi="http://www.w3.org/2001/XInclude"
-  xmlns:svg="http://www.w3.org/2000/svg" xmlns:raxm="http://docs.rackspace.com/api/metadata"
-  xmlns:m="http://www.w3.org/1998/Math/MathML" xmlns:html="http://www.w3.org/1999/xhtml"
-  xmlns:db="http://docbook.org/ns/docbook">
-  <title>OpenDaylight OpenStack How To</title>
-  <titleabbrev>OpenDaylight OpenStack How To</titleabbrev>
-  <info>
-    <author>
-      <personname><firstname/>
-        <surname/></personname>
-      <affiliation>
-        <orgname>Linux Foundation</orgname>
-      </affiliation>
-    </author>
-    <copyright>
-      <year>2014</year>
-      <holder>Linux Foundation</holder>
-    </copyright>
-    <releaseinfo>Lithium</releaseinfo>
-    <productname>OpenDaylight</productname>
-    <pubdate/>
-    <legalnotice role="cc-by">
-      <annotation>
-        <remark>Copyright details are filled in by the template.</remark>
-      </annotation>
-    </legalnotice>
-    <abstract>
-      <para>OpenDaylight is an open platform for network programmability to enable SDN and create a
-        solid foundation for NFV for networks at any size and scale. OpenDaylight software is a
-        combination of components including a fully pluggable controller, interfaces, protocol
-        plug-ins and applications. </para>
-    </abstract>
-    <revhistory>
-      <revision>
-        <date>2014-02-24</date>
-        <revdescription>
-          <itemizedlist>
-            <listitem>
-              <para>First edition of this document.</para>
-            </listitem>
-          </itemizedlist>
-        </revdescription>
-      </revision>
-    </revhistory>
-  </info>
-  <xi:include href="ch_install.xml">
-  </xi:include>
-  <chapter xmlns="http://docbook.org/ns/docbook"
-      xmlns:xi="http://www.w3.org/2001/XInclude"
-      xmlns:xlink="http://www.w3.org/1999/xlink"
-      xmlns:raxm="http://docs.rackspace.com/api/metadata" version="5.0"
-      xml:id="ch_openstack_howto">
-    <info>
-      <title>OpenDaylight OpenStack How To</title>
-    </info>
-    <xi:include href="section_ovsdb_project.xml">
-    </xi:include>
-    <xi:include href="section_configure_fedora_images.xml">
-    </xi:include>
-    <xi:include href="section_start_odl_controller.xml">
-    </xi:include>
-    <xi:include href="section_configure_devstack.xml">
-    </xi:include>
-    <xi:include href="section_configuring_openstack.xml">
-    </xi:include>
-    <xi:include href="section_verifying_openstack.xml">
-    </xi:include>
-    <xi:include href="section_create_multi_network.xml">
-    </xi:include>
-    <xi:include href="section_unstack_and_cleanup.xml">
-    </xi:include>
-  </chapter>
-  <xi:include href="../common/app_support.xml"> 
-  </xi:include>
-</book>
diff --git a/manuals/howto-openstack/ch_install.xml b/manuals/howto-openstack/ch_install.xml
deleted file mode 100644 (file)
index be3174c..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE chapter [
-]>
-<chapter xmlns="http://docbook.org/ns/docbook"
-    xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink"
-    xmlns:raxm="http://docs.rackspace.com/api/metadata" version="5.0"
-    xml:id="ch_install">
-    <info>
-        <title>OpenDaylight Installation</title>
-    </info>
-    <para>The OpenDaylight Installation process is straight forward and self contained. OpenDaylight
-        can be installed in your environment by using release archives, RPM, VirtualBox images or
-        even via Docker containers. </para>
-    <!-- these xml files don't exist yet
-    <?hard-pagebreak?>
-    <xi:include href="section_install_zip.xml"/>
-    <?hard-pagebreak?>
-    <xi:include href="section_install_rpm.xml"/> 
-    <?hard-pagebreak?>
-    <xi:include href="section_install_virtualbox.xml"/> 
-    <?hard-pagebreak?>
-    <xi:include href="section_install_docker.xml"/> 
-    -->
-</chapter>
diff --git a/manuals/howto-openstack/images/Horizon-OpenDaylight-e1392513990486.jpg b/manuals/howto-openstack/images/Horizon-OpenDaylight-e1392513990486.jpg
deleted file mode 100644 (file)
index fcd9e17..0000000
Binary files a/manuals/howto-openstack/images/Horizon-OpenDaylight-e1392513990486.jpg and /dev/null differ
diff --git a/manuals/howto-openstack/images/OVSDB-Architecture.png b/manuals/howto-openstack/images/OVSDB-Architecture.png
deleted file mode 100644 (file)
index e494b04..0000000
Binary files a/manuals/howto-openstack/images/OVSDB-Architecture.png and /dev/null differ
diff --git a/manuals/howto-openstack/images/Overlay-OpenDaylight-OVSDB-OpenFlow.png b/manuals/howto-openstack/images/Overlay-OpenDaylight-OVSDB-OpenFlow.png
deleted file mode 100644 (file)
index 87402c4..0000000
Binary files a/manuals/howto-openstack/images/Overlay-OpenDaylight-OVSDB-OpenFlow.png and /dev/null differ
diff --git a/manuals/howto-openstack/images/VirtualBox-HostOnly-Networks.png b/manuals/howto-openstack/images/VirtualBox-HostOnly-Networks.png
deleted file mode 100644 (file)
index 6db6061..0000000
Binary files a/manuals/howto-openstack/images/VirtualBox-HostOnly-Networks.png and /dev/null differ
diff --git a/manuals/howto-openstack/images/VirtualBox-HostOnly-Nics.png b/manuals/howto-openstack/images/VirtualBox-HostOnly-Nics.png
deleted file mode 100644 (file)
index fafad24..0000000
Binary files a/manuals/howto-openstack/images/VirtualBox-HostOnly-Nics.png and /dev/null differ
index b18462c6230f8db11cf232c8f628225e5cbdbe5f..141d85258917f34870f4664a492e7c0333353f0f 100644 (file)
@@ -8,35 +8,82 @@
     <relativePath>../pom.xml</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
-  <artifactId>opendaylight-howto-openstack</artifactId>
+  <artifactId>openstack</artifactId>
   <packaging>jar</packaging>
-  <name>OpenDaylight Docs - Manuals - OpenStack How To</name>
+  <name>OpenDaylight Docs - Manuals - OpenStack Howto</name>
   <properties>
     <!-- This is set by Jenkins according to the branch. -->
     <release.path.name>local</release.path.name>
     <comments.enabled>1</comments.enabled>
+    <bookname>openstack</bookname>
   </properties>
   <!-- ################################################ -->
   <!-- USE "mvn clean generate-sources" to run this POM -->
   <!-- ################################################ -->
   <build>
     <plugins>
+      <plugin>
+         <groupId>org.asciidoctor</groupId>
+         <artifactId>asciidoctor-maven-plugin</artifactId>
+         <version>${asciidoctor.version}</version>
+         <executions>
+         <execution>
+            <id>output-docbook</id>
+            <phase>generate-sources</phase>
+            <goals>
+                <goal>process-asciidoc</goal>
+            </goals>
+            <configuration>
+                <backend>docbook5</backend>
+                <doctype>book</doctype>
+            </configuration>
+        </execution>
+        </executions>
+        <configuration>
+           <sourceDirectory>src/main/asciidoc</sourceDirectory>
+           <sourceDocumentName>${bookname}.adoc</sourceDocumentName>
+           <imagesDir>./images</imagesDir>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-resources-plugin</artifactId>
+        <version>2.6</version>
+        <executions>
+          <execution>
+            <id>copy-resources</id>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>copy-resources</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${basedir}/target/generated-docs</outputDirectory>
+              <resources>
+                <resource>
+                  <directory>src/main/resources</directory>
+                  <includes>
+                    <include>**/*.*</include>
+                  </includes>
+               </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
       <plugin>
         <groupId>com.inocybe.api</groupId>
         <artifactId>sdndocs-maven-plugin</artifactId>
         <version>0.1.0</version>
-<!-- version is set in ../pom.xml file -->
         <executions>
-          <!-- Configuration for OpenStack End User Guide -->
           <execution>
             <id>generate-webhelp</id>
             <goals>
               <goal>generate-webhelp</goal>
             </goals>
-            <phase>generate-sources</phase>
+            <phase>compile</phase>
             <configuration>
               <profileAudience>enduser</profileAudience>
-              <includes>bk-howto-openstack.xml</includes>
+              <includes>target/generated-docs/${bookname}.xml</includes>
+            <!--  <includes>bk-install-guide.xml</includes> -->
               <generateToc>
                 appendix  toc,title
                 article/appendix  nop
@@ -50,8 +97,8 @@
                 reference toc,title
                 set       toc,title
               </generateToc>
-              <webhelpDirname>howto-openstack</webhelpDirname>
-              <pdfFilenameBase>howto-openstack</pdfFilenameBase>
+              <webhelpDirname>${bookname}</webhelpDirname>
+              <pdfFilenameBase>${bookname}</pdfFilenameBase>
             </configuration>
           </execution>
         </executions>
           <disqusShortname>os-user-guide</disqusShortname>
           <enableGoogleAnalytics>1</enableGoogleAnalytics>
           <googleAnalyticsId>UA-17511903-1</googleAnalyticsId>
-    -->      <suppressFooterNavigation>0</suppressFooterNavigation>
-          <canonicalUrlBase>http://docs.opendaylight.org/howto-openstack/content/</canonicalUrlBase>
+    -->   <suppressFooterNavigation>0</suppressFooterNavigation>
+          <canonicalUrlBase>http://docs.opendaylight.org/user-guide/content/</canonicalUrlBase>
           <glossaryCollection>${basedir}/../glossary/glossary-terms.xml</glossaryCollection>
         </configuration>
       </plugin>
+      <plugin>
+       <groupId>org.apache.maven.plugins</groupId>
+       <artifactId>maven-site-plugin</artifactId>
+       <version>3.1</version>
+       <configuration>
+          <inputDirectory>${project.build.directory}/docbkx/webhelp</inputDirectory>
+       </configuration>
+       <dependencies>
+           <dependency>
+               <groupId>org.apache.maven.wagon</groupId>
+               <artifactId>wagon-webdav-jackrabbit</artifactId>
+               <version>2.2</version>
+           </dependency>
+           <dependency>
+               <groupId>org.slf4j</groupId>
+               <artifactId>slf4j-api</artifactId>
+               <version>1.6.1</version>
+           </dependency>
+       </dependencies>
+     </plugin>
     </plugins>
   </build>
 </project>
diff --git a/manuals/howto-openstack/section_configure_devstack.xml b/manuals/howto-openstack/section_configure_devstack.xml
deleted file mode 100644 (file)
index 2664d3c..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE section [
- <!-- Some useful entities borrowed from HTML -->
-<!ENTITY ndash  "&#x2013;">
-<!ENTITY mdash  "&#x2014;">
-<!ENTITY hellip "&#x2026;">
-]>
-<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="configure_devstack">
-    <title>Configure the DevStack for the Openstack Controller</title>
-    <para>Make sure all bridges are removed only if you have previously “stacked”</para>
-    <para>
-        <screen><command>$sudo ovs-vsctl show</command></screen>
-    </para>
-    <para>Once the OpenDaylight Controller is running, stack the OpenStack Controller:</para>
-    <para><emphasis role="bold">Fedora 19:</emphasis></para>
-    <para>
-        <screen><command>
-$ cd ~/
-$ cd devstack
-$ cp local.conf.control local.conf
-$ vi local.conf</command></screen>
-    </para>
-    <para><emphasis role="bold">Fedora 20:</emphasis></para>
-    <para>
-        <screen><command>
-$ cd ~/
-$ cp local.conf.control devstack/local.conf
-$ cd devstack
-$ vi local.conf</command></screen>
-    </para>
-    <para>Edit the local.conf you just copied with the appropriate IPs. Replace all instances with
-        brackets to the Daylight SDN controller, the OpenStack controller IP or the Openstack
-        compute IP (Compute ethX address is only on the compute node).</para>
-    <para>In the local.conf you will see four lines that require the hardcoding of an IP
-        address.</para>
-    <para>
-        <screen><command>
-SERVICE_HOST=
-HOST_IP=
-VNCSERVER_PROXYCLIENT_ADDRESS=
-url=http://:8080/controller/nb/v2/neutron</command></screen>
-    </para>
-    <para>The following is the OpenStack controller local.conf for this tutorial:</para>
-    <para>
-        <screen><command>
-[[local|localrc]]
-LOGFILE=stack.sh.log
-# Logging Section
-SCREEN_LOGDIR=/opt/stack/data/log
-LOG_COLOR=False
-# Prevent refreshing of dependencies and DevStack recloning
-OFFLINE=True
-#RECLONE=yes
-
-disable_service rabbit
-enable_service qpid
-enable_service n-cpu
-enable_service n-cond
-disable_service n-net
-enable_service q-svc
-enable_service q-dhcp
-enable_service q-l3
-enable_service q-meta
-enable_service quantum
-enable_service tempest
-
-Q_HOST=$SERVICE_HOST
-HOST_IP=172.16.86.129
-
-Q_PLUGIN=ml2
-Q_ML2_PLUGIN_MECHANISM_DRIVERS=opendaylight,logger
-ENABLE_TENANT_TUNNELS=True
-NEUTRON_REPO=https://github.com/CiscoSystems/neutron.git
-NEUTRON_BRANCH=odl_ml2
-
-VNCSERVER_PROXYCLIENT_ADDRESS=172.16.86.129
-VNCSERVER_LISTEN=0.0.0.0
-
-HOST_NAME=fedora-odl-1
-SERVICE_HOST_NAME=${HOST_NAME}
-SERVICE_HOST=172.16.86.129
-
-FLOATING_RANGE=192.168.210.0/24
-PUBLIC_NETWORK_GATEWAY=192.168.75.254
-MYSQL_HOST=$SERVICE_HOST
-RABBIT_HOST=$SERVICE_HOST
-GLANCE_HOSTPORT=$SERVICE_HOST:9292
-KEYSTONE_AUTH_HOST=$SERVICE_HOST
-KEYSTONE_SERVICE_HOST=$SERVICE_HOST
-
-MYSQL_PASSWORD=mysql
-RABBIT_PASSWORD=rabbit
-QPID_PASSWORD=rabbit
-SERVICE_TOKEN=service
-SERVICE_PASSWORD=admin
-ADMIN_PASSWORD=admin
-
-[[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
-[agent]
-minimize_polling=True
-
-[ml2_odl]
-url=http://172.16.86.129:8080/controller/nb/v2/neutron
-username=admin
-password=admin</command></screen>
-    </para>
-    <para>Verify the local.conf by greping for the IP prefix used:</para>
-    <para>
-        <screen><command>
-$ grep 172.16 local.conf
-HOST_IP=172.16.86.129
-VNCSERVER_PROXYCLIENT_ADDRESS=172.16.86.129
-SERVICE_HOST=172.16.86.129
-url=http://172.16.86.129:8080/controller/nb/v2/neutron</command></screen>
-    </para>
-    <para>Finally execute the stack.sh shell script:</para>
-    <para>
-        <screen><command>$ ./stack.sh</command></screen>
-    </para>
-    <para>You should see activity in your OSGI console as Neutron adds the default private and
-        public networks like so:</para>
-    <para>
-        <screen><computeroutput>
-osgi&amp;gt; 2014-02-06 20:58:27.418 UTC [http-bio-8080-exec-1] INFO o.o.c.u.internal.UserManager - Local Authentication Succeeded for User: "admin"
-2014-02-06 20:58:27.419 UTC [http-bio-8080-exec-1] INFO o.o.c.u.internal.UserManager - User "admin" authorized for the following role(s): [Network-Admin]</computeroutput></screen>
-    </para>
-    <para>You will see more activity as ODL programs the OVSDB server running on the OpenStack
-        node.</para>
-    <para>Here is the state of Open vSwitch after the stack completes and prior to booting a VM
-        instance. If you do not see the is_connected: true boolean after Manager (OVSDB) and
-        Controller (OpenFlow), an error has occured, check that the controller/manager IPs are
-        reachable and the ports are bound using the lsof command listed earlier:</para>
-    <screen><prompt>[odl@fedora-odl-1 devstack]$</prompt><command>sudo ovs-vsctl show</command>
-<computeroutput>
-17074e89-2ac5-4bba-997a-1a5a3527cf56
-Manager "tcp:172.16.86.129:6640"
-is_connected: true
-Bridge br-int
-Controller "tcp:172.16.86.129:6633"
-is_connected: true
-fail_mode: secure
-Port br-int
-Interface br-int
-type: internal
-Port "tap1e3dfa54-9c"
-Interface "tap1e3dfa54-9c"
-Bridge br-ex
-Controller "tcp:172.16.86.129:6633"
-is_connected: true
-Port "tap9301c38d-d8"
-Interface "tap9301c38d-d8"
-Port br-ex
-Interface br-ex
-type: internal
-ovs_version: "2.0.0"
-
-Here are the OpenFlow v1.3 flow rules for the default namespace ports in OVS (qdhcp / qrouter):
-
-[crayon-5326f94b7c170907686501 lang="bash" ]OFPST_FLOW reply (OF1.3) (xid=0x2):
-cookie=0x0, duration=202.138s, table=0, n_packets=0, n_bytes=0, send_flow_rem in_port=1,dl_src=fa:16:3e:fb:4a:32 actions=set_field:0x2-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=202.26s, table=0, n_packets=0, n_bytes=0, send_flow_rem in_port=1,dl_src=fa:16:3e:2e:29:d3 actions=set_field:0x1-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=202.246s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=8192,in_port=1 actions=drop
-cookie=0x0, duration=202.302s, table=0, n_packets=0, n_bytes=0, send_flow_rem dl_type=0x88cc actions=CONTROLLER:56
-cookie=0x0, duration=202.186s, table=10, n_packets=0, n_bytes=0, send_flow_rem priority=8192,tun_id=0x1 actions=goto_table:20
-cookie=0x0, duration=202.063s, table=10, n_packets=0, n_bytes=0, send_flow_rem priority=8192,tun_id=0x2 actions=goto_table:20
-cookie=0x0, duration=202.14s, table=20, n_packets=0, n_bytes=0, send_flow_rem priority=8192,tun_id=0x1 actions=drop
-cookie=0x0, duration=202.046s, table=20, n_packets=0, n_bytes=0, send_flow_rem priority=8192,tun_id=0x2 actions=drop
-cookie=0x0, duration=202.2s, table=20, n_packets=0, n_bytes=0, send_flow_rem priority=16384,tun_id=0x1,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:1
-cookie=0x0, duration=202.083s, table=20, n_packets=0, n_bytes=0, send_flow_rem priority=16384,tun_id=0x2,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:1
-cookie=0x0, duration=202.211s, table=20, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x1,dl_dst=fa:16:3e:2e:29:d3 actions=output:1
-cookie=0x0, duration=202.105s, table=20, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x2,dl_dst=fa:16:3e:fb:4a:32 actions=output:1</computeroutput></screen>
-  </section>
diff --git a/manuals/howto-openstack/section_configure_fedora_images.xml b/manuals/howto-openstack/section_configure_fedora_images.xml
deleted file mode 100644 (file)
index 4be914f..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE section [
- <!-- Some useful entities borrowed from HTML -->
-<!ENTITY ndash  "&#x2013;">
-<!ENTITY mdash  "&#x2014;">
-<!ENTITY hellip "&#x2026;">
-]>
-<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="configure_fedora_images">
-    <title>Configure the Fedora Images for your Environment</title>
-    <para>To configure you have 2 options, Fedora 19 and Fedora 20. Fedora 19 is recommanded due to
-        Fderoa 20 having an issue with MariaDB and hostnames. For assistance with getting the stack
-        going, ping the OVSDB Listserv and check the archives for answers.</para>
-    <para>Download the pre-built image that contains OpenDaylight, DevStack installing Ice House
-        OpenStack, Open vSwitch all on Fedora:</para>
-    <para><emphasis role="bold">Fedora 19:</emphasis></para>
-    <para>
-        <screen><command>curl -O https://wiki.opendaylight.org/images/HostedFiles/ODL_Devstack_Fedora19.zip
-$ unzip ODL_Devstack_Fedora19.zip
-# Two files contained
-ODL-Devstack-Fedora19-disk1.vmdk
-ODL-Devstack-Fedora19.ovf</command></screen>
-    </para>
-    <para><emphasis role="bold">Fedora 20:</emphasis></para>
-    <para>
-        <screen><command>$ curl -O https://wiki.opendaylight.org/images/HostedFiles/OpenDaylight_DevStack_Fedora20.ova</command></screen>
-    </para>
-    <para>Clone this Virtual Machine image into two images. One is for the Control (This VM runs
-        both the OpenStack Controller and OpenDaylight Controller) and the other for the Compute
-        instance. If you use VM Fusion the vanilla image works as is with no need to change any
-        adaptor settings. Use the ‘ip addr’ configuration output as a reference in the next section.
-        I recommend using SSH to connect to the host rather then using the TTY interface.</para>
-    <para/>
-    <para>Here are two screenshots with VirtualBox network adaptor examples. The first are the two
-        networks you can create. vxboxnet0 is there by default. Create the 2nd network with the +w/a
-        nic picture in the following example. Note: you have to manually fill in the DHCP server
-        settings on the new network. Refer to the existing if unsure of the values to use. When
-        complete the host OS should be able to reach the guest OS.<inlinemediaobject>
-            <imageobject>
-                <imagedata fileref="images/VirtualBox-HostOnly-Networks.png"/>
-            </imageobject>
-        </inlinemediaobject></para>
-    <para>The second example is what the VirtualBox NIC setup can look like without have to deal
-        with the NAT Network option in VirtualBox. VM Fusion has integrated hooks in to resolve the
-        need for host only etc. NAT and Host only work fine with NAT so the host can reach your
-        networks default gateway and get to the Inets as needed. With host only that is not the case
-        but it is plenty to run the stack and integration.<inlinemediaobject>
-            <imageobject>
-                <imagedata fileref="images/VirtualBox-HostOnly-Nics.png"/>
-            </imageobject>
-        </inlinemediaobject></para>
-    <para>Boot both guest VMs write down the four IP addresses from both NICs. You will primarily
-        only use one of them other then a gateway or out of band SSH connectivity etc.</para>
-    <para><emphasis role="bold">Fedora 19:</emphasis></para>
-    <para>
-        <screen><command>
-Login: fedora
-Passwd: opendaylight</command></screen>
-    </para>
-    <para><emphasis role="bold">Fedora 20:</emphasis></para>
-    <para>
-        <screen><command>
-Login: odl
-Passwd: odl</command></screen>
-    </para>
-    <para>In this example the configuration of the IP addresses are as follows:</para>
-    <para>
-        <screen><command>
-Openstack Controller IP == 172.16.86.129
-Openstack Compute IP == 172.16.86.128
-OpenDaylight Controller IP == 172.16.86.129</command></screen>
-    </para>
-    <para>Record the IP addresses of both of the hosts:</para>
-    <para>Controller IP addresses:</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-1 devstack]$</prompt><command>ip addr</command>
-<computeroutput>
-1: lo: loopback,up,lower_up, mtu 65536 qdisc noqueue state UNKNOWN group default
-link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
-inet 127.0.0.1/8 scope host lo
-valid_lft forever preferred_lft forever
-inet6 ::1/128 scope host
-valid_lft forever preferred_lft forever
-2: eth0: &amp;lt;broadcast,multicast,up,lower_up&amp;gt; mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
-link/ether 00:0c:29:35:0b:65 brd ff:ff:ff:ff:ff:ff
-inet 172.16.47.134/24 brd 172.16.47.255 scope global dynamic eth0
-valid_lft 1023sec preferred_lft 1023sec
-inet6 fe80::20c:29ff:fe35:b65/64 scope link
-valid_lft forever preferred_lft forever
-3: eth1: &amp;lt;broadcast,multicast,up,lower_up&amp;gt; mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
-link/ether 00:0c:29:35:0b:6f brd ff:ff:ff:ff:ff:ff
-inet 172.16.86.129/24 brd 172.16.86.255 scope global dynamic eth1
-valid_lft 1751sec preferred_lft 1751sec
-inet6 fe80::20c:29ff:fe35:b6f/64 scope link
-valid_lft forever preferred_lft forever</computeroutput></screen>
-    </para>
-    <para>Compute IP addresses:</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-2 ~]$</prompt><command>ip addr</command>
-<computeroutput>
-1: lo: &amp;lt;loopback,up,lower_up&amp;gt; mtu 65536 qdisc noqueue state UNKNOWN group default
-link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
-inet 127.0.0.1/8 scope host lo
-valid_lft forever preferred_lft forever
-inet6 ::1/128 scope host
-valid_lft forever preferred_lft forever
-2: eth0: &amp;lt;broadcast,multicast,up,lower_up&amp;gt; mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
-link/ether 00:0c:29:85:2d:f2 brd ff:ff:ff:ff:ff:ff
-inet 172.16.47.133/24 brd 172.16.47.255 scope global dynamic eth0
-valid_lft 1774sec preferred_lft 1774sec
-inet6 fe80::20c:29ff:fe85:2df2/64 scope link
-valid_lft forever preferred_lft forever
-3: eth1: &amp;lt;broadcast,multicast,up,lower_up mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
-link/ether 00:0c:29:85:2d:fc brd ff:ff:ff:ff:ff:ff
-inet 172.16.86.128/24 brd 172.16.86.255 scope global dynamic eth1
-valid_lft 1716sec preferred_lft 1716sec
-inet6 fe80::20c:29ff:fe85:2dfc/64 scope link
-valid_lft forever preferred_lft forever</computeroutput></screen>
-    </para>
-    <para>Go to the home directory of the user id odl:</para>
-    <para>
-        <screen><command>$ cd ~/</command></screen>
-    </para>
-    <para>Start the OVS Service (DevStack should start this svc). This startup script can be loaded
-        at startup of OVS to load at the OS init.</para>
-    <para>
-        <screen><command> sudo /sbin/service openvswitch start </command></screen>
-    </para>
-    <para>Configure the /etc/hosts file to reflect your controller and compute hostname mappings.
-        While not necessarily required it can cause issues for Nova output.</para>
-    <para>Verify the OpenStack Controller /etc/hosts file. The only edit is adding the compute IP to
-        hostname mapping. E.g. x.x.x.x fedora-odl-2</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-1 ~]$</prompt><command>sudo vi /etc/hosts</command>
-<computeroutput>
-127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 fedora-odl-1
-172.16.86.128 fedora-odl-2
-::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 </computeroutput></screen>
-    </para>
-    <para>Edit the compute nodes /etc/hosts from fedora-odl-1 to fedora-odl-2:</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-2 ~]$</prompt><command>sudo vi /etc/hosts</command>
-<computeroutput>
-$ cat /etc/hosts
-127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 fedora-odl-2
-172.16.86.129 fedora-odl-1
-::1 localhost localhost.localdomain localhost6 localhost6.localdomain6</computeroutput></screen>
-    </para>
-    <para>Then, change the compute hostname from (compute only):</para>
-    <para>
-        <screen><command>$ sudo vi /etc/hostname</command>
-<command># Change to:</command>
-<command>$ cat /etc/hostname</command>
-<command>fedora-odl-2</command>
-<command>$sudo vi /etc/sysconfig/network</command>
-<command>#Change HOSTNAME=fedora-odl-1 to HOSTNAME=fedora-odl-2</command>
-<command>$sudo hostname -b fedora-odl-2</command></screen>
-    </para>
-    <para>Then, reboot the cloned Compute node for the change to take affect:</para>
-    <para>
-        <screen><command>sudo shutdown -r now</command></screen>
-    </para>
-    <para>After the host restarts verify the hostnames like so:</para>
-    <para>
-        <screen><command> $ hostname</command>
-<command>fedora-odl-2</command></screen>
-    </para>
-    <para>Note: Iin the Fedora 20 VM, commenting out “#127.0.0.1 localhost fedora-odl-1″ will result
-        in a crash of MySql. Avoid doing any changes to the host name locally resolving to
-        127.0.0.1.</para>
-    <para>
-        <screen><computeroutput>
-An unexpected error prevented the server from fulfilling your request. (OperationalError) (1045, "Access denied for user 'root'@'fedora-odl-1' (using password: YES)") None None (HTTP 500)
-2014-02-10 04:03:28 + KEYSTONE_SERVICE=
-2014-02-10 04:03:28 + keystone endpoint-create --region RegionOne --service_id --publicurl http://172.16.86.129:5000/v2.0 --adminurl http://172.16.86.129:35357/v2.0 --internalurl http://172.16.86.129:5000/v2.0
-2014-02-10 04:03:28 usage: keystone endpoint-create [--region ] --service
-2014-02-10 04:03:28 --publicurl 2014-02-10 04:03:28 [--adminurl ]
-2014-02-10 04:03:28 [--internalurl ]
-2014-02-10 04:03:28 keystone endpoint-create: error: argument --service/--service-id/--service_id: expected one argument
-2014-02-10 04:03:28 ++ failed</computeroutput></screen>
-    </para>
-  </section>
diff --git a/manuals/howto-openstack/section_configuring_openstack.xml b/manuals/howto-openstack/section_configuring_openstack.xml
deleted file mode 100644 (file)
index 195eaf0..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE section [
- <!-- Some useful entities borrowed from HTML -->
-<!ENTITY ndash  "&#x2013;">
-<!ENTITY mdash  "&#x2014;">
-<!ENTITY hellip "&#x2026;">
-]>
-<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0"
-    xml:id="configuring_openstack">
-    <title>Configuring the Openstack Compute Node</title>
-    <para>The compute configuration steps are virtually identical to the controller other than the
-        configurations and that it does not run the Daylight controller.</para>
-    <para><emphasis role="bold">Fedora 19:</emphasis></para>
-    <para>
-        <screen><command>
-$ cd ~/
-$ cd devstack
-$ cp local.conf.compute local.conf
-$ vi local.conf</command></screen>
-    </para>
-    <para><emphasis role="bold">Fedora 20:</emphasis></para>
-    <para>
-        <screen><command>
-$ cd /home/odl/
-$ cp local.conf.compute devstack/local.conf
-$ cd devstack
-$ vi local.conf</command></screen>
-    </para>
-    <para>Edit the local.conf you just copied with the appropriate IPs in the devstack directory on
-        the compute host like the following example with your controller and compute host
-        IPs:</para>
-    <para>
-        <screen><command>
-[[local|localrc]]
-LOGFILE=stack.sh.log
-#LOG_COLOR=False
-#SCREEN_LOGDIR=/opt/stack/data/log
-OFFLINE=true
-#RECLONE=yes
-
-disable_all_services
-enable_service neutron nova n-cpu quantum n-novnc qpid
-
-HOST_NAME=fedora-odl-2
-HOST_IP=172.16.86.128
-SERVICE_HOST_NAME=fedora-odl-1
-SERVICE_HOST=172.16.86.129
-VNCSERVER_PROXYCLIENT_ADDRESS=172.16.86.128
-VNCSERVER_LISTEN=0.0.0.0
-
-FLOATING_RANGE=192.168.210.0/24
-
-NEUTRON_REPO=https://github.com/CiscoSystems/neutron.git
-NEUTRON_BRANCH=odl_ml2
-Q_PLUGIN=ml2
-Q_ML2_PLUGIN_MECHANISM_DRIVERS=opendaylight,linuxbridge
-ENABLE_TENANT_TUNNELS=True
-Q_HOST=$SERVICE_HOST
-
-MYSQL_HOST=$SERVICE_HOST
-RABBIT_HOST=$SERVICE_HOST
-GLANCE_HOSTPORT=$SERVICE_HOST:9292
-KEYSTONE_AUTH_HOST=$SERVICE_HOST
-KEYSTONE_SERVICE_HOST=$SERVICE_HOST
-
-MYSQL_PASSWORD=mysql
-RABBIT_PASSWORD=rabbit
-QPID_PASSWORD=rabbit
-SERVICE_TOKEN=service
-SERVICE_PASSWORD=admin
-ADMIN_PASSWORD=admin
-
-[[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
-[agent]
-minimize_polling=True
-
-[ml2_odl]
-url=http://172.16.86.129:8080/controller/nb/v2/neutron
-username=admin
-password=admin</command></screen>
-    </para>
-    <para>Or check the conf file quickly by grepping it.</para>
-    <para>
-        <screen><prompt>[ odl @ fedora - odl - 2 devstack ] $</prompt><command>grep 172 local .conf</command>
-<computeroutput>
-HOST_IP=172.16.86.128
-SERVICE_HOST=172.16.86.129
-VNCSERVER_PROXYCLIENT_ADDRESS=172.16.86.128
-url=http://172.16.86.129:8080/controller/nb/v2/neutron</computeroutput></screen>
-    </para>
-    <para>And now stack the compute host:</para>
-    <para>
-        <screen><command>$ ./stack.sh </command></screen>
-    </para>
-    <para>Once you get the stack working SNAPSHOT the image, it can be a handy timesaver. So is
-        leaving DevStack “offline=true” and “reclone=no” except for when you need to pull a
-        patch.</para>
-    <para>The state of OVS after the stack should be the following:</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-2 devstack]$</prompt><command>sudo ovs-vsctl show</command>
-<computeroutput>
-17074e89-2ac5-4bba-997a-1a5a3527cf56
-Manager "tcp:172.16.86.129:6640"
-is_connected: true
-Bridge br-int
-Controller "tcp:172.16.86.129:6633"
-is_connected: true
-fail_mode: secure
-Port br-int
-Interface br-int
-ovs_version: "2.0.0"</computeroutput></screen>
-    </para> 
-</section>
diff --git a/manuals/howto-openstack/section_create_multi_network.xml b/manuals/howto-openstack/section_create_multi_network.xml
deleted file mode 100644 (file)
index 0e4a1c9..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE section [
- <!-- Some useful entities borrowed from HTML -->
-<!ENTITY ndash  "&#x2013;">
-<!ENTITY mdash  "&#x2014;">
-<!ENTITY hellip "&#x2026;">
-]>
-<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="create_multi_network">
-    <title>Create Multi Network Types, GRE and VXLan</title>
-    <para>Create some hosts in an overlay using the VXLAN encap with specified segmentation IDs
-        (VNIs):</para>
-    <para>
-        <screen><command>
-neutron net-create vxlan-net1 --tenant_id $(keystone tenant-list | grep '\sadmin' | awk '{print $2}') --provider:network_type vxlan --provider:segmentation_id 1600
-neutron subnet-create vxlan-net1 10.100.1.0/24 --name vxlan-net1
-
-neutron net-create vxlan-net2 --tenant_id $(keystone tenant-list | grep '\sadmin' | awk '{print $2}') --provider:network_type vxlan --provider:segmentation_id 1601
-neutron subnet-create vxlan-net2 10.100.2.0/24 --name vxlan-net2
-
-neutron net-create vxlan-net3 --tenant_id $(keystone tenant-list | grep '\sadmin' | awk '{print $2}') --provider:network_type vxlan --provider:segmentation_id 1603
-neutron subnet-create vxlan-net3 10.100.3.0/24 --name vxlan-net3 </command></screen>
-    </para>
-    <para>Next, take a look at the networks which were just created.</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-1 devstack]$</prompt><command>neutron net-list</command>
-<computeroutput>
-+--------------------------------------+------------+-------------------------------------------------------+
-| id                                   | name       | subnets                                               |
-+--------------------------------------+------------+-------------------------------------------------------+
-| 03e3f964-8bc8-48fa-b4c9-9b8390f37b93 | private    | b06d716b-527f-4da2-adda-5fc362456d34 10.0.0.0/24      |
-| 4eaf08d3-2234-4632-b1e7-d11704b1238a | vxlan-net2 | b54c30fd-e157-4935-b9c2-cefa145162a8 10.100.2.0/24    |
-| af8aa29d-a302-4ecf-a0b1-e52ff9c10b63 | vxlan-net1 | c44f9bee-adca-4bca-a197-165d545bcef9 10.100.1.0/24    |
-| e6f3c605-6c0b-4f7d-a64f-6e593c5e647a | vxlan-net3 | 640cf2d1-b470-41dd-a4d8-193d705ea73e 10.100.3.0/24    |
-| f6aede62-67a5-4fe6-ad61-2c1a88b08874 | public     | 1e945d93-caeb-4890-8b58-ed00297a7f03 192.168.210.0/24 |
-+--------------------------------------+------------+-------------------------------------------------------+ </computeroutput></screen>
-    </para>
-    <para>Now, boot the VMS</para>
-    <para>
-        <screen><command>
-nova boot --flavor m1.tiny --image $(nova image-list | grep $IMAGE'\s' | awk '{print $2}') --nic net-id=$(neutron net-list | grep vxlan-net1 | awk '{print $2}') vxlan-host1 --availability_zone=nova:fedora-odl-2
-
-nova boot --flavor m1.tiny --image $(nova image-list | grep $IMAGE'\s' | awk '{print $2}') --nic net-id=$(neutron net-list | grep vxlan-net2 | awk '{print $2}') vxlan-host2 --availability_zone=nova:fedora-odl-2
-
-nova boot --flavor m1.tiny --image $(nova image-list | grep $IMAGE'\s' | awk '{print $2}') --nic net-id=$(neutron net-list | grep vxlan-net2 | awk '{print $2}') vxlan-host3 --availability_zone=nova:fedora-odl-2 </command></screen>
-    </para>
-    <para>To pull up the Horizon UI  to verify the nodes you have, point your web browser at the
-        controller IP (port 80).</para>
-    <figure>
-        <title>Horizon-OpenDaylight-e1392513990486.jpg</title>
-        <mediaobject>
-            <imageobject>
-                <imagedata fileref="images/Horizon-OpenDaylight-e1392513990486.jpg"/>
-            </imageobject>
-        </mediaobject>
-    </figure>
-    <para>Now, Ping one of the hosts just created to verify it is functional:</para>
-    <para>
-        <screen><prompt> [odl@fedora-odl-1 devstack]$</prompt><command>ip netns</command>
-<computeroutput>
-qdhcp-4eaf08d3-2234-4632-b1e7-d11704b1238a
-qdhcp-af8aa29d-a302-4ecf-a0b1-e52ff9c10b63
-qrouter-bed7005f-4c51-4c3a-b23b-3830b5e7663a
-[odl@fedora-odl-1 devstack]$ nova list
-+--------------------------------------+-------------+--------+------------+-------------+-----------------------+
-| ID                                   | Name        | Status | Task State | Power State | Networks              |
-+--------------------------------------+-------------+--------+------------+-------------+-----------------------+
-| f34ed046-5daf-42f5-9b2c-644f5ab6b2bc | vxlan-host1 | ACTIVE | -          | Running     | vxlan-net1=10.100.1.2 |
-| 6b65d0f2-c621-4dc5-87ca-82a2c44734b2 | vxlan-host2 | ACTIVE | -          | Running     | vxlan-net2=10.100.2.2 |
-| f3d5179a-e974-4eb4-984b-399d1858ab76 | vxlan-host3 | ACTIVE | -          | Running     | vxlan-net2=10.100.2.4 |
-+--------------------------------------+-------------+--------+------------+-------------+-----------------------+
-[odl@fedora-odl-1 devstack]$ sudo ip netns exec qdhcp-af8aa29d-a302-4ecf-a0b1-e52ff9c10b63 ping 10.100.1.2
-PING 10.100.1.2 (10.100.1.2) 56(84) bytes of data.
-64 bytes from 10.100.1.2: icmp_seq=1 ttl=64 time=2.63 ms
-64 bytes from 10.100.1.2: icmp_seq=2 ttl=64 time=1.15 ms
-^C
---- 10.100.1.2 ping statistics ---
-2 packets transmitted, 2 received, 0% packet loss, time 1001ms
-rtt min/avg/max/mdev = 1.151/1.892/2.633/0.741 ms </computeroutput></screen>
-    </para>
-    <para>Now, create three new Neutron networks using the GRE encapsulation. (Note: With too many
-        VMs you can make them crash if too much memory is used).</para>
-    <para>
-        <screen><command>
-### Create the Networks and corresponding Subnets ###
-neutron net-create gre-net1 --tenant_id $(keystone tenant-list | grep '\sadmin' | awk '{print $2}') --provider:network_type gre --provider:segmentation_id 1700
-neutron subnet-create gre-net1 10.100.1.0/24 --name gre-net1
-
-neutron net-create gre-net2 --tenant_id $(keystone tenant-list | grep '\sadmin' | awk '{print $2}') --provider:network_type gre --provider:segmentation_id 1701
-neutron subnet-create gre-net2 10.100.2.0/24 --name gre-net2
-
-neutron net-create gre-net3 --tenant_id $(keystone tenant-list | grep '\sadmin' | awk '{print $2}') --provider:network_type gre --provider:segmentation_id 1703
-neutron subnet-create gre-net3 10.100.3.0/24 --name gre-net3</command></screen>
-    </para>
-    <para>
-        <screen><command>
-### Boot the VMs ###
-
-nova boot --flavor m1.tiny --image $(nova image-list | grep $IMAGE'\s' | awk '{print $2}') --nic net-id=$(neutron net-list | grep gre-net1 | awk '{print $2}') gre-host1 --availability_zone=nova:fedora-odl-2
-
-nova boot --flavor m1.tiny --image $(nova image-list | grep $IMAGE'\s' | awk '{print $2}') --nic net-id=$(neutron net-list | grep gre-net2 | awk '{print $2}') gre-host2 --availability_zone=nova:fedora-odl-2
-
-nova boot --flavor m1.tiny --image $(nova image-list | grep $IMAGE'\s' | awk '{print $2}') --nic net-id=$(neutron net-list | grep gre-net2 | awk '{print $2}') gre-host3 --availability_zone=nova:fedora-odl-2</command></screen>
-    </para>
-    <para>Here is an example of a OVS configuration. (Note: Since the tunnel ID is being set, use
-        the OpenFlow OXM metadata field to set the logical port OFPXMT_OFB_TUNNEL_ID implemented in
-        OpenFlow v1.3.)</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-1 devstack]$</prompt><command>nova list</command>
-<computeroutput>
-+--------------------------------------+-------------+--------+------------+-------------+-----------------------+
-| ID                                   | Name        | Status | Task State | Power State | Networks              |
-+--------------------------------------+-------------+--------+------------+-------------+-----------------------+
-| 8db56e44-36db-4447-aeb9-e6679ca420b6 | gre-host1   | ACTIVE | -          | Running     | gre-net1=10.100.1.2   |
-| 36fec86d-d9e6-462c-a686-f3c0929a2c21 | gre-host2   | ACTIVE | -          | Running     | gre-net2=10.100.2.2   |
-| 67d97a8e-ecd3-4913-886c-423170ef3635 | gre-host3   | ACTIVE | -          | Running     | gre-net2=10.100.2.4   |
-| f34ed046-5daf-42f5-9b2c-644f5ab6b2bc | vxlan-host1 | ACTIVE | -          | Running     | vxlan-net1=10.100.1.2 |
-| 6b65d0f2-c621-4dc5-87ca-82a2c44734b2 | vxlan-host2 | ACTIVE | -          | Running     | vxlan-net2=10.100.2.2 |
-| f3d5179a-e974-4eb4-984b-399d1858ab76 | vxlan-host3 | ACTIVE | -          | Running     | vxlan-net2=10.100.2.4 |
-+--------------------------------------+-------------+--------+------------+-------------+-----------------------+</computeroutput></screen>
-    </para>
-    <para>Neutron mappings from the Neutron client output:</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-1 devstack]$</prompt><command>neutron net-list</command>
-<computeroutput>
-+--------------------------------------+------------+-------------------------------------------------------+
-| id                                   | name       | subnets                                               |
-+--------------------------------------+------------+-------------------------------------------------------+
-| 03e3f964-8bc8-48fa-b4c9-9b8390f37b93 | private    | b06d716b-527f-4da2-adda-5fc362456d34 10.0.0.0/24      |
-| 4eaf08d3-2234-4632-b1e7-d11704b1238a | vxlan-net2 | b54c30fd-e157-4935-b9c2-cefa145162a8 10.100.2.0/24    |
-| a33c5794-3830-4220-8724-95752d8f94bd | gre-net1   | d32c8a70-70c6-4bdc-b741-af718b3ba4cd 10.100.1.0/24    |
-| af8aa29d-a302-4ecf-a0b1-e52ff9c10b63 | vxlan-net1 | c44f9bee-adca-4bca-a197-165d545bcef9 10.100.1.0/24    |
-| e6f3c605-6c0b-4f7d-a64f-6e593c5e647a | vxlan-net3 | 640cf2d1-b470-41dd-a4d8-193d705ea73e 10.100.3.0/24    |
-| f6aede62-67a5-4fe6-ad61-2c1a88b08874 | public     | 1e945d93-caeb-4890-8b58-ed00297a7f03 192.168.210.0/24 |
-| fa44d171-4935-4fae-9507-0ecf2d521b49 | gre-net2   | f8151c73-cda4-47e4-bf7c-8a73a7b4ef5f 10.100.2.0/24    |
-| ffc7da40-8252-4cdf-a9a2-d538f4986215 | gre-net3   | 146931d8-9146-4abf-9957-d6a8a3db43e4 10.100.3.0/24    |
-+--------------------------------------+------------+-------------------------------------------------------+</computeroutput></screen>
-    </para>
-    <para><?oxy_custom_start type="oxy_content_highlight" color="255,255,0"?>Next, verify the Open
-        vSwitch configuration. Worthy of note is the tunnel IPv4 src/dest endpoints are defined
-        using OVSDB but the Tunnel ID is set using the flowmod in OpenFlow using key=flow. This
-        tells OVSDB to look for the tunnel ID in the flowmod. There is also a similar concept for
-        IPv4 tunnel source/destination using Nicira extensions with NXM_NX_TUN_IPV4_SRC and
-        NXM_NX_TUN_IPV4_DST that was implemented in OVS 2.0. The NXM code points are referenced in
-        the OF v1.3 specification but it seems pretty nascent whether the ONF is looking to handle
-        tunnel operations with OF-Config or via flowmods such as the NXM references. The NXM code
-        points are defined in the ODL openflowjava project that implements the library model for OFv1.3
-        and would just need to be plumbed through the MD-SAL convertor.<?oxy_custom_end?></para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-2 devstack]$</prompt><command>sudo ovs-vsctl show</command>
-<computeroutput>17074e89-2ac5-4bba-997a-1a5a3527cf56
-Manager "tcp:172.16.86.129:6640"
-is_connected: true
-Bridge br-int
-Controller "tcp:172.16.86.129:6633"
-is_connected: true
-fail_mode: secure
-Port "tap8b31df39-d4"
-Interface "tap8b31df39-d4"
-Port br-int
-Interface br-int
-Port "gre-172.16.86.129"
-Interface "gre-172.16.86.129"
-type: gre
-options: {key=flow, local_ip="172.16.86.128", remote_ip="172.16.86.129"}
-ovs_version: "2.0.0"</computeroutput></screen>
-    </para>
-    <para>And then the OF v1.3 flowmods:</para>
-    <para>
-        <screen><prompt>[odl@fedora-odl-2 devstack]$</prompt><command>sudo ovs-ofctl -O OpenFlow13 dump-flows br-int</command>
-<computeroutput>
-OFPST_FLOW reply (OF1.3) (xid=0x2):
-cookie=0x0, duration=2415.341s, table=0, n_packets=30, n_bytes=2586, send_flow_rem in_port=4,dl_src=fa:16:3e:1a:49:61 actions=set_field:0x641-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=2425.095s, table=0, n_packets=39, n_bytes=3300, send_flow_rem in_port=2,dl_src=fa:16:3e:93:20:1e actions=set_field:0x640-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=2415.981s, table=0, n_packets=37, n_bytes=2880, send_flow_rem in_port=5,dl_src=fa:16:3e:02:28:8d actions=set_field:0x641-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=877.732s, table=0, n_packets=27, n_bytes=2348, send_flow_rem in_port=6,dl_src=fa:16:3e:20:cd:8e actions=set_field:0x6a4-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=878.981s, table=0, n_packets=31, n_bytes=2908, send_flow_rem in_port=7,dl_src=fa:16:3e:86:08:5f actions=set_field:0x6a5-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=882.297s, table=0, n_packets=32, n_bytes=2670, send_flow_rem in_port=8,dl_src=fa:16:3e:68:40:4a actions=set_field:0x6a5-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=884.983s, table=0, n_packets=16, n_bytes=1888, send_flow_rem tun_id=0x6a4,in_port=3 actions=goto_table:20
-cookie=0x0, duration=2429.719s, table=0, n_packets=33, n_bytes=3262, send_flow_rem tun_id=0x640,in_port=1 actions=goto_table:20
-cookie=0x0, duration=881.723s, table=0, n_packets=29, n_bytes=3551, send_flow_rem tun_id=0x6a5,in_port=3 actions=goto_table:20
-cookie=0x0, duration=2418.434s, table=0, n_packets=33, n_bytes=3866, send_flow_rem tun_id=0x641,in_port=1 actions=goto_table:20
-cookie=0x0, duration=2426.048s, table=0, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x2,in_port=3 actions=goto_table:20
-cookie=0x0, duration=2428.34s, table=0, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x1,in_port=3 actions=goto_table:20
-cookie=0x0, duration=878.961s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=8192,in_port=7 actions=drop
-cookie=0x0, duration=882.211s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=8192,in_port=8 actions=drop
-cookie=0x0, duration=877.562s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=8192,in_port=6 actions=drop
-cookie=0x0, duration=2415.941s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=8192,in_port=5 actions=drop
-cookie=0x0, duration=2415.249s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=8192,in_port=4 actions=drop
-cookie=0x0, duration=2425.04s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=8192,in_port=2 actions=drop
-cookie=0x0, duration=2711.147s, table=0, n_packets=970, n_bytes=88270, send_flow_rem dl_type=0x88cc actions=CONTROLLER:56
-cookie=0x0, duration=873.508s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=1,in_port=3,dl_dst=00:00:00:00:00:00 actions=output:1
-cookie=0x0, duration=873.508s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=1,in_port=1,dl_dst=00:00:00:00:00:00 actions=output:1
-cookie=0x0, duration=877.224s, table=10, n_packets=0, n_bytes=0, send_flow_rem priority=8192,tun_id=0x6a4 actions=goto_table:20
-cookie=0x0, duration=2415.783s, table=10, n_packets=7, n_bytes=294, send_flow_rem priority=8192,tun_id=0x641 actions=goto_table:20
-cookie=0x0, duration=881.907s, table=10, n_packets=3, n_bytes=169, send_flow_rem priority=8192,tun_id=0x6a5 actions=goto_table:20
-cookie=0x0, duration=2424.811s, table=10, n_packets=0, n_bytes=0, send_flow_rem priority=8192,tun_id=0x640 actions=goto_table:20
-cookie=0x0, duration=881.623s, table=10, n_packets=37, n_bytes=3410, send_flow_rem priority=16384,tun_id=0x6a5,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:3,goto_table:20
-cookie=0x0, duration=2429.661s, table=10, n_packets=18, n_bytes=1544, send_flow_rem priority=16384,tun_id=0x640,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:1,goto_table:20
-cookie=0x0, duration=2418.33s, table=10, n_packets=36, n_bytes=3088, send_flow_rem priority=16384,tun_id=0x641,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:1,goto_table:20
-cookie=0x0, duration=2428.227s, table=10, n_packets=0, n_bytes=0, send_flow_rem priority=16384,tun_id=0x1,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:3,goto_table:20
-cookie=0x0, duration=884.854s, table=10, n_packets=15, n_bytes=1306, send_flow_rem priority=16384,tun_id=0x6a4,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:3,goto_table:20
-cookie=0x0, duration=2425.966s, table=10, n_packets=0, n_bytes=0, send_flow_rem priority=16384,tun_id=0x2,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:3,goto_table:20
-cookie=0x0, duration=885.097s, table=10, n_packets=12, n_bytes=1042, send_flow_rem tun_id=0x6a4,dl_dst=fa:16:3e:5d:3d:cd actions=output:3,goto_table:20
-cookie=0x0, duration=2426.083s, table=10, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x2,dl_dst=fa:16:3e:fa:77:36 actions=output:3,goto_table:20
-cookie=0x0, duration=2429.782s, table=10, n_packets=21, n_bytes=1756, send_flow_rem tun_id=0x640,dl_dst=fa:16:3e:f8:d0:96 actions=output:1,goto_table:20
-cookie=0x0, duration=873.509s, table=10, n_packets=23, n_bytes=1999, send_flow_rem tun_id=0x6a5,dl_dst=fa:16:3e:21:eb:65 actions=output:3,goto_table:20
-cookie=0x0, duration=2418.518s, table=10, n_packets=24, n_bytes=2084, send_flow_rem tun_id=0x641,dl_dst=fa:16:3e:9b:c1:c7 actions=output:1,goto_table:20
-cookie=0x0, duration=2428.443s, table=10, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x1,dl_dst=fa:16:3e:ea:1d:9d actions=output:3,goto_table:20
-cookie=0x0, duration=877.119s, table=20, n_packets=12, n_bytes=1042, send_flow_rem priority=8192,tun_id=0x6a4 actions=drop
-cookie=0x0, duration=2415.73s, table=20, n_packets=31, n_bytes=2378, send_flow_rem priority=8192,tun_id=0x641 actions=drop
-cookie=0x0, duration=881.815s, table=20, n_packets=26, n_bytes=2168, send_flow_rem priority=8192,tun_id=0x6a5 actions=drop
-cookie=0x0, duration=2424.74s, table=20, n_packets=21, n_bytes=1756, send_flow_rem priority=8192,tun_id=0x640 actions=drop
-cookie=0x0, duration=882.005s, table=20, n_packets=37, n_bytes=3410, priority=16384,tun_id=0x6a5,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:8,output:7
-cookie=0x0, duration=2424.884s, table=20, n_packets=22, n_bytes=1864, send_flow_rem priority=16384,tun_id=0x640,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:2
-cookie=0x0, duration=2415.83s, table=20, n_packets=38, n_bytes=3228, send_flow_rem priority=16384,tun_id=0x641,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:5,output:4
-cookie=0x0, duration=877.333s, table=20, n_packets=15, n_bytes=1306, send_flow_rem priority=16384,tun_id=0x6a4,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:6
-cookie=0x0, duration=878.799s, table=20, n_packets=15, n_bytes=1818, send_flow_rem tun_id=0x6a5,dl_dst=fa:16:3e:86:08:5f actions=output:7
-cookie=0x0, duration=2415.884s, table=20, n_packets=15, n_bytes=1818, send_flow_rem tun_id=0x641,dl_dst=fa:16:3e:02:28:8d actions=output:5
-cookie=0x0, duration=877.468s, table=20, n_packets=15, n_bytes=1818, send_flow_rem tun_id=0x6a4,dl_dst=fa:16:3e:20:cd:8e actions=output:6
-cookie=0x0, duration=882.102s, table=20, n_packets=14, n_bytes=1733, send_flow_rem tun_id=0x6a5,dl_dst=fa:16:3e:68:40:4a actions=output:8
-cookie=0x0, duration=2415.171s, table=20, n_packets=15, n_bytes=1818, send_flow_rem tun_id=0x641,dl_dst=fa:16:3e:1a:49:61 actions=output:4
-cookie=0x0, duration=2424.998s, table=20, n_packets=24, n_bytes=2532, send_flow_rem tun_id=0x640,dl_dst=fa:16:3e:93:20:1e actions=output:2</computeroutput></screen>
-    </para>
-    <para><?oxy_custom_start type="oxy_content_highlight" color="255,255,0"?>For more on TEPs please
-        see a nice document authored by Ben Pfaff who needs no introduction, that can be found <link
-            xlink:href="http://benpfaff.org/~blp/ovs-fields.pdf">here</link>.</para>
-    <para>Next take a look at the flowmods. The pipelines have been broken down into three tables, a
-        classifier, egress and ingress. Over the next 6 months we will be adding services into
-        pipeline for a much more complete implementation. We are looking for user contributions in
-        the roadmap and even better, pushing code upstream as the project continues to grow.</para>
-    <para>Lastly if you want to force availability zones from say the “demo” UID. You can add the
-        admin role to different UIDs using the following Keystone client calls.</para>
-    <?oxy_custom_end?>
-    <para>
-        <screen><command>
-$ keystone user-role-add --user $(keystone user-list | grep '\sdemo' | awk '{print $2}') \
---role $(keystone role-list | grep 'admin' | awk '{print $2}') \
---tenant_id $(keystone tenant-list | grep '\sdemo' | awk '{print $2}')
-$ . ./openrc demo demo </command></screen>
-    </para>
-</section>
diff --git a/manuals/howto-openstack/section_ovsdb_project.xml b/manuals/howto-openstack/section_ovsdb_project.xml
deleted file mode 100644 (file)
index 8e9b013..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE section [
- <!-- Some useful entities borrowed from HTML -->
-<!ENTITY ndash  "&#x2013;">
-<!ENTITY mdash  "&#x2014;">
-<!ENTITY hellip "&#x2026;">
-]>
-<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="ovsdb_project">
-    <title>OVSDB Project Control and Management Logic</title>
-    <para>OpenFlow v1.3 and OVSDB were used in the OVSDB project OpenStack implementation. We chose
-        not to use any extensions or the use of agents. Open vSwitch supported the necessary
-        OpenFlow v1.3 and OVSDB functionality we required for this architecture. Those of us in the
-        OVSDB project are pretty agnostic to southbound protocols as long as there is a healthy
-        adoption so as not to waste our time and based on open standards such as OpenFlow v1.3, RFCs
-        7047 (Informational OVSDB RFC) and/or de facto drafts like
-        draft-mahalingam-dutt-dcops-vxlan(VXLAN framing). We are keen to see NXM extension
-        functionality upstream into the OpenFlow specification. OVS ARP responder is something we
-        are beginning to work on proofing now. NXM and OXM extensions merging for ARP and Tunnel
-        feature parity would make our design and coding lives easier. The overall architecture looks
-        something like the following. I have hardware TEPs in the diagram. We have cycles to help
-        hardware vendors implement the hardware_vtep database schema (assuming they prescribe to
-        open operating systems):</para>
-    <para><inlinemediaobject>
-            <imageobject>
-                <imagedata fileref="images/Overlay-OpenDaylight-OVSDB-OpenFlow.png"/>
-            </imageobject>
-        </inlinemediaobject></para>
-    <para>The provider segmentation keys used in the encap (GRE key/VNI) are a hash of Network and
-        Tenant ID since as long as we are subnet bound, networks will always need to support
-        multi-tenant logical networks until we eradicate L2 all together. The design is flexible and
-        as generic as possible to allow for any vendor to add differentiation on top of the base
-        network virtualization. Of course, we have plenty to do between now and stability, so moving
-        right along.</para>
-    <para>A quick visual of the OVSDB Neutron implementation code flow itself and how it ties into
-        the controller project and OpenStack:</para>
-    <para><inlinemediaobject>
-            <imageobject>
-                <imagedata fileref="images/OVSDB-Architecture.png"/>
-            </imageobject>
-        </inlinemediaobject></para>
-  </section>
diff --git a/manuals/howto-openstack/section_start_odl_controller.xml b/manuals/howto-openstack/section_start_odl_controller.xml
deleted file mode 100644 (file)
index ab4786b..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE section [
- <!-- Some useful entities borrowed from HTML -->
-<!ENTITY ndash  "&#x2013;">
-<!ENTITY mdash  "&#x2014;">
-<!ENTITY hellip "&#x2026;">
-]>
-<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="start_odl_controller">
-    <title>Starting ODL Controller on the Openstack Node</title>
-    <para>
-        <screen><command>$ cd odl/opendaylight/</command></screen>
-    </para>
-    <para>Check that the configuration is set for OpenFlow v1.3 with the following to ensure that
-        ovsdb.of.version=1.3 is uncommented: </para>
-    <para>
-        <screen><command>$ grep ovsdb.of.version configuration/config.ini</command>
-<command>ovsdb.of.version=1.3</command></screen>
-    </para>
-    <para>If it is not uncommented, adjust the config.ini file to uncomment the line
-        ovsdb.of.version=1.3. The file is located at
-        /home/odl/opendaylight/configuration/config.ini</para>
-    <para>
-        <screen><computeroutput>### Before ###
-# ovsdb.of.version=1.3
-### After ###
-ovsdb.of.version=1.3</computeroutput></screen>
-    </para>
-    <para>Or, paste the following:</para>
-    <para>
-        <screen><command>sudo sed -i 's/#\ ovsdb.of.version=1.3/ovsdb.of.version=1.3/' /home/odl/opendaylight/configuration/config.ini</command></screen>
-    </para>
-    <para>Lastly, start the ODL controller w/ the following:</para>
-    <para>
-        <screen><command>./run.sh -XX:MaxPermSize=384m -virt ovsdb -of13</command></screen>
-    </para>
-    <para>When the controller is finished loading here are some typical messages in the OSGI
-        console:</para>
-    <para>
-        <screen><computeroutput>
-2014-02-06 20:41:22.458 UTC [pool-2-thread-4] INFO o.o.controller.frm.flow.FlowProvider - Flow Config Provider started.
-2014-02-06 20:41:22.461 UTC [pool-2-thread-4] INFO o.o.c.frm.group.GroupProvider - Group Config Provider started.
-2014-02-06 20:41:22.507 UTC [pool-2-thread-4] INFO o.o.c.frm.meter.MeterProvider - Meter Config Provider started.
-2014-02-06 20:41:22.515 UTC [pool-2-thread-6] INFO o.o.c.m.s.manager.StatisticsProvider - Statistics Provider started.</computeroutput></screen>
-    </para>
-    <para>You can verify the sockets/ports are bound with the following command. Ports 6633, 6640
-        and 6653 should all be bound and listening:</para>
-    <para>
-        <screen><computeroutput>$ lsof -iTCP | grep 66
-java 1330 odl 154u IPv6 15262 0t0 TCP *:6640 (LISTEN)
-java 1330 odl 330u IPv6 15392 0t0 TCP *:6633 (LISTEN)
-java 1330 odl 374u IPv6 14306 0t0 TCP *:6653 (LISTEN)</computeroutput></screen>
-    </para>
-  </section>
diff --git a/manuals/howto-openstack/section_unstack_and_cleanup.xml b/manuals/howto-openstack/section_unstack_and_cleanup.xml
deleted file mode 100644 (file)
index 8161c2e..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE section [
- <!-- Some useful entities borrowed from HTML -->
-<!ENTITY ndash  "&#x2013;">
-<!ENTITY mdash  "&#x2014;">
-<!ENTITY hellip "&#x2026;">
-]>
-<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="installing_from_zip">
-    <title>Unstack and Cleanup DevStack</title>
-    <para>Use the following to teardown the stack and reset the state of the VM to pre-stack.</para>
-    <para>Running unstack.sh will kill the stack. Also, look at the OVS config and make sure all
-        bridges have been deleted:</para>
-    <para>
-        <screen><command>sudo ovs-vsctl show</command></screen>
-    </para>
-    <para>A handy cleanup is to run a few commands to ensure the stack was effectively torn down.
-        Paste the following to create a shell script called ./reallyunstack.sh. </para>
-    <para>
-        <screen><command>echo 'sudo killall nova-api nova-conductor nova-cert nova-scheduler nova-consoleauth nova-compute</command>
-<command>sudo pkill -9 -f qemu</command>
-<command>sudo  ovs - vsctl  del - manager</command>
-<command>sudo  ovs - vsctl  del - br  br - int</command>
-<command>sudo  ovs - vsctl  del - br  br - tun</command>
-<command>sudo  pkill / usr / bin / python</command>
-<command>sudo  systemctl  restart  qpidd .service ' &amp; gt ; reallyunstack .sh</command>
-<command>chmod + x reallyunstack .sh</command>
-<command>. / reallyunstack .sh</command></screen>
-    </para>
-  </section>
diff --git a/manuals/howto-openstack/section_verifying_openstack.xml b/manuals/howto-openstack/section_verifying_openstack.xml
deleted file mode 100644 (file)
index 45dd3e6..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE section [
- <!-- Some useful entities borrowed from HTML -->
-<!ENTITY ndash  "&#x2013;">
-<!ENTITY mdash  "&#x2014;">
-<!ENTITY hellip "&#x2026;">
-]>
-<section xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
-    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="verifying_openstack">
-    <title>Verifying Openstack is Functioning</title>
-    <para>Verify the stack with the following on either host. </para>
-    <para>There are two KVM hypervisors registered with Nova. *Note openrc will populate the proper
-        Keystone credentials for service client commands. These can be viewed using the export
-        command from your shell:</para>
-    <para>
-        <screen><prompt>[ odl @ fedora - odl - 1 devstack ] $</prompt><command>. . / openrc admin admin</command>
-<prompt>[ odl @ fedora - odl - 1 devstack ] $</prompt><command>nova hypervisor - list</command>
-<computeroutput>
-+----+---------------------+
-| ID | Hypervisor hostname |
-+----+---------------------+
-| 1  | fedora-odl-1        |
-| 2  | fedora-odl-2        |
-+----+---------------------+</computeroutput></screen>
-    </para>
-    <para>Note: During the VM Boot Instances, there is a minor configuration differece between
-        Fedora 19 and Fedora 20</para>
-    <para><emphasis role="bold">Fedora 19</emphasis>:</para>
-    <para>
-        <screen><command>~/devstack/addimage.sh</command>
-<command>export IMAGE=cirros-0.3.0-i386-disk.img</command></screen>
-    </para>
-    <para><emphasis role="bold">Fedora 20</emphasis>:</para>
-    <para>
-        <screen><command>export IMAGE = cirros - 0.3.1 - x86_64 - uec</command></screen>
-    </para>
-    <para>Next, boot a couple of VMs and verify the network overlay is created by
-            ODL/OVSDB.</para>
-        <para>
-            <screen><command>nova boot -- flavor m1 .tiny -- image $ ( nova image - list | grep $IMAGE '\s' | awk '{print $2}' ) -- nic net - id = $ ( neutron net - list | grep private | awk '{print $2}' ) admin - private1</command></screen>
-        </para>            
-        <para>Boot a 2nd node:</para>
-        <para>
-            <screen><command>nova boot -- flavor m1 .tiny -- image $ ( nova image - list | grep $IMAGE '\s' | awk '{print $2}' ) -- nic net - id = $ ( neutron net - list | grep private | awk '{print $2}' ) admin - private2</command></screen>
-        </para>
-        <para>You can also force a host to boot to a particular hypervisor using the following
-            (note: this requires an admin role which is implicitly granted to the admin
-            user):</para>
-        <para>
-            <screen><command>nova boot -- flavor m1 .tiny -- image $ ( nova image - list | grep $IMAGE '\s' | awk '{print $2}' ) -- nic net - id = $ ( neutron net - list | grep private | awk '{print $2}' ) demo - private -- availability_zone = nova : fedora - odl - 1</command></screen>
-        </para>    
-        <para>View the state of the VMs</para>
-        <para>
-            <screen><prompt>[odl@fedora-odl-1 devstack]$</prompt><userinput>nova list</userinput>
-<computeroutput>
-+--------------------------------------+----------------+--------+------------+-------------+------------------+
-| ID                                   |        Name    | Status | Task State | Power State | Networks         |
-+--------------------------------------+----------------+--------+------------+-------------+------------------+
-| 01c30219-255a-4376-867a-45d52e349e87 | admin-private1 | ACTIVE | -          | Running     | private=10.0.0.2 |
-| bdcfd05b-ebaf-452d-b8c8-81f391a0bb75 | admin-private2 | ACTIVE | -          | Running     | private=10.0.0.4 |
-+--------------------------------------+----------------+--------+------------+-------------+------------------+</computeroutput></screen>
-        </para>
-        <para>To determine where the host is located, look directly at Libvirt using Virsh:</para>
-        <para>
-            <screen><prompt>[odl@fedora-odl-2 devstack]$</prompt><command>sudo virsh list</command>
-<computeroutput>Id Name State
-----------------------------------------------------
-2 instance-00000002 running</computeroutput></screen>
-        </para>
-    <para>Ping the endpoints by grabbing a namespace for qdhcp or qrouter. This provides an L3 source
-        to ping the VMs. These will only exist on the controller or wherever you are running those
-        services in your cloud:</para>
-    <para>
-        <screen>[odl@fedora-odl-1 devstack]$ ip netns
-qdhcp-3f0cfbd2-f23c-481a-8698-3b2dcb7c2657
-qrouter-992e450a-875c-4721-9c82-606c283d4f92
-[odl@fedora-odl-1 devstack]$ sudo ip netns exec qdhcp-3f0cfbd2-f23c-481a-8698-3b2dcb7c2657 ping 10.0.0.2
-PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
-64 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=0.737 ms
-64 bytes from 10.0.0.2: icmp_seq=2 ttl=64 time=0.578 ms
-^C
---- 10.0.0.2 ping statistics ---
-2 packets transmitted, 2 received, 0% packet loss, time 1001ms
-rtt min/avg/max/mdev = 0.578/0.657/0.737/0.083 ms
-[odl@fedora-odl-1 devstack]$ sudo ip netns exec qdhcp-3f0cfbd2-f23c-481a-8698-3b2dcb7c2657 ping 10.0.0.4
-PING 10.0.0.4 (10.0.0.4) 56(84) bytes of data.
-64 bytes from 10.0.0.4: icmp_seq=1 ttl=64 time=2.02 ms
-64 bytes from 10.0.0.4: icmp_seq=2 ttl=64 time=1.03 ms
-^C
---- 10.0.0.4 ping statistics ---
-2 packets transmitted, 2 received, 0% packet loss, time 1001ms
-rtt min/avg/max/mdev = 1.037/1.530/2.023/0.493 ms</screen>
-    </para>
-    <para>Verify the OF13 flow modifications.</para>
-    <para>
-        <screen>[odl@fedora-odl-2 devstack]$ sudo ovs-ofctl -O OpenFlow13 dump-flows br-int
-OFPST_FLOW reply (OF1.3) (xid=0x2):
-cookie=0x0, duration=2044.758s, table=0, n_packets=23, n_bytes=2292, send_flow_rem in_port=2,dl_src=fa:16:3e:f5:03:2e actions=set_field:0x1-&amp;gt;tun_id,goto_table:10
-cookie=0x0, duration=2051.364s, table=0, n_packets=30, n_bytes=3336, send_flow_rem tun_id=0x1,in_port=1 actions=goto_table:20
-cookie=0x0, duration=2049.553s, table=0, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x2,in_port=1 actions=goto_table:20
-cookie=0x0, duration=2044.724s, table=0, n_packets=0, n_bytes=0, send_flow_rem priority=8192,in_port=2 actions=drop
-cookie=0x0, duration=2576.478s, table=0, n_packets=410, n_bytes=36490, send_flow_rem dl_type=0x88cc actions=CONTROLLER:56
-cookie=0x0, duration=2044.578s, table=10, n_packets=0, n_bytes=0, send_flow_rem priority=8192,tun_id=0x1 actions=goto_table:20
-cookie=0x0, duration=2051.322s, table=10, n_packets=10, n_bytes=1208, send_flow_rem priority=16384,tun_id=0x1,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:1,goto_table:20
-cookie=0x0, duration=2049.477s, table=10, n_packets=0, n_bytes=0, send_flow_rem priority=16384,tun_id=0x2,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:1,goto_table:20
-cookie=0x0, duration=2050.621s, table=10, n_packets=11, n_bytes=944, send_flow_rem tun_id=0x1,dl_dst=fa:16:3e:00:c4:97 actions=output:1,goto_table:20
-cookie=0x0, duration=2049.641s, table=10, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x2,dl_dst=fa:16:3e:c6:00:e1 actions=output:1,goto_table:20
-cookie=0x0, duration=2051.415s, table=10, n_packets=2, n_bytes=140, send_flow_rem tun_id=0x1,dl_dst=fa:16:3e:f7:3d:96 actions=output:1,goto_table:20
-cookie=0x0, duration=2048.058s, table=10, n_packets=0, n_bytes=0, send_flow_rem tun_id=0x1,dl_dst=fa:16:3e:e1:a7:e1 actions=output:1,goto_table:20
-cookie=0x0, duration=2044.517s, table=20, n_packets=13, n_bytes=1084, send_flow_rem priority=8192,tun_id=0x1 actions=drop
-cookie=0x0, duration=2044.608s, table=20, n_packets=21, n_bytes=2486, send_flow_rem priority=16384,tun_id=0x1,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=output:2
-cookie=0x0, duration=2044.666s, table=20, n_packets=17, n_bytes=1898, send_flow_rem tun_id=0x1,dl_dst=fa:16:3e:f5:03:2e actions=output:2</screen>
-    </para>
-    <para>Define new networks with encaps of VXLAN or GRE along with specifying the segmentation ID.
-        In this case GRE:</para>
-    <para>
-        <screen><command>neutron net-create gre1 --tenant_id $(keystone tenant-list | grep '\sadmin' | awk '{print $2}') --provider:network_type gre --provider:segmentation_id 1300</command>
-<command>neutron subnet-create gre1 10.200.1.0/24 --name gre1</command></screen>
-    </para>
-    <para>
-        <screen><command>neutron net-create gre2 --tenant_id $(keystone tenant-list | grep '\sadmin' | awk '{print $2}') --provider:network_type gre --provider:segmentation_id 1310</command>
-<command>neutron subnet-create gre2 10.200.2.0/24 --name gre2</command></screen>
-    </para>
-    <para>And then boot those instances using those networks:</para>
-    <para>
-        <screen><command>nova boot --flavor m1.tiny --image $(nova image-list | grep $IMAGE'\s' | awk '{print $2}') --nic net-id=$(neutron net-list | grep 'gre1' | awk '{print $2}') gre1-host</command>
-<command>nova boot --flavor m1.tiny --image $(nova image-list | grep $IMAGE'\s' | awk '{print $2}') --nic net-id=$(neutron net-list | grep 'gre2' | awk '{print $2}') gre2-host</command></screen>
-    </para>
-  </section>
diff --git a/manuals/howto-openstack/src/main/asciidoc/openstack.adoc b/manuals/howto-openstack/src/main/asciidoc/openstack.adoc
new file mode 100644 (file)
index 0000000..8c03ad7
--- /dev/null
@@ -0,0 +1,235 @@
+= OpenStack and OpenDaylight
+
+[preface]
+== Overview
+http://www.openstack.org[OpenStack] is a popular open source Infrastructure
+as a service project, covering compute, storage and network management.
+OpenStack can use OpenDaylight as its network management provider through the
+Modular Layer 2 (ML2) north-bound plug-in. OpenDaylight manages the network
+flows for the OpenStack compute nodes via the OVSDB south-bound plug-in. This
+page describes how to set that up, and how to tell when everything is working.
+
+== Installing OpenStack
+
+Installing OpenStack is out of scope for this document, but to get started, it
+is useful to have a minimal multi-node OpenStack deployment.
+
+The reference deployment we will use for this document is a 3 node cluster:
+
+* One control node containing all of the management services for OpenStack
+   (Nova, Neutron, Glance, Swift, Cinder, Keystone)
+* Two compute nodes running nova-compute
+* Neutron using the OVS back-end and vxlan for tunnels
+
+Once you have installed OpenStack, verify that it is working by connecting
+to Horizon and performing a few operations. To check the Neutron
+configuration, create two instances on a private subnet bridging to your
+public network, and verify that you can connect to them, and that they can
+see each other.
+
+== Installing OpenDaylight
+
+*Prerequisites:* OpenDaylight requires Java 1.7.0.
+
+* On the control host, http://www.opendaylight.org/software/downloads[Download
+  the latest OpenDaylight release] (at the time of writing, this is
+  0.2.1-Helium-SR1.1)
+* Uncompress it as root, and start OpenDaylight (you can start OpenDaylight
+  by running karaf directly, but exiting from the shell will shut it down):
+....
+$ tar xvfz distribution-karaf-0.2.1-Helium-SR1.1.tar.gz
+$ cd distribution-karaf-0.2.1-Helium-SR1.1
+$ ./bin/start # Start OpenDaylight as a server process
+....
+* Connect to the Karaf shell, and install the odl-ovsdb-openstack bundle,
+  dlux and their dependencies:
+....
+$ ./bin/client # Connect to OpenDaylight with the client
+opendaylight-user@root> feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs \
+odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core
+....
+* If everything is installed correctly, you should now be able to log in to
+  the dlux interface on `http://$CONTROL_HOST:8181/dlux/index.html` - the
+  default username and password is "admin/admin" (see screenshot below)
+
+// NOTE(review): image reference converted from MediaWiki syntax — confirm the image file path
+image::Dlux_default.png[Default DLUX screen,width=400,align="center"]
+
+== Ensuring OpenStack network state is clean
+
+When using OpenDaylight as the Neutron back-end, ODL expects to be the only
+source of truth for Open vSwitch configuration. Because of this, it is
+necessary to remove existing OpenStack and Open vSwitch configurations to
+give OpenDaylight a clean slate.
+
+* Delete instances
+....
+$ nova list
+$ nova delete <instance names>
+....
+* Remove link from subnets to routers
+....
+$ neutron subnet-list
+$ neutron router-list
+$ neutron router-port-list <router name>
+$ neutron router-interface-delete <router name> <subnet ID or name>
+....
+* Delete subnets, nets, routers
+....
+$ neutron subnet-delete <subnet name>
+$ neutron net-list
+$ neutron net-delete <net name>
+$ neutron router-delete <router name>
+....
+* Check that all ports have been cleared - at this point, this should be an
+  empty list
+....
+$ neutron port-list
+....
+
+== Ensure Neutron is stopped
+
+While Neutron is managing the OVS instances on compute and control nodes,
+OpenDaylight and Neutron can be in conflict. To prevent issues, we turn off
+Neutron server on the network controller, and Neutron's Open vSwitch agents
+on all hosts.
+
+* Turn off neutron-server on control node
+....
+# systemctl stop neutron-server
+....
+* On each node in the cluster, shut down and disable Neutron's agent services to ensure that they do not restart after a reboot:
+....
+# systemctl stop neutron-openvswitch-agent
+# systemctl disable neutron-openvswitch-agent
+....
+
+== Configuring Open vSwitch to be managed by OpenDaylight
+
+On each host (both compute and control nodes) we will clear the pre-existing
+Open vSwitch config and set OpenDaylight to manage the switch:
+
+* Stop the Open vSwitch service, and clear existing OVSDB (ODL expects to
+manage vSwitches completely)
+....
+# systemctl stop openvswitch
+# rm -rf /var/log/openvswitch/*
+# rm -rf /etc/openvswitch/conf.db
+# systemctl start openvswitch
+....
+* At this stage, your Open vSwitch configuration should be empty:
+....
+[root@dneary-odl-compute2 ~]# ovs-vsctl show
+9f3b38cb-eefc-4bc7-828b-084b1f66fbfd
+    ovs_version: "2.1.3"
+....
+* Set OpenDaylight as the manager on all nodes
+....
+# ovs-vsctl set-manager tcp:${CONTROL_HOST}:6640
+....
+* You should now see a new section in your Open vSwitch configuration
+  showing that you are connected to the OpenDaylight server, and OpenDaylight
+  will automatically create a br-int bridge:
+....
+[root@dneary-odl-compute2 ~]# ovs-vsctl show
+9f3b38cb-eefc-4bc7-828b-084b1f66fbfd
+    Manager "tcp:172.16.21.56:6640"
+        is_connected: true
+    Bridge br-int
+        Controller "tcp:172.16.21.56:6633"
+        fail_mode: secure
+        Port br-int
+            Interface br-int
+    ovs_version: "2.1.3"
+....
+* (BUG WORKAROUND) If SELinux is enabled, you may not have a security
+  context in place which allows Open vSwitch remote administration. If you
+  do not see the result above (specifically, if you do not see
+  "is_connected: true" in the Manager section), set SELinux to Permissive
+  mode on all nodes and ensure it stays that way after boot:
+....
+# setenforce 0
+# sed -i -e 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/selinux/config
+....
+* Make sure all nodes, including the control node, are connected to
+  OpenDaylight
+* If you reload DLUX, you should now see that all of your Open vSwitch nodes
+  are now connected to OpenDaylight
++
+// NOTE(review): image reference converted from MediaWiki syntax — confirm the image file path
+image::Dlux_with_switches.png[DLUX showing Open vSwitch nodes,width=400,align="center"]
+* If something has gone wrong, check `data/log/karaf.log` under
+  the OpenDaylight distribution directory. If you do not see any interesting
+  log entries, set logging for OVSDB to TRACE level inside Karaf and try again:
+....
+log:set TRACE ovsdb
+....
+
+== Configuring Neutron to use OpenDaylight
+
+Once you have configured the vSwitches to connect to OpenDaylight, you can
+now ensure that OpenStack Neutron is using OpenDaylight.
+
+First, ensure that port 8080 (which will be used by OpenDaylight to listen
+for REST calls) is available. By default, swift-proxy-service listens on the
+same port, and you may need to move it (to another port or another host), or
+disable that service. For example, move it to port 8081 by editing
+`/etc/swift/proxy-server.conf` and
+`/etc/cinder/cinder.conf`, modifying iptables appropriately, and
+restarting swift-proxy-service and OpenDaylight.
+
+* Configure Neutron to use OpenDaylight's ML2 driver:
+....
+crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight 
+crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
+
+cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
+[ml2_odl]
+password = admin
+username = admin
+url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron
+EOT
+....
+* Reset Neutron's ML2 database
+....
+mysql -e "drop database if exists neutron_ml2;"
+mysql -e "create database neutron_ml2 character set utf8;"
+mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+neutron-db-manage --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf \
+--config-file /etc/neutron/plugin.ini upgrade head
+....
+* Restart neutron-server:
+    systemctl start neutron-server
+
+== Verifying it works
+
+* Verify that OpenDaylight's ML2 interface is working:
+....
+curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks
+
+{
+   "networks" : [ ]
+}
+....
+
+If this does not work or gives an error, check Neutron's log file in
+`/var/log/neutron/server.log`. Error messages here should give
+some clue as to what the problem is in the connection with OpenDaylight.
+
+* Create a net, subnet, router, connect ports, and start an instance using
+the Neutron CLI:
+....
+neutron router-create router1
+neutron net-create private
+neutron subnet-create private --name=private_subnet 10.10.5.0/24
+neutron router-interface-add router1 private_subnet
+nova boot --flavor <flavor> --image <image id> --nic net-id=<network id> test1
+nova boot --flavor <flavor> --image <image id> --nic net-id=<network id> test2
+....
+
+At this point, you have confirmed that OpenDaylight is creating network
+end-points for instances on your network and managing traffic to them.
+
+Congratulations! You're done!
+
+// Category: Documentation
+// Category: OpenStack
+