.project
.settings
MANIFEST.MF
+opendaylight/northbound/integrationtest/logs/*
import org.junit.Test;\r
\r
public class ArphandlerTest extends TestCase {\r
- \r
- @Test\r
- public void testArphandlerCreation() {\r
- \r
- ArpHandler ah = null;\r
- ah = new ArpHandler();\r
- Assert.assertTrue(ah != null);\r
- \r
- }\r
+\r
+ @Test\r
+ public void testArphandlerCreation() {\r
+\r
+ ArpHandler ah = null;\r
+ ah = new ArpHandler();\r
+ Assert.assertTrue(ah != null);\r
+\r
+ }\r
\r
}\r
<configuration scan="true">
-
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n
\r
public class TestClusteringTest extends TestCase {\r
\r
- @Test\r
- public void testComplexClass() {\r
- ComplexClass cc = new ComplexClass("cplxc1");\r
- Assert.assertTrue(cc.whoAmI().equals("ComplexClass_cplxc1"));\r
- cc.IAm("cplxc2");\r
- Assert.assertTrue(cc.whoAmI().equals("ComplexClass_cplxc2"));\r
- }\r
- \r
- @Test\r
- public void testComplexClass1() {\r
- ComplexClass1 cc1 = new ComplexClass1("cplxc1a");\r
- Assert.assertTrue(cc1.whoAmI().equals("ComplexClass1_cplxc1a"));\r
- cc1.IAm("cplxc1b");\r
- Assert.assertTrue(cc1.whoAmI().equals("ComplexClass1_cplxc1b"));\r
- }\r
- \r
- \r
- @Test\r
- public void testComplexContainer() {\r
- ComplexContainer cplxcontnr1 = new ComplexContainer("cct1", 5);\r
- Assert.assertTrue(cplxcontnr1.getIdentity().equals("[ComplexClass_cct1]-[ComplexClass1_cct1]"));\r
- Assert.assertTrue(cplxcontnr1.getState() == 5);\r
- \r
- cplxcontnr1.setIdentity("cct2");\r
- Assert.assertTrue(cplxcontnr1.getIdentity().equals("[ComplexClass_cct2]-[ComplexClass1_cct2]"));\r
- \r
- Assert.assertTrue(cplxcontnr1.toString().equals(\r
- "{ID:[ComplexClass_cct2]-[ComplexClass1_cct2],STATE:5}"));\r
- }\r
- \r
- @Test\r
- public void testStringContainer() {\r
- StringContainer strcontainer1 = new StringContainer();\r
- Assert.assertTrue(strcontainer1.getMystring() == null);\r
- Assert.assertTrue(strcontainer1.hashCode() == 0);\r
- \r
- StringContainer strcontainer2 = new StringContainer("foo");\r
- Assert.assertTrue(strcontainer2.getMystring() != null);\r
- Assert.assertTrue(strcontainer2.hashCode() != 0);\r
- \r
- strcontainer1.setMystring("foo");\r
- Assert.assertTrue(strcontainer2.equals(strcontainer1));\r
- \r
- Assert.assertTrue(strcontainer2.toString().equals("{foo}"));\r
- }\r
- \r
+ @Test\r
+ public void testComplexClass() {\r
+ ComplexClass cc = new ComplexClass("cplxc1");\r
+ Assert.assertTrue(cc.whoAmI().equals("ComplexClass_cplxc1"));\r
+ cc.IAm("cplxc2");\r
+ Assert.assertTrue(cc.whoAmI().equals("ComplexClass_cplxc2"));\r
+ }\r
+\r
+ @Test\r
+ public void testComplexClass1() {\r
+ ComplexClass1 cc1 = new ComplexClass1("cplxc1a");\r
+ Assert.assertTrue(cc1.whoAmI().equals("ComplexClass1_cplxc1a"));\r
+ cc1.IAm("cplxc1b");\r
+ Assert.assertTrue(cc1.whoAmI().equals("ComplexClass1_cplxc1b"));\r
+ }\r
+\r
+\r
+ @Test\r
+ public void testComplexContainer() {\r
+ ComplexContainer cplxcontnr1 = new ComplexContainer("cct1", 5);\r
+ Assert.assertTrue(cplxcontnr1.getIdentity().equals("[ComplexClass_cct1]-[ComplexClass1_cct1]"));\r
+ Assert.assertTrue(cplxcontnr1.getState() == 5);\r
+\r
+ cplxcontnr1.setIdentity("cct2");\r
+ Assert.assertTrue(cplxcontnr1.getIdentity().equals("[ComplexClass_cct2]-[ComplexClass1_cct2]"));\r
+\r
+ Assert.assertTrue(cplxcontnr1.toString().equals(\r
+ "{ID:[ComplexClass_cct2]-[ComplexClass1_cct2],STATE:5}"));\r
+ }\r
+\r
+ @Test\r
+ public void testStringContainer() {\r
+ StringContainer strcontainer1 = new StringContainer();\r
+ Assert.assertTrue(strcontainer1.getMystring() == null);\r
+ Assert.assertTrue(strcontainer1.hashCode() == 0);\r
+\r
+ StringContainer strcontainer2 = new StringContainer("foo");\r
+ Assert.assertTrue(strcontainer2.getMystring() != null);\r
+ Assert.assertTrue(strcontainer2.hashCode() != 0);\r
+\r
+ strcontainer1.setMystring("foo");\r
+ Assert.assertTrue(strcontainer2.equals(strcontainer1));\r
+\r
+ Assert.assertTrue(strcontainer2.toString().equals("{foo}"));\r
+ }\r
+\r
\r
}\r
<manifestLocation>${project.basedir}/META-INF</manifestLocation>\r
</configuration>\r
</plugin>\r
+ <plugin>\r
+ <groupId>org.apache.maven.plugins</groupId>\r
+ <artifactId>maven-checkstyle-plugin</artifactId>\r
+ <version>${checkstyle.version}</version>\r
+ <configuration>\r
+ <failsOnError>true</failsOnError>\r
+ <configLocation>${project.parent.parent.basedir}/space_and_tabs_checks.xml</configLocation>\r
+ </configuration>\r
+ </plugin>\r
</plugins>\r
</build>\r
<dependencies>\r
public interface Acceptor<I> {\r
\r
/**\r
- * \r
+ *\r
* @param input\r
* @return true if input is accepted.\r
*/\r
\r
import java.util.Collection;\r
/**\r
- * \r
+ *\r
* @author Tony Tkacik\r
*\r
* @param <I>\r
/**\r
* Transformer which aggregates multiple implementations of\r
* {@link InputClassBasedTransformer}.\r
- * \r
+ *\r
* The transformation process is driven by {@link Class} of input. The selection\r
* of used {@link InputClassBasedTransformer} is done by using the {@link Class}\r
* of input as a key to select the transformer.\r
- * \r
+ *\r
* This approach provides quick resolution of transformer, but does not support\r
* registering a super type of input to provide transformation support for all\r
* subclasses, one must register a new instance of transformer for each valid\r
* input class.\r
- * \r
+ *\r
* If you need more flexible selection of transformation consider using\r
* {@link CompositeConditionalTransformer} which is slower but most flexible or\r
* {@link RuleBasedTransformer} which provides declarative approach for\r
* transformation.\r
- * \r
+ *\r
* See {@link #transform(Object)} for more information about tranformation\r
* process.\r
- * \r
+ *\r
* @author Tony Tkacik <ttkacik@cisco.com>\r
- * \r
+ *\r
* @param <I>\r
* Input super-type\r
* @param <P>\r
\r
/**\r
* Transforms an input into instance of Product class.\r
- * \r
+ *\r
* The final registered transformer is the one which match following\r
* condition:\r
- * \r
+ *\r
* <code>input.getClass() == transformer.getInputClass()</code>\r
- * \r
+ *\r
* This means that transformers are not resolved by class hierarchy, only\r
* selected based on final class of the input. If you need more flexible\r
* selection of transformation consider using\r
* {@link CompositeConditionalTransformer} which is slower but more\r
* flexible.\r
- * \r
+ *\r
*/\r
@Override\r
public P transform(I input) {\r
\r
/**\r
* Registers a new transformer.\r
- * \r
+ *\r
* The transformer is registered for class returned by\r
* {@link InputClassBasedTransformer#getInputClass()}. Only one transformer\r
* can be registered for particular input class.\r
- * \r
+ *\r
*/\r
public void addTransformer(\r
InputClassBasedTransformer<I, ? extends I, P> transformer)\r
\r
/**\r
* Removes an registered transformer.\r
- * \r
+ *\r
* Note: Removal is currently unsupported.\r
- * \r
+ *\r
* @param transformer\r
* Tranformer to be removed.\r
* @throws IllegalArgumentException\r
throws IllegalArgumentException {\r
throw new UnsupportedOperationException("Not implemented yet");\r
}\r
- \r
+\r
@Override\r
public Collection<P> transformAll(Collection<? extends I> inputs) {\r
Collection<P> ret = new ArrayList<P>();\r
\r
/**\r
* Input class based transformer\r
- * \r
+ *\r
* {@link Transformer} which accepts / transforms only specific classes of\r
* input, and is useful if the selection of transformer should be based on the\r
* class of the input and there is one-to-one mapping between input class and\r
* transformer.\r
- * \r
- * \r
+ *\r
+ *\r
* @author Tony Tkacik\r
- * \r
+ *\r
* @param <S>\r
* Common supertype of input\r
* @param <I>\r
\r
/**\r
* Returns an {@link Class} of input which is acceptable for transformation.\r
- * \r
+ *\r
* @return {@link Class} of input which is acceptable for transformation.\r
*/\r
Class<? extends S> getInputClass();\r
\r
/**\r
* Transformer with set of acceptance rules\r
- * \r
+ *\r
* The transformer provides a set of {@link Acceptor}s, which could be used to\r
* verify if the input will produce result using the transformer.\r
- * \r
+ *\r
* The transormer is able to produce result if ANY of associated\r
* {@link Acceptor}s accepted result.\r
- * \r
+ *\r
* @author Tony Tkacik\r
- * \r
+ *\r
* @param <I>\r
* Input class for transformation\r
* @param <P>\r
/**\r
* Set of {@link Acceptor}, which could be used to verify if the input is\r
* usable by transformer.\r
- * \r
+ *\r
* The transformer is able to produce result if ANY of associated\r
* {@link Acceptor}s accepted result.\r
- * \r
+ *\r
* @return Set of input acceptance rules associated to this transformer.\r
*/\r
Set<Acceptor<I>> getRules();\r
\r
/**\r
* Simple condition-based transformer\r
- * \r
+ *\r
* The transformer provides {@link #isAcceptable(Object)} method,\r
* which could be used to query transformer if the input will produce\r
* result.\r
- * \r
+ *\r
* This interface is simplified version of {@link RuleBasedTransformer} - does not\r
* provide decoupling of Acceptance rule from transformer, and should be used only\r
* for simple use-cases.\r
- * \r
+ *\r
* @author Tony Tkacik\r
*\r
* @param <I> Input class for transformation\r
\r
\r
/**\r
- * Checks if the input is acceptable \r
+ * Checks if the input is acceptable\r
* for processing by the transformer.\r
- * \r
+ *\r
* @return true it the input is acceptable for processing by transformer.\r
*/\r
@Override\r
\r
/**\r
* Factory which produces product based on input object\r
- * \r
+ *\r
* @author Tony Tkacik\r
*\r
* @param <I> Input\r
public interface Transformer<I,P> {\r
/**\r
* Transforms input into instance of product.\r
- * \r
+ *\r
* @param input Input which drives transformation\r
* @return Instance of product which was created from supplied input.\r
*/\r
<plugin>
<groupId>org.ops4j.pax.exam</groupId>
<artifactId>maven-paxexam-plugin</artifactId>
+ <version>1.2.4</version>
<executions>
<execution>
<id>generate-config</id>
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <version>${checkstyle.version}</version>
+ <configuration>
+ <failsOnError>true</failsOnError>
+ <configLocation>${project.parent.parent.basedir}/space_and_tabs_checks.xml</configLocation>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>properties-maven-plugin</artifactId>
+ <version>${propertymavenplugin.version}</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>set-system-properties</goal>
+ </goals>
+ <configuration>
+ <properties>
+ <property>
+ <name>logback.configurationFile</name>
+ <value>${project.parent.parent.basedir}/logback.xml</value>
+ </property>
+ </properties>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
</plugins>
</build>
</project>
<artifactId>commons.opendaylight</artifactId>
<version>1.4.0-SNAPSHOT</version>
<packaging>pom</packaging>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.parent</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../parent</relativePath>
+ </parent>
<properties>
+ <propertymavenplugin.version>1.0-alpha-2</propertymavenplugin.version>
<sonar.host.url>https://sonar.opendaylight.org/</sonar.host.url>
- <nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>
<sitedeploy>dav:http://nexus.opendaylight.org/content/sites/site</sitedeploy>
<siteplugin>3.2</siteplugin>
<projectinfo>2.6</projectinfo>
<jersey.version>1.17</jersey.version>
<virgo.version>3.6.0.RELEASE</virgo.version>
<geminiweb.version>2.2.0.RELEASE</geminiweb.version>
+ <checkstyle.version>2.10</checkstyle.version>
</properties>
<pluginRepositories>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
- <version>2.10</version>
+ <version>${checkstyle.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>properties-maven-plugin</artifactId>
- <version>1.0-alpha-2</version>
+ <version>${propertymavenplugin.version}</version>
<executions>
<execution>
<goals>
<properties>
<property>
<name>logback.configurationFile</name>
- <value>${project.parent.relativePath}/logback.xml</value>
+ <value>${project.parent.basedir}/logback.xml</value>
</property>
</properties>
</configuration>
<compilerSource>1.6</compilerSource>
<compilerCompliance>1.6</compilerCompliance>
<compilerTargetPlatform>1.6</compilerTargetPlatform>
- <configFile>${project.parent.relativePath}/sun_coding_style.xml</configFile>
+ <configFile>${project.parent.basedir}/sun_coding_style.xml</configFile>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <version>${checkstyle.version}</version>
+ <executions>
+ <execution>
+ <phase>process-sources</phase>
+ <goals>
+ <goal>check</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <failsOnError>true</failsOnError>
+ <configLocation>${project.parent.basedir}/space_and_tabs_checks.xml</configLocation>
+ <consoleOutput>true</consoleOutput>
+ <includeTestSourceDirectory>true</includeTestSourceDirectory>
</configuration>
</plugin>
<plugin>
</action>
</pluginExecution>
<pluginExecution>
- <pluginExecutionFilter>
- <groupId>org.ops4j.pax.exam</groupId>
- <artifactId>maven-paxexam-plugin</artifactId>
- <versionRange>[1.2.4,)</versionRange>
- <goals>
- <goal>generate-depends-file</goal>
- </goals>
- </pluginExecutionFilter>
- <action>
- <ignore></ignore>
- </action>
+ <pluginExecutionFilter>
+ <groupId>org.ops4j.pax.exam</groupId>
+ <artifactId>maven-paxexam-plugin</artifactId>
+ <versionRange>[1.2.4,)</versionRange>
+ <goals>
+ <goal>generate-depends-file</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore/>
+ </action>
+ </pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <versionRange>[2.0,)</versionRange>
+ <goals>
+ <goal>check</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore/>
+ </action>
</pluginExecution>
</pluginExecutions>
</lifecycleMappingMetadata>
--- /dev/null
+<?xml version="1.0"?>\r
+<!DOCTYPE module PUBLIC\r
+ "-//Puppy Crawl//DTD Check Configuration 1.2//EN"\r
+ "http://www.puppycrawl.com/dtds/configuration_1_2.dtd">\r
+\r
+<module name="Checker">\r
+ <module name="FileTabCharacter">\r
+ <property name="eachLine" value="true"/>\r
+ </module>\r
+\r
+ <module name="RegexpSingleline">\r
+ <!-- \s matches whitespace character, $ matches end of line. -->\r
+ <property name="format" value="\s+$"/>\r
+ <property name="message" value="Line has trailing spaces."/>\r
+ </module>\r
+\r
+</module>\r
--- /dev/null
+This contains a dummy parent project. An example use case is the
+maven-checkstyle plugin call in commons/opendaylight: there the plugin
+references the space_and_tabs_checks.xml from the parent. It works well
+for all the children of the commons/opendaylight artifact, but when
+trying to deploy that artifact itself it breaks, because the plugin
+section will still refer to the parent when there is no parent.
+This fake parent allows setting a parent for commons/opendaylight --
+even if it is pretty much empty or limited to the bare minimum -- and
+thus lets commons/opendaylight reference a parent.
--- /dev/null
+<configuration scan="true">
+
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <root level="error">
+ <appender-ref ref="STDOUT" />
+ </root>
+</configuration>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <modelVersion>4.0.0</modelVersion>
+ <prerequisites>
+ <maven>3.0</maven>
+ </prerequisites>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.parent</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <packaging>pom</packaging>
+
+ <properties>
+ <nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>
+ </properties>
+ <distributionManagement>
+ <!-- OpenDayLight Released artifact -->
+ <repository>
+ <id>opendaylight-release</id>
+ <url>${nexusproxy}/repositories/opendaylight.release/</url>
+ </repository>
+ <!-- OpenDayLight Snapshot artifact -->
+ <snapshotRepository>
+ <id>opendaylight-snapshot</id>
+ <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
+ </snapshotRepository>
+ <!-- Site deployment -->
+ <site>
+ <id>website</id>
+ <url>${sitedeploy}</url>
+ </site>
+ </distributionManagement>
+</project>
--- /dev/null
+<?xml version="1.0"?>\r
+<!DOCTYPE module PUBLIC\r
+ "-//Puppy Crawl//DTD Check Configuration 1.2//EN"\r
+ "http://www.puppycrawl.com/dtds/configuration_1_2.dtd">\r
+\r
+<module name="Checker">\r
+ <module name="FileTabCharacter">\r
+ <property name="eachLine" value="true"/>\r
+ </module>\r
+\r
+ <module name="RegexpSingleline">\r
+ <!-- \s matches whitespace character, $ matches end of line. -->\r
+ <property name="format" value="\s+$"/>\r
+ <property name="message" value="Line has trailing spaces."/>\r
+ </module>\r
+\r
+</module>\r
--- /dev/null
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<profiles version="12">
+<profile kind="CodeFormatterProfile" name="SunCodingStyle" version="12">
+<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.disabling_tag" value="@formatter:off"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_field" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.use_on_off_tags" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_ellipsis" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_multiple_fields" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_conditional_expression" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_binary_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_array_initializer" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_package" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.continuation_indentation" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_binary_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_package" value="0"/>
+<setting id="org.eclipse.jdt.core.compiler.source" value="1.7"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_line_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.join_wrapped_lines" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_member_type" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.align_type_members_on_columns" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_unary_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.indent_parameter_description" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.lineSplit" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_method" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indentation.size" value="8"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.enabling_tag" value="@formatter:on"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_assignment" value="0"/>
+<setting id="org.eclipse.jdt.core.compiler.problem.assertIdentifier" value="error"/>
+<setting id="org.eclipse.jdt.core.formatter.tabulation.char" value="space"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_try_resources" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_body" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_method" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_method_declaration" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_try" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_switch" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_try" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.problem.enumIdentifier" value="error"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_ellipsis" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_method_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.compact_else_if" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_field" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.indent_root_tags" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_constant" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_union_type_in_multicatch" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.tabulation.size" value="4"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_empty_lines" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block_in_case" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.compiler.compliance" value="1.7"/>
+<setting id="org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_unary_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_binary_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_type" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode" value="enabled"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_try" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_label" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_javadoc_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.line_length" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_package" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_import_groups" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.wrap_before_binary_operator" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_block" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.join_lines_in_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_compact_if" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_imports" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_html" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_source_code" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.compiler.codegen.targetPlatform" value="1.7"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_resources_in_try" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_header" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_block_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_enum_constants" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_imports" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_try_resources" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line" value="false"/>
+</profile>
+</profiles>
}
public int getConfigurationAwareListSize() {
- return this.configurationAwareList.size();
+ return this.configurationAwareList.size();
}
-
+
public void removeConfigurationContainerAware(
IConfigurationContainerAware configurationAware) {
this.configurationAwareList.remove(configurationAware);
Status status = configurationAware.saveConfiguration();
if (!status.isSuccess()) {
- success = false;
- logger.info("Failed to save config for {}",
- configurationAware.getClass().getSimpleName());
+ success = false;
+ logger.info("Failed to save config for {}",
+ configurationAware.getClass().getSimpleName());
}
}
if (success) {
return new Status(StatusCode.SUCCESS, null);
} else {
return new Status(StatusCode.INTERNALERROR,
- "Failed to Save All Configurations");
+ "Failed to Save All Configurations");
}
}
private Set<IConfigurationAware> configurationAwareList = (Set<IConfigurationAware>) Collections
.synchronizedSet(new HashSet<IConfigurationAware>());
-
+
public int getConfigurationAwareListSize() {
- return this.configurationAwareList.size();
+ return this.configurationAwareList.size();
}
-
+
public void addConfigurationAware(IConfigurationAware configurationAware) {
if (!this.configurationAwareList.contains(configurationAware)) {
this.configurationAwareList.add(configurationAware);
public Status saveConfigurations() {
boolean success = true;
for (IConfigurationAware configurationAware : configurationAwareList) {
- Status status = configurationAware.saveConfiguration();
+ Status status = configurationAware.saveConfiguration();
if (!status.isSuccess()) {
- success = false;
- logger.info("Failed to save config for {}",
- configurationAware.getClass().getName());
+ success = false;
+ logger.info("Failed to save config for {}",
+ configurationAware.getClass().getName());
}
}
if (success) {
return new Status(StatusCode.SUCCESS, null);
} else {
return new Status(StatusCode.INTERNALERROR,
- "Failed to Save All Configurations");
+ "Failed to Save All Configurations");
}
}
public class ConfigurationAwareTest implements
IConfigurationAware {
-
- @Override
- public Status saveConfiguration() {
- return null;
- }
+
+ @Override
+ public Status saveConfiguration() {
+ return null;
+ }
}
public class ConfigurationContainerAwareTest implements
IConfigurationContainerAware {
-
-
-
- @Override
- public Status saveConfiguration() {
- return null;
- }
-
-
+
+
+
+ @Override
+ public Status saveConfiguration() {
+ return null;
+ }
+
+
}
-public class ConfigurationContainerImplTest {
-
-
- @Test
- public void testAddRemoveSaveConfiguration() {
-
- ConfigurationContainerImpl configurationContainerImpl = new ConfigurationContainerImpl();
- IConfigurationContainerAware testConfigurationContainerAware = new ConfigurationContainerAwareTest();
-
- configurationContainerImpl.addConfigurationContainerAware(testConfigurationContainerAware);
- configurationContainerImpl.addConfigurationContainerAware(testConfigurationContainerAware);
-
- Assert.assertEquals(1, configurationContainerImpl.getConfigurationAwareListSize());
-
- IConfigurationContainerAware testConfigurationAware1 = new ConfigurationContainerAwareTest();
- configurationContainerImpl.addConfigurationContainerAware(testConfigurationAware1);
-
- Assert.assertEquals(2, configurationContainerImpl.getConfigurationAwareListSize());
-
- IConfigurationContainerAware testConfigurationAware2 = new ConfigurationContainerAwareTest();
- configurationContainerImpl.addConfigurationContainerAware(testConfigurationAware2);
-
- Assert.assertEquals(3, configurationContainerImpl.getConfigurationAwareListSize());
-
- IConfigurationContainerAware testConfigurationAware3 = new ConfigurationContainerAwareTest();
- configurationContainerImpl.addConfigurationContainerAware(testConfigurationAware3);
-
- Assert.assertEquals(4, configurationContainerImpl.getConfigurationAwareListSize());
-
- configurationContainerImpl.removeConfigurationContainerAware(testConfigurationContainerAware);
- Assert.assertEquals(3, configurationContainerImpl.getConfigurationAwareListSize());
-
- configurationContainerImpl.removeConfigurationContainerAware(testConfigurationContainerAware);
- Assert.assertEquals(3, configurationContainerImpl.getConfigurationAwareListSize());
-
- configurationContainerImpl.removeConfigurationContainerAware(testConfigurationAware3);
- Assert.assertEquals(2, configurationContainerImpl.getConfigurationAwareListSize());
-
- configurationContainerImpl.removeConfigurationContainerAware(testConfigurationAware2);
- Assert.assertEquals(1, configurationContainerImpl.getConfigurationAwareListSize());
-
- configurationContainerImpl.removeConfigurationContainerAware(testConfigurationAware1);
- Assert.assertEquals(0, configurationContainerImpl.getConfigurationAwareListSize());
-
-
- }
-
-}
+public class ConfigurationContainerImplTest {
+
+
+ @Test
+ public void testAddRemoveSaveConfiguration() {
+
+ ConfigurationContainerImpl configurationContainerImpl = new ConfigurationContainerImpl();
+ IConfigurationContainerAware testConfigurationContainerAware = new ConfigurationContainerAwareTest();
+
+ configurationContainerImpl.addConfigurationContainerAware(testConfigurationContainerAware);
+ configurationContainerImpl.addConfigurationContainerAware(testConfigurationContainerAware);
+
+ Assert.assertEquals(1, configurationContainerImpl.getConfigurationAwareListSize());
+
+ IConfigurationContainerAware testConfigurationAware1 = new ConfigurationContainerAwareTest();
+ configurationContainerImpl.addConfigurationContainerAware(testConfigurationAware1);
+
+ Assert.assertEquals(2, configurationContainerImpl.getConfigurationAwareListSize());
+
+ IConfigurationContainerAware testConfigurationAware2 = new ConfigurationContainerAwareTest();
+ configurationContainerImpl.addConfigurationContainerAware(testConfigurationAware2);
+
+ Assert.assertEquals(3, configurationContainerImpl.getConfigurationAwareListSize());
+ IConfigurationContainerAware testConfigurationAware3 = new ConfigurationContainerAwareTest();
+ configurationContainerImpl.addConfigurationContainerAware(testConfigurationAware3);
+
+ Assert.assertEquals(4, configurationContainerImpl.getConfigurationAwareListSize());
+
+ configurationContainerImpl.removeConfigurationContainerAware(testConfigurationContainerAware);
+ Assert.assertEquals(3, configurationContainerImpl.getConfigurationAwareListSize());
+
+ configurationContainerImpl.removeConfigurationContainerAware(testConfigurationContainerAware);
+ Assert.assertEquals(3, configurationContainerImpl.getConfigurationAwareListSize());
+
+ configurationContainerImpl.removeConfigurationContainerAware(testConfigurationAware3);
+ Assert.assertEquals(2, configurationContainerImpl.getConfigurationAwareListSize());
+
+ configurationContainerImpl.removeConfigurationContainerAware(testConfigurationAware2);
+ Assert.assertEquals(1, configurationContainerImpl.getConfigurationAwareListSize());
+
+ configurationContainerImpl.removeConfigurationContainerAware(testConfigurationAware1);
+ Assert.assertEquals(0, configurationContainerImpl.getConfigurationAwareListSize());
+
+
+ }
+
+}
import org.junit.*;
import org.opendaylight.controller.configuration.IConfigurationAware;
-public class ConfigurationImplTest {
-
-
- @Test
- public void testAddRemoveSaveConfiguration() {
-
- ConfigurationImpl configurationImpl = new ConfigurationImpl();
- IConfigurationAware testConfigurationAware = new ConfigurationAwareTest();
-
- configurationImpl.addConfigurationAware(testConfigurationAware);
- configurationImpl.addConfigurationAware(testConfigurationAware);
-
- Assert.assertEquals(1, configurationImpl.getConfigurationAwareListSize());
-
- ConfigurationAwareTest testConfigurationAware1 = new ConfigurationAwareTest();
- configurationImpl.addConfigurationAware(testConfigurationAware1);
-
- Assert.assertEquals(2, configurationImpl.getConfigurationAwareListSize());
-
- ConfigurationAwareTest testConfigurationAware2 = new ConfigurationAwareTest();
- configurationImpl.addConfigurationAware(testConfigurationAware2);
-
- Assert.assertEquals(3, configurationImpl.getConfigurationAwareListSize());
-
- ConfigurationAwareTest testConfigurationAware3 = new ConfigurationAwareTest();
- configurationImpl.addConfigurationAware(testConfigurationAware3);
-
- Assert.assertEquals(4, configurationImpl.getConfigurationAwareListSize());
-
-
- configurationImpl.removeConfigurationAware(testConfigurationAware);
- Assert.assertEquals(3, configurationImpl.getConfigurationAwareListSize());
-
- configurationImpl.removeConfigurationAware(testConfigurationAware);
- Assert.assertEquals(3, configurationImpl.getConfigurationAwareListSize());
-
- configurationImpl.removeConfigurationAware(testConfigurationAware3);
- Assert.assertEquals(2, configurationImpl.getConfigurationAwareListSize());
-
- configurationImpl.removeConfigurationAware(testConfigurationAware1);
- Assert.assertEquals(1, configurationImpl.getConfigurationAwareListSize());
-
- configurationImpl.removeConfigurationAware(testConfigurationAware2);
- Assert.assertEquals(0, configurationImpl.getConfigurationAwareListSize());
-
- }
-
-}
+public class ConfigurationImplTest {
+
+
+ @Test
+ public void testAddRemoveSaveConfiguration() {
+
+ ConfigurationImpl configurationImpl = new ConfigurationImpl();
+ IConfigurationAware testConfigurationAware = new ConfigurationAwareTest();
+
+ configurationImpl.addConfigurationAware(testConfigurationAware);
+ configurationImpl.addConfigurationAware(testConfigurationAware);
+
+ Assert.assertEquals(1, configurationImpl.getConfigurationAwareListSize());
+
+ ConfigurationAwareTest testConfigurationAware1 = new ConfigurationAwareTest();
+ configurationImpl.addConfigurationAware(testConfigurationAware1);
+
+ Assert.assertEquals(2, configurationImpl.getConfigurationAwareListSize());
+
+ ConfigurationAwareTest testConfigurationAware2 = new ConfigurationAwareTest();
+ configurationImpl.addConfigurationAware(testConfigurationAware2);
+
+ Assert.assertEquals(3, configurationImpl.getConfigurationAwareListSize());
+ ConfigurationAwareTest testConfigurationAware3 = new ConfigurationAwareTest();
+ configurationImpl.addConfigurationAware(testConfigurationAware3);
+
+ Assert.assertEquals(4, configurationImpl.getConfigurationAwareListSize());
+
+
+ configurationImpl.removeConfigurationAware(testConfigurationAware);
+ Assert.assertEquals(3, configurationImpl.getConfigurationAwareListSize());
+
+ configurationImpl.removeConfigurationAware(testConfigurationAware);
+ Assert.assertEquals(3, configurationImpl.getConfigurationAwareListSize());
+
+ configurationImpl.removeConfigurationAware(testConfigurationAware3);
+ Assert.assertEquals(2, configurationImpl.getConfigurationAwareListSize());
+
+ configurationImpl.removeConfigurationAware(testConfigurationAware1);
+ Assert.assertEquals(1, configurationImpl.getConfigurationAwareListSize());
+
+ configurationImpl.removeConfigurationAware(testConfigurationAware2);
+ Assert.assertEquals(0, configurationImpl.getConfigurationAwareListSize());
+
+ }
+
+}
/**
* Returns a list of Containers that currently exist.
*
- * @return array of String Container names
+ * @return array of String Container names
*/
public boolean hasNonDefaultContainer();
/**
* Returns a list of Containers that currently exist.
*
- * @return array of String Container names
+ * @return array of String Container names
*/
public List<String> getContainerNames();
* TODO : REMOVE THIS FUNCTION and make Save as a service rather than the
* current hack of calling individual save routines.
*
- * @return status code
+ * @return status code
*/
public Status saveContainerConfig();
}
\r
public class ContainerImplTest {\r
\r
- @Test\r
- public void test() {\r
- \r
- ContainerImpl container1 = new ContainerImpl();\r
- \r
- //Create Component for init\r
- ComponentImpl component1 = new ComponentImpl(null, null, null);\r
- component1.setInterface("serviceTestName", null);\r
-\r
- //container1 does not have name yet\r
- container1.init(component1);\r
- assertNull(container1.getName());\r
- \r
- //Sets container1 name to TestName\r
- Hashtable<String, String> properties = new Hashtable<String, String>();\r
- properties.put("dummyKey", "dummyValue");\r
- properties.put("containerName", "TestName");\r
- component1.setInterface("serviceTestName", properties);\r
-\r
- container1.init(component1);\r
- assertEquals("TestName", container1.getName());\r
- \r
- //getContainerFlows always returns null for now\r
- assertNull(container1.getContainerFlows());\r
- \r
- //getTag always returns 0 for now\r
- Node n = NodeCreator.createOFNode(1L);\r
- assertEquals(0, container1.getTag(n));\r
- \r
- //getNodeConnectors always returns null for now\r
- assertNull(container1.getNodeConnectors());\r
- \r
- //getNodes always returns null for now\r
- assertNull(container1.getNodes());\r
- \r
- }\r
+ @Test\r
+ public void test() {\r
+\r
+ ContainerImpl container1 = new ContainerImpl();\r
+\r
+ //Create Component for init\r
+ ComponentImpl component1 = new ComponentImpl(null, null, null);\r
+ component1.setInterface("serviceTestName", null);\r
+\r
+ //container1 does not have name yet\r
+ container1.init(component1);\r
+ assertNull(container1.getName());\r
+\r
+ //Sets container1 name to TestName\r
+ Hashtable<String, String> properties = new Hashtable<String, String>();\r
+ properties.put("dummyKey", "dummyValue");\r
+ properties.put("containerName", "TestName");\r
+ component1.setInterface("serviceTestName", properties);\r
+\r
+ container1.init(component1);\r
+ assertEquals("TestName", container1.getName());\r
+\r
+ //getContainerFlows always returns null for now\r
+ assertNull(container1.getContainerFlows());\r
+\r
+ //getTag always returns 0 for now\r
+ Node n = NodeCreator.createOFNode(1L);\r
+ assertEquals(0, container1.getTag(n));\r
+\r
+ //getNodeConnectors always returns null for now\r
+ assertNull(container1.getNodeConnectors());\r
+\r
+ //getNodes always returns null for now\r
+ assertNull(container1.getNodes());\r
+\r
+ }\r
\r
}\r
\r
public class ContainerManagerTest {\r
\r
- @Test\r
- public void test() {\r
- ContainerManager cm = new ContainerManager();\r
- \r
- cm.init();\r
- \r
- ArrayList<String> names = (ArrayList<String>) cm.getContainerNames();\r
- assertEquals(1, names.size());\r
- assertEquals(GlobalConstants.DEFAULT.toString(), names.get(0));\r
- \r
- assertFalse(cm.hasNonDefaultContainer());\r
- assertNull(cm.saveContainerConfig());\r
- \r
- cm.destroy();\r
-\r
- }\r
+ @Test\r
+ public void test() {\r
+ ContainerManager cm = new ContainerManager();\r
+\r
+ cm.init();\r
+\r
+ ArrayList<String> names = (ArrayList<String>) cm.getContainerNames();\r
+ assertEquals(1, names.size());\r
+ assertEquals(GlobalConstants.DEFAULT.toString(), names.get(0));\r
+\r
+ assertFalse(cm.hasNonDefaultContainer());\r
+ assertNull(cm.saveContainerConfig());\r
+\r
+ cm.destroy();\r
+\r
+ }\r
\r
}\r
<module>../../hosttracker/api</module>
<module>../../hosttracker/implementation</module>
<module>../../hosttracker/integrationtest</module>
+ <module>../../hosttracker_new/api</module>
+ <module>../../hosttracker_new/implementation</module>
<module>../../containermanager/api</module>
<module>../../containermanager/implementation</module>
<module>../../switchmanager/api</module>
<exclude>org.opendaylight.controller:logging.bridge</exclude>
<exclude>org.opendaylight.controller:protocol_plugins.stub</exclude>
<exclude>org.opendaylight.controller:*.integrationtest</exclude>
+ <exclude>org.opendaylight.controller:hosttracker_new</exclude>
+ <exclude>org.opendaylight.controller:hosttracker_new.implementation</exclude>
</excludes>
<binaries>
<outputDirectory>opendaylight/plugins</outputDirectory>
import java.net.InetAddress;
import java.util.concurrent.ConcurrentMap;
/**
- *
+ *
* This interface provides APIs to configure and manage static routes.
*
*/
import org.opendaylight.controller.sal.utils.Status;
/**
- *
+ *
* This Interface provides APIs to manage and query the static routes
*
*/
public Status isValid() {
if ((name == null) || (name.trim().length() < 1)) {
return new Status(StatusCode.BADREQUEST,
- "Invalid Static Route name");
+ "Invalid Static Route name");
}
if (!isValidStaticRouteEntry()) {
return new Status(StatusCode.BADREQUEST,
- "Invalid Static Route entry. Please use the " +
- "IPAddress/mask format. Default gateway " +
- "(0.0.0.0/0) is NOT supported.");
+ "Invalid Static Route entry. Please use the " +
+ "IPAddress/mask format. Default gateway " +
+ "(0.0.0.0/0) is NOT supported.");
}
if (!isValidNextHop()) {
return new Status(StatusCode.BADREQUEST,
- "Invalid NextHop IP Address configuration. " +
- "Please use the X.X.X.X format.");
+ "Invalid NextHop IP Address configuration. " +
+ "Please use the X.X.X.X format.");
}
return new Status(StatusCode.SUCCESS, null);
}
}
-
+
private Status saveConfig() {
// Publish the save config event to the cluster nodes
configSaveEvent.put(new Date().getTime(), SAVE);
}
@SuppressWarnings("deprecation")
- private void allocateCaches() {
+ private void allocateCaches() {
if (this.clusterContainerService == null) {
log
.info("un-initialized clusterContainerService, can't create cache");
}
@SuppressWarnings("deprecation")
- private void destroyCaches() {
+ private void destroyCaches() {
if (this.clusterContainerService == null) {
log
.info("un-initialized clusterContainerService, can't destroy cache");
return status;
}
if (staticRouteConfigs.get(config.getName()) != null) {
- return new Status(StatusCode.CONFLICT,
- "A valid Static Route configuration with this name " +
- "already exists. Please use a different name");
+ return new Status(StatusCode.CONFLICT,
+ "A valid Static Route configuration with this name " +
+ "already exists. Please use a different name");
}
for (StaticRouteConfig s : staticRouteConfigs.values()) {
if (s.equals(config)) {
- return new Status(StatusCode.CONFLICT,
- "This conflicts with an existing Static Route " +
- "Configuration. Please check the configuration " +
- "and try again");
+ return new Status(StatusCode.CONFLICT,
+ "This conflicts with an existing Static Route " +
+ "Configuration. Please check the configuration " +
+ "and try again");
}
}
StaticRoute sRoute = new StaticRoute(config);
staticRoutes.put(config.getName(), sRoute);
checkAndUpdateListeners(sRoute, true);
- return status;
+ return status;
}
public Status removeStaticRoute(String name) {
checkAndUpdateListeners(sRoute, false);
return new Status(StatusCode.SUCCESS, null);
}
- return new Status(StatusCode.NOTFOUND,
- "Static Route with name " + name + " is not found");
+ return new Status(StatusCode.NOTFOUND,
+ "Static Route with name " + name + " is not found");
}
void setClusterContainerService(IClusterContainerServices s) {
import org.opendaylight.controller.sal.utils.StatusCode;
public class StaticRouteConfigTest {
-
- @Test
- public void testStaticRouteSetGet() {
- StaticRouteConfig staticRouteConfig1 = new StaticRouteConfig();
- staticRouteConfig1.setName("route");
- staticRouteConfig1.setStaticRoute("10.1.1.2/32");
- staticRouteConfig1.setNextHop("200.2.2.2");
- staticRouteConfig1.setNextHopType(NextHopType.IPADDRESS.toString());
- StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("route", "10.1.1.2/32", "200.2.2.2");
-
- Assert.assertEquals(staticRouteConfig2.getName(), staticRouteConfig1.getName());
- Assert.assertEquals(staticRouteConfig2.getStaticRoute(), staticRouteConfig1.getStaticRoute());
- Assert.assertEquals(staticRouteConfig2.getNextHop(), staticRouteConfig1.getNextHop());
- Assert.assertEquals("nexthop-ip", staticRouteConfig1.getNextHopType());
- }
-
- @Test
- public void testStaticRouteisValid() {
+
+ @Test
+ public void testStaticRouteSetGet() {
+ StaticRouteConfig staticRouteConfig1 = new StaticRouteConfig();
+ staticRouteConfig1.setName("route");
+ staticRouteConfig1.setStaticRoute("10.1.1.2/32");
+ staticRouteConfig1.setNextHop("200.2.2.2");
+ staticRouteConfig1.setNextHopType(NextHopType.IPADDRESS.toString());
+ StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("route", "10.1.1.2/32", "200.2.2.2");
+
+ Assert.assertEquals(staticRouteConfig2.getName(), staticRouteConfig1.getName());
+ Assert.assertEquals(staticRouteConfig2.getStaticRoute(), staticRouteConfig1.getStaticRoute());
+ Assert.assertEquals(staticRouteConfig2.getNextHop(), staticRouteConfig1.getNextHop());
+ Assert.assertEquals("nexthop-ip", staticRouteConfig1.getNextHopType());
+ }
+
+ @Test
+ public void testStaticRouteisValid() {
StaticRouteConfig staticRouteConfig1 = new StaticRouteConfig("route1", "10.1.1.254/24", "100.1.1.1");
- Status receivedResponse1 = staticRouteConfig1.isValid();
- Status expectedResponse1 = new Status(StatusCode.SUCCESS, null);
- Assert.assertEquals(expectedResponse1.toString(), receivedResponse1.toString());
-
+ Status receivedResponse1 = staticRouteConfig1.isValid();
+ Status expectedResponse1 = new Status(StatusCode.SUCCESS, null);
+ Assert.assertEquals(expectedResponse1.toString(), receivedResponse1.toString());
+
StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("", "", "100.1.1.1");
- Status receivedResponse2 = staticRouteConfig2.isValid();
- Status expectedResponse2 = new Status(StatusCode.BADREQUEST,
- "Invalid Static Route name");
- Assert.assertEquals(expectedResponse2.toString(), receivedResponse2.toString());
+ Status receivedResponse2 = staticRouteConfig2.isValid();
+ Status expectedResponse2 = new Status(StatusCode.BADREQUEST,
+ "Invalid Static Route name");
+ Assert.assertEquals(expectedResponse2.toString(), receivedResponse2.toString());
StaticRouteConfig staticRouteConfig3 = new StaticRouteConfig("route1", "10.1.1.254", "100.1.1.1");
- Status receivedResponse3 = staticRouteConfig3.isValid();
- Status expectedResponse3 = new Status(StatusCode.BADREQUEST,
- "Invalid Static Route entry. Please use the " +
- "IPAddress/mask format. Default gateway " +
- "(0.0.0.0/0) is NOT supported.");
- Assert.assertEquals(expectedResponse3.toString(), receivedResponse3.toString());
+ Status receivedResponse3 = staticRouteConfig3.isValid();
+ Status expectedResponse3 = new Status(StatusCode.BADREQUEST,
+ "Invalid Static Route entry. Please use the " +
+ "IPAddress/mask format. Default gateway " +
+ "(0.0.0.0/0) is NOT supported.");
+ Assert.assertEquals(expectedResponse3.toString(), receivedResponse3.toString());
StaticRouteConfig staticRouteConfig4 = new StaticRouteConfig("route1", "289.1.1.254/24", "100.1.1.1");
- Status receivedResponse4 = staticRouteConfig4.isValid();
- Status expectedResponse4 = new Status(StatusCode.BADREQUEST,
- "Invalid Static Route entry. Please use the " +
- "IPAddress/mask format. Default gateway " +
- "(0.0.0.0/0) is NOT supported.");
- Assert.assertEquals(expectedResponse4.toString(), receivedResponse4.toString());
-
+ Status receivedResponse4 = staticRouteConfig4.isValid();
+ Status expectedResponse4 = new Status(StatusCode.BADREQUEST,
+ "Invalid Static Route entry. Please use the " +
+ "IPAddress/mask format. Default gateway " +
+ "(0.0.0.0/0) is NOT supported.");
+ Assert.assertEquals(expectedResponse4.toString(), receivedResponse4.toString());
+
StaticRouteConfig staticRouteConfig5 = new StaticRouteConfig("route1", "10.1.1.254/24", "100.1.1");
- Status receivedResponse5 = staticRouteConfig5.isValid();
- Status expectedResponse5 = new Status(StatusCode.BADREQUEST,
- "Invalid NextHop IP Address configuration. " +
- "Please use the X.X.X.X format.");
- Assert.assertEquals(expectedResponse5.toString(), receivedResponse5.toString());
- }
-
- @Test
- public void testGetStaticRouteIP() {
+ Status receivedResponse5 = staticRouteConfig5.isValid();
+ Status expectedResponse5 = new Status(StatusCode.BADREQUEST,
+ "Invalid NextHop IP Address configuration. " +
+ "Please use the X.X.X.X format.");
+ Assert.assertEquals(expectedResponse5.toString(), receivedResponse5.toString());
+ }
+
+ @Test
+ public void testGetStaticRouteIP() {
StaticRouteConfig staticRouteConfig1 = new StaticRouteConfig("route1", "10.1.1.0/24", "100.1.1.1");
InetAddress ip1 = staticRouteConfig1.getStaticRouteIP();
- Assert.assertEquals("10.1.1.0", ip1.getHostAddress());
-
+ Assert.assertEquals("10.1.1.0", ip1.getHostAddress());
+
StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("route1", "10.1.1.0/80", "100.1.1.1");
InetAddress ip2 = staticRouteConfig2.getStaticRouteIP();
- Assert.assertEquals(null, ip2);
+ Assert.assertEquals(null, ip2);
+
+ }
- }
-
- @Test
- public void testGetStaticRouteMask() {
+ @Test
+ public void testGetStaticRouteMask() {
StaticRouteConfig staticRouteConfig1 = new StaticRouteConfig("route1", "10.1.1.0/24", "100.1.1.1");
- Short receivedMaskLen1 = staticRouteConfig1.getStaticRouteMask();
- Short expectedMaskLen1 = 24;
- Assert.assertEquals(expectedMaskLen1, receivedMaskLen1);
-
- StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("route1", "10.1.1.0/40", "100.1.1.1");
- Short receivedMaskLen2 = staticRouteConfig2.getStaticRouteMask();
- Short expectedMaskLen2 = 0;
- Assert.assertEquals(expectedMaskLen2, receivedMaskLen2);
- }
-
- @Test
- public void testGetNextHopIP() {
+ Short receivedMaskLen1 = staticRouteConfig1.getStaticRouteMask();
+ Short expectedMaskLen1 = 24;
+ Assert.assertEquals(expectedMaskLen1, receivedMaskLen1);
+
+ StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("route1", "10.1.1.0/40", "100.1.1.1");
+ Short receivedMaskLen2 = staticRouteConfig2.getStaticRouteMask();
+ Short expectedMaskLen2 = 0;
+ Assert.assertEquals(expectedMaskLen2, receivedMaskLen2);
+ }
+
+ @Test
+ public void testGetNextHopIP() {
StaticRouteConfig staticRouteConfig1 = new StaticRouteConfig("route1", "10.1.1.254/24", "100.1.1.1");
InetAddress ip1 = staticRouteConfig1.getNextHopIP();
- Assert.assertEquals("100.1.1.1", ip1.getHostAddress());
+ Assert.assertEquals("100.1.1.1", ip1.getHostAddress());
- StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("route1", "10.1.1.254/24", "100.1.1");
+ StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("route1", "10.1.1.254/24", "100.1.1");
InetAddress ip2 = staticRouteConfig2.getNextHopIP();
- Assert.assertEquals(null, ip2);
- }
-
-}
+ Assert.assertEquals(null, ip2);
+ }
+}
import org.opendaylight.controller.sal.utils.NodeCreator;
public class StaticRouteTest {
-
- @Test
- public void testStaticRouteGetSet() {
- StaticRoute staticRoute = new StaticRoute();
- InetAddress networkAddress = null;
- InetAddress mask = null;
- InetAddress nextHopAddress = null;
- try {
- networkAddress = InetAddress.getByName("10.1.1.0");
- mask = InetAddress.getByName("255.255.255.0");
- nextHopAddress = InetAddress.getByName("200.0.0.1");
-
- } catch (UnknownHostException e) {
- Assert.assertTrue(false);
- }
- staticRoute.setNetworkAddress(networkAddress);
- Assert.assertEquals(networkAddress.getHostAddress(), staticRoute.getNetworkAddress().getHostAddress());
- staticRoute.setMask(mask);
- Assert.assertEquals(mask.getHostAddress(), staticRoute.getMask().getHostAddress());
- staticRoute.setType(NextHopType.IPADDRESS);
- Assert.assertEquals("nexthop-ip", staticRoute.getType().toString());
- staticRoute.setNextHopAddress(nextHopAddress);
- Assert.assertEquals(nextHopAddress.getHostAddress(), staticRoute.getNextHopAddress().getHostAddress());
- Node node = NodeCreator.createOFNode(((long)10));
- staticRoute.setNode(node);
- Assert.assertEquals(node, staticRoute.getNode());
- NodeConnector nc0 = NodeConnectorCreator.createOFNodeConnector((short)20, node);
- staticRoute.setPort(nc0);
- Assert.assertEquals(nc0, staticRoute.getPort());
+
+ @Test
+ public void testStaticRouteGetSet() {
+ StaticRoute staticRoute = new StaticRoute();
+ InetAddress networkAddress = null;
+ InetAddress mask = null;
+ InetAddress nextHopAddress = null;
+ try {
+ networkAddress = InetAddress.getByName("10.1.1.0");
+ mask = InetAddress.getByName("255.255.255.0");
+ nextHopAddress = InetAddress.getByName("200.0.0.1");
+
+ } catch (UnknownHostException e) {
+ Assert.assertTrue(false);
+ }
+ staticRoute.setNetworkAddress(networkAddress);
+ Assert.assertEquals(networkAddress.getHostAddress(), staticRoute.getNetworkAddress().getHostAddress());
+ staticRoute.setMask(mask);
+ Assert.assertEquals(mask.getHostAddress(), staticRoute.getMask().getHostAddress());
+ staticRoute.setType(NextHopType.IPADDRESS);
+ Assert.assertEquals("nexthop-ip", staticRoute.getType().toString());
+ staticRoute.setNextHopAddress(nextHopAddress);
+ Assert.assertEquals(nextHopAddress.getHostAddress(), staticRoute.getNextHopAddress().getHostAddress());
+ Node node = NodeCreator.createOFNode(((long)10));
+ staticRoute.setNode(node);
+ Assert.assertEquals(node, staticRoute.getNode());
+ NodeConnector nc0 = NodeConnectorCreator.createOFNodeConnector((short)20, node);
+ staticRoute.setPort(nc0);
+ Assert.assertEquals(nc0, staticRoute.getPort());
InetAddress ip1 = null;
HostNodeConnector h1 = null;
try {
}
staticRoute.setHost(h1);
Assert.assertEquals(h1, staticRoute.getHost());
- }
-
- @Test
- public void testStaticRouteComparison() {
+ }
+
+ @Test
+ public void testStaticRouteComparison() {
StaticRouteConfig staticRouteConfig1 = new StaticRouteConfig("route1", "10.1.1.0/24", "100.1.1.1");
StaticRouteConfig staticRouteConfig2 = new StaticRouteConfig("route2", "10.1.1.0/24", "100.2.1.1");
StaticRouteConfig staticRouteConfig3 = new StaticRouteConfig("route3", "10.2.1.0/24", "100.3.1.1");
Assert.assertTrue(staticRoute1.equals(staticRoute2));
Assert.assertFalse(staticRoute1.equals(staticRoute3));
Assert.assertFalse(staticRoute1.equals(staticRoute4));
-
+
Assert.assertTrue(staticRoute1.compareTo(staticRoute2) == 0 ? true : false);
Assert.assertFalse(staticRoute1.compareTo(staticRoute3) == 0 ? true : false);
Assert.assertTrue(staticRoute1.compareTo(staticRoute4) == 0 ? true : false);
-
- }
-
- @Test
- public void testLongestPrefixMatch() {
+
+ }
+
+ @Test
+ public void testLongestPrefixMatch() {
StaticRouteConfig staticRouteConfig1 = new StaticRouteConfig("route1", "10.1.1.254/24", "100.1.1.1");
StaticRoute staticRoute1 = new StaticRoute(staticRouteConfig1);
- InetAddress ip1 = null;
- InetAddress ip2 = null;
- try {
- ip1 = InetAddress.getByName("10.1.0.2");
- ip2 = InetAddress.getByName("10.1.1.2");
- } catch (UnknownHostException e) {
- Assert.assertTrue(false);
- }
+ InetAddress ip1 = null;
+ InetAddress ip2 = null;
+ try {
+ ip1 = InetAddress.getByName("10.1.0.2");
+ ip2 = InetAddress.getByName("10.1.1.2");
+ } catch (UnknownHostException e) {
+ Assert.assertTrue(false);
+ }
InetAddress rxdIp1 = staticRoute1.longestPrefixMatch(ip1);
InetAddress rxdIp2 = staticRoute1.longestPrefixMatch(ip2);
- Assert.assertEquals(null, rxdIp1);
- Assert.assertEquals("10.1.1.0", rxdIp2.getHostAddress());
- }
+ Assert.assertEquals(null, rxdIp1);
+ Assert.assertEquals("10.1.1.0", rxdIp2.getHostAddress());
+ }
}
import org.junit.Test;
public class StaticRoutingImplementationTest {
-
- @Test
- public void isIPv4AddressValidTest() {
- StaticRoutingImplementation staticRouteImpl = new StaticRoutingImplementation();
- Assert.assertTrue(staticRouteImpl.isIPv4AddressValid("192.168.100.0/24"));
- Assert.assertFalse(staticRouteImpl.isIPv4AddressValid("192.168.100.0/36"));
- Assert.assertFalse(staticRouteImpl.isIPv4AddressValid("192.168.300.0/32"));
- }
-}
-
-
-
+ @Test
+ public void isIPv4AddressValidTest() {
+ StaticRoutingImplementation staticRouteImpl = new StaticRoutingImplementation();
+ Assert.assertTrue(staticRouteImpl.isIPv4AddressValid("192.168.100.0/24"));
+ Assert.assertFalse(staticRouteImpl.isIPv4AddressValid("192.168.100.0/36"));
+ Assert.assertFalse(staticRouteImpl.isIPv4AddressValid("192.168.300.0/32"));
+ }
+}
/**
* Return the actual Flow contained in this entry
- *
+ *
* @return the flow
*/
public Flow getFlow() {
/**
* Merges the current Flow with the passed Container Flow
- *
+ *
* Note: Container Flow merging is not an injective function. Be m1 and m2
* two different matches, and be f() the flow merge function, such that y1 =
* f(m1) and y2 = f(m2) are the two merged matches, we may have: y1 = y2
- *
- *
+ *
+ *
* @param containerFlow
* @return this merged FlowEntry
*/
* install, the container flow with which that entry had to be merged and the
* resultant merged flow entry, which is the one that was eventually installed
* on the network node
- *
+ *
* Note: If the container flow is null, the install entry will be a clone of the
* original entry
- *
+ *
*/
public class FlowEntryInstall {
private FlowEntry original;
public void setRequestId(long rid) {
this.requestId = rid;
}
-
+
public long getRequestId() {
return requestId;
}
/**
* Interface that describes methods for installing or removing forwarding rules
* and to access to the flows database.
- *
+ *
*/
public interface IForwardingRulesManager {
* SDN protocol plugin to install the flow on the network node. Based on the
* result of this operation FRM will update its database accordingly and
* will return the proper {@code Status} code.
- *
+ *
* @param flow
* the flow entry to install
* @return the {@code Status} object indicating the result of this action.
* protocol plugin to uninstall the flow from the network node. Based on the
* result of this operation FRM will update its database accordingly and
* will return the proper {@code Status} code.
- *
+ *
* @param flow
* the flow entry to uninstall
* @return the {@code Status} object indicating the result of this action
* modify message depending on the SDN protocol specifications If the
* current flow is equal to the new one it will be a no op and success code
* is returned.
- *
+ *
* @param current
* the current flow entry to modify
* @param newone
* it, it will request plugin to add the new flow. If the passed entry is
* not valid an error code is returned. If the existing flow is equal to the
* passed one it will be a no op and success code is returned.
- *
- *
+ *
+ *
* @param newone
* the new flow entry to install
* @return the {@code Status} object indicating the result of this action
* SDN protocol plugin to install the flow on the network node. As immediate
* result of this asynchronous call, FRM will update its flow database as if
* the flow was successfully installed.
- *
+ *
* @param flow
* the flow entry to install
* @return the status of this request containing the request id associated
* SDN protocol plugin to uninstall the flow from the network node. As
* immediate result of this asynchronous call, FRM will update its flow
* database as if the flow was successfully installed.
- *
+ *
* @param flow
* the flow entry to uninstall
* @return the status of this request containing the unique id associated to
* this message to the network node. It could be a delete + add or a single
* modify message depending on the SDN protocol specifications. If the
* current flow is equal to the new one it will be a no op.
- *
+ *
* @param current
* the current flow entry to modify
* @param newone
* it, it will request plugin to add the new flow. If the passed entry is
* not valid a zero request id is returned. If the existing flow is equal to
* the passed one it will be a no op.
- *
+ *
* @param newone
* the new flow entry to install
* @return the unique id associated to this request. In case of not
* blocked until the solicitation response is received from the network node
* or receive timeout. Otherwise, it is a non-blocking call and does not
* guarantee the node will respond in any given time.
- *
+ *
* @param node
* The network node to solicit a response
* @param blocking
/**
* Check whether the passed flow entry conflicts with the Container flows
- *
+ *
* @param flow
* the flow entry to test
* @return true if conflicts, false otherwise
/**
* Returns the list of Flow entries across network nodes which are part of
* the same flow group, policy
- *
+ *
* @param group
* the group name
* @return the list of flow entries belonging to the specified group
/**
* Add a list of output port to the flow with the specified name on the
* specified network node
- *
+ *
* @param node
* the network node
* @param flowName
/**
* Remove a list of output port from the flow with the specified name on the
* specified network node
- *
+ *
* @param node
* the network node
* @param flowName
/**
* Replace the current output port in the specified flow with the specified
* one
- *
+ *
* @param node
* the network node
* @param groupName
/**
* Returns the output port configured on the specified flow
- *
+ *
* @param node
* the network node
* @param flowName
* Returns all the troubleshooting information that applications have set
* along with the policy they have configured through forwarding rules
* manger.
- *
+ *
* @return the collection of troubleshooting objects
*/
public Map<String, Object> getTSPolicyData();
/**
* Set the troubleshooting information for the policy
- *
+ *
* @param policyname
* the flow group name
* @param o
/**
* Returns the troubleshooting information that was set for the specified
* policy
- *
+ *
* @param groupName
* the flows group name
* @return the troubleshooting info object
/**
* Returns the specifications of all the flows configured for all the
* switches on the current container
- *
+ *
* @return the list of flow configurations present in the database
*/
public List<FlowConfig> getStaticFlows();
/**
* Returns the specifications of all the flows configured for the given
* switch on the current container
- *
+ *
* @param node
* the network node identifier
* @return the list of {@code FlowConfig} objects
/**
* Returns the specification of the flow configured for the given network
* node on the current container
- *
+ *
* @param name
* the flow name
* @param n
/**
* Returns the list of names of flows configured for the given Network node
* on the current container
- *
+ *
* @param node
* the network node identifier
* @return the list of flow names
/**
* Returns the list of Node(s) for which a static flow has been configured
- *
+ *
* @return the list of network nodes
*/
public List<Node> getListNodeWithConfiguredFlows();
/**
* Save the flow configured so far to file
- *
+ *
* @return the {@code Status} object indicating the result of this action.
*/
public Status saveConfig();
/**
* Add a flow specified by the {@code FlowConfig} object on the current
* container
- *
+ *
* @param config
* the {@code FlowConfig} object representing the static flow
* @param restore
/**
* Remove a flow specified by the {@code FlowConfig} object on the current
* container
- *
+ *
* @param config
* the {@code FlowConfig} object representing the static flow
* @return the {@code Status} object indicating the result of this action
* Replace the flow identified by the {@code FlowConfig.name} name for the
* {@code FlowConfig.node} network node with the new flow specified by
* {@code FlowConfig} object
- *
+ *
* @param config
* the {@code FlowConfig} object
* @returnthe {@code Status} object indicating the result of this action
/**
* Remove the flow specified by name on the passed network node
- *
+ *
* @param name
* for the static flow
* @param node
* Toggle the installation status of the specified configured flow If the
* flow configuration status is active, this call will change the flow
* status to inactive and vice-versa
- *
+ *
* @param configObject
* the {@code FlowConfig} object
* @return the {@code Status} object indicating the result of this action
* Toggle the installation status of the specified configured flow If the
* flow configuration status is active, this call will change the flow
* status to inactive and vice-versa
- *
+ *
* @param name
* for the static flow
* @param node
/**
* The interface which describes the methods forwarding rules manager will call
* for notifying the listeners of policy installation updates.
- *
+ *
*/
public interface IForwardingRulesManagerAware {
/**
* Inform the listeners that a troubleshooting information was added or
* removed for the specified policy.
- *
+ *
* @param policyName
* the policy affected
* @param add
* Inform listeners that the network node has notified us about a failure in
* executing the controller generated asynchronous request identified by the
* passed unique id.
- *
+ *
* @param requestId
* the unique id associated with the request which failed to be
* executed on the network node
/**
* PortGroup is a simple data-structure to represent any arbitrary group of
* ports on a Switch (that is represented using its switch-ID).
- *
+ *
* PortGroup is used by PortGroupProvider application to signal a set of ports
* that represent a configured PortGroupConfig.
- *
- *
+ *
+ *
*/
public class PortGroup {
private long matrixSwitchId;
/**
* PortGroup Constructor using Switch and Ports.
- *
+ *
* @param matrixSwitchId
* Switch Id that represents an openflow Switch
* @param ports
/**
* PortGroup Constructor using Switch.
- *
+ *
* @param matrixSwitchId
* Switch-Id that represents an openflow Switch
*/
/**
* Returns the switchId representing the Switch that makes this PortGroup.
- *
+ *
* @return long switchId
*/
public long getMatrixSwitchId() {
/**
* Assigns a Switch to this PortGroup
- *
+ *
* @param matrixSwitchId
* Switch-Id that represents an openflow Switch
*/
/**
* Returns the Set of Ports that makes this PortGroup.
- *
+ *
* @return Set of short values representing openflow port-ids.
*/
public Set<Short> getPorts() {
/**
* Assigns a set of openflow ports to this PortGroup
- *
+ *
* @param ports
* Set of short values representing openflow port-ids.
*/
/**
* Adds a port to this PortGroup
- *
+ *
* @param port
* Short value of a openflow port.
*/
/**
* PortGroupChangeListener listens to the PortGroup updates provided by the
* PortGroupProvider.
- *
- *
+ *
+ *
*/
public interface PortGroupChangeListener {
/**
* This method is invoked by PortGroupProvider whenever it detects a change
* in PortGroup membership for a given PortGroupConfig.
- *
+ *
* @param config
* Port Group Configuration
* @param portGroupData
/**
* PortGroupConfig class represents the User's Configuration with a Opaque
* Regular Expression String that is parsed and handled by PortGroupProvider.
- *
+ *
* Typically, the opaque matchString will be a Regular Expression String
* supported by a particular PortGroupProvider based on Customer requirements.
- *
- *
- *
+ *
+ *
+ *
*/
public class PortGroupConfig implements Serializable {
private static final long serialVersionUID = 1L;
/**
* Constructor to create a Port Group Configuration using a Group Name and
* an Opaque String that is managed by PortGroupProvider.
- *
+ *
* @param name
* Group Name representing a Port Group configuration
* @param matchString
/**
* Returns the user configured PortGroup Configuration name.
- *
+ *
* @return Configuration Name
*/
public String getName() {
/**
* Assigns a name to the configuration
- *
+ *
* @param name
* configuration name
*/
/**
* Returns the Opaque string
- *
+ *
* @return
*/
public String getMatchString() {
/**
* Assigns an opaque String to the Configuration.
- *
+ *
* @param matchString
* Opaque string handled by PortGroupProvider
*/
/**
* Returns the names of all the configurable fields in PortGroupConfig. This
* method is typically used by NorthBound apis.
- *
+ *
* @return List of Field names that can be configured.
*/
public static List<String> getFieldsNames() {
* Returns the names of all the configurable fields in PortGroupConfig in
* human readable format for UI purposes. This method is typically used by
* Web/UI apis.
- *
+ *
* @return List of Human readable Strings that corresponds to the
* configurable field names.
*/
* application to implement in order to provide Port Grouping Service. Custom
* Application that implements this interface will have to handle the opaque
* match criteria passed to it via PortGroupConfig.
- *
- *
- *
+ *
+ *
+ *
*/
public interface PortGroupProvider {
/**
* This method is invoked by the Controller towards the Provider when a new
* port group is configured.
- *
+ *
* @param config
* New PortGroupConfig object created by user Configuration.
* @return true if successful. false otherwise.
/**
* This method is invoked by the Controller towards the Provider when an
* existing port group is deleted.
- *
+ *
* @param config
* Existing Port Group Configuration deleted by the user.
* @return true if successful. false otherwise.
* Configuration. Its the PortGroupProvider Application's responsibility to
* manage the Switches & the Set of its Ports that correspond to each of the
* Configuration and return it to the Controller when requested.
- *
+ *
* @param config
* User Configuration
* @see PortGroupConfig
* PortGroupProvider Application's responsibility to manage the Switches &
* the Set of its Ports that correspond to each of the Configuration and
* return it to the Controller when requested.
- *
+ *
* @param config
* User Configuration
* @param matrixSwitchId
/**
* Registers a Listener for Port Group membership changes based on Custom
* application algorithm.
- *
+ *
* @param listener
* A Controller module that listens to events from the Custom
* Port Grouping Application.
* implement Custom Algorithm for Port Grouping. This method exposes the
* custom algorithm to the user so that the user can configure the
* matchString regular expression in PortGroupConfig appropriately.
- *
+ *
* @return Usage string.
*/
public String getApplicationDrivenMatchCriteriaUsage();
/**
* Returns the name of the Custom Application that implements
* PortGroupProvider interface.
- *
+ *
* @return Provider Name
*/
public String getProviderName();
/**
* Controller uses this method to check with the Provider supports the
* matchCriteria String configured by the User.
- *
+ *
* @param matchCriteria
* @return true if the Provider supports the matchCriteria String. false
* otherwise.
/**
* Function called when the activator starts just after some initializations
* are done by the ComponentActivatorAbstractBase.
- *
+ *
*/
public void init() {
/**
* Function called when the activator stops just before the cleanup done by
* ComponentActivatorAbstractBase
- *
+ *
*/
public void destroy() {
/**
* Function that is used to communicate to dependency manager the list of
* known implementations for services inside a container
- *
- *
+ *
+ *
* @return An array containing all the CLASS objects that will be
* instantiated in order to get an fully working implementation
* Object
/**
* Function that is called when configuration of the dependencies is
* required.
- *
+ *
* @param c
* dependency manager Component object, used for configuring the
* dependencies exported and imported
* Adds a flow entry onto the network node It runs various validity checks
* and derive the final container flows merged entries that will be
* attempted to be installed
- *
+ *
* @param flowEntry
* the original flow entry application requested to add
* @param async
* will contain the unique id assigned to this request
*/
private Status addEntry(FlowEntry flowEntry, boolean async) {
-
+
// Sanity Check
if (flowEntry == null || flowEntry.getNode() == null) {
String msg = "Invalid FlowEntry";
* complication for now and assume we will always deal with
* one flow only per request
*/
- succeded = ret;
+ succeded = ret;
} else {
error = ret;
log.warn("Failed to install the entry: {}. The failure is: {}",
* flow entry is congruent with all the N container flows, then the output
* install entry list will contain N entries. If the output list is empty,
* it means the passed flow entry conflicts with all the container flows.
- *
+ *
* @param cFlowList
* The list of container flows
* @return the list of container flow merged entries good to be installed on
/**
* Modify a flow entry with a new one It runs various validity check and
* derive the final container flows merged flow entries to work with
- *
+ *
* @param currentFlowEntry
* @param newFlowEntry
* @param async
* This is only possible when the new entry and current entry have
* different match. In this scenario the modification would ultimately
* be handled as a remove and add operations in the protocol plugin.
- *
+ *
* Also, if any of the new flow entries would clash with an existing
* one, we cannot proceed with the modify operation, because it would
* fail for some entries and leave stale entries on the network node.
* Modify path can be taken only if it can be performed completely, for
* all entries.
- *
+ *
* So, for the above two cases, to simplify, let's decouple the modify
* in: 1) remove current entries 2) install new entries
*/
* (and might be wrong) that the same container flows that were
* satisfied by the current entries are the same that are satisfied
* by the new entries. Let's take the risk for now.
- *
+ *
* Note: modification has to be complete. If any entry modification
* fails, we need to stop, restore the already modified entries, and
* declare failure.
* This is the function that modifies the final container flows merged
* entries on the network node and update the database. It expects that all
* the validity checks are passed
- *
+ *
* @param currentEntries
* @param newEntries
* @param async
private Status modifyEntryInternal(FlowEntryInstall currentEntries,
FlowEntryInstall newEntries, boolean async) {
// Modify the flow on the network node
- Status status = (async)?
+ Status status = (async)?
programmer.modifyFlowAsync(currentEntries.getNode(),
currentEntries.getInstall().getFlow(), newEntries.getInstall()
.getFlow()) :
/**
* Remove a flow entry. If the entry is not present in the software view
* (entry or node not present), it return successfully
- *
+ *
* @param flowEntry
* the flow entry to remove
* @param async
* This is the function that removes the final container flows merged entry
* from the network node and update the database. It expects that all the
* validity checks are passed
- *
+ *
* @param entry
* the flow entry to remove
* @param async
.getInstall().getFlow()) :
programmer.removeFlow(entry.getNode(), entry
.getInstall().getFlow());
-
+
if (!status.isSuccess()) {
log.warn(
* on the network node and updates the database. It expects that all the
* validity and conflict checks are passed. That means it does not check
* whether this flow would conflict or overwrite an existing one.
- *
+ *
* @param entry
* the flow entry to install
* @param async
* means that if the function returns true, the passed flow entry is
* congruent with at least one container flow, hence it is good to be
* installed on this container.
- *
+ *
* @param flowEntry
* @return true if flow conflicts with all the container flows, false
* otherwise
}
return status;
}
-
+
@Override
public Status modifyFlowEntry(FlowEntry currentFlowEntry,
FlowEntry newFlowEntry) {
}
}
-
+
/**
* Try to find in the database if a Flow with the same Match and priority of
* the passed one already exists for the specified network node. Flow,
* priority and network node are all specified in the FlowEntry If found,
* the respective FlowEntryInstall Object is returned
- *
+ *
* @param flowEntry
* the FlowEntry to be tested against the ones installed
* @param looseCheck
* multiple entry configuration (PortGroup) and hardware installation is
* NOT done directly on this event. 3. The User prefers to retain the
* configuration in Controller and skip hardware installation.
- *
+ *
* Hence it is safe to update the StaticFlow DB at this point.
- *
+ *
* Note : For the case of PortGrouping, it is essential to have this DB
* populated before the PortGroupListeners can query for the DB
* triggered using portGroupChanged event...
/**
* Remove from the databases all the flows installed on the node
- *
+ *
* @param node
*/
private synchronized void cleanDatabaseForNode(Node node) {
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init() {
frmAware = Collections
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
destroyCaches();
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
/*
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
}
/*
* If this was for a flow install, remove the corresponding entry
* from the software view. If it was a Looking for the rid going through the
- * software database.
- * TODO: A more efficient rid <-> FlowEntryInstall mapping will
+ * software database.
+ * TODO: A more efficient rid <-> FlowEntryInstall mapping will
* have to be added in future
*/
Set<FlowEntryInstall> entries = nodeFlows.get(node);
this.updateLocalDatabase(target, false);
}
}
-
+
// Notify listeners
if (frmAware != null) {
synchronized (frmAware) {
}
}
}
-
+
@Override
public Status solicitStatusResponse(Node node, boolean blocking) {
Status rv = new Status(StatusCode.INTERNALERROR);
-
+
if (this.programmer != null) {
if (blocking) {
rv = programmer.syncSendBarrierMessage(node);
} else {
- rv = programmer.asyncSendBarrierMessage(node);
+ rv = programmer.asyncSendBarrierMessage(node);
}
}
-
+
return rv;
}
}
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
/**
- * This interface defines the method to notify detected Host on the
- * network. The information includes Host's IP address, MAC address,
- * switch ID, port, and VLAN.
+ * This interface defines the method to notify detected Host on the network. The
+ * information includes Host's IP address, MAC address, switch ID, port, and
+ * VLAN.
*
*/
public interface IfHostListener {
/**
- * Learns new Hosts. Called by ArpHandler and implemented in
- * HostTracker.java. If a Host is learned for the first time then
- * adds it to the local database and informs other applications
- * of coming up a new Host. For the hosts which it has already
- * learned, it refreshes them.
+ * Learns new Hosts. Called by ArpHandler and implemented in
+ * HostTracker.java. If a Host is learned for the first time then adds it to
+ * the local database and informs other applications of coming up a new
+ * Host. For the hosts which it has already learned, it refreshes them.
*
- * @param host Host info encapsulated in HostNodeConnector class
+ * @param host
+ * Host info encapsulated in HostNodeConnector class
*/
public void hostListener(HostNodeConnector host);
}
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import java.util.List;
import java.util.Set;
import java.util.concurrent.Future;
-import org.opendaylight.controller.sal.core.NodeConnector;
+
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
+import org.opendaylight.controller.sal.core.NodeConnector;
import org.opendaylight.controller.sal.utils.Status;
/**
- * This interface defines the methods to retrieve information about
- * learned Hosts. Also provides methods to statically add/remove
- * Hosts from the local database.
+ * This interface defines the methods to retrieve information about learned
+ * Hosts. Also provides methods to statically add/remove Hosts from the local
+ * database.
*
*/
public interface IfIptoHost {
/**
- * Applications call this interface methods to determine IP address to MAC binding and its
- * connectivity to an OpenFlow switch in term of Node, Port, and VLAN. These
- * bindings are learned dynamically as well as can be added statically through
- * Northbound APIs. If a binding is unknown, then an ARP request is initiated
- * immediately to discover the host.
+ * Applications call this interface methods to determine IP address to MAC
+ * binding and its connectivity to an OpenFlow switch in term of Node, Port,
+ * and VLAN. These bindings are learned dynamically as well as can be added
+ * statically through Northbound APIs. If a binding is unknown, then an ARP
+ * request is initiated immediately to discover the host.
*
- * @param networkAddress IP Address of the Host encapsulated in class InetAddress
- * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
- * Class that contains the Host info such as its MAC address,
- * Switch ID, port, VLAN. If Host is not found, returns NULL
+ * @param networkAddress
+ * IP Address of the Host encapsulated in class InetAddress
+ * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * Class that contains the Host info such as its MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
*/
public HostNodeConnector hostFind(InetAddress networkAddress);
* Checks the local Host Database to see if a Host has been learned for a
* given IP address.
*
- * @param networkAddress IP Address of the Host encapsulated in class InetAddress
- * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
- * Class that contains the Host info such as its MAC address,
- * Switch ID, port, VLAN. If Host is not found, returns NULL
+ * @param networkAddress
+ * IP Address of the Host encapsulated in class InetAddress
+ * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * Class that contains the Host info such as its MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
*
*/
public HostNodeConnector hostQuery(InetAddress networkAddress);
* Initiates an immediate discovery of the Host for a given IP address. This
* provides for the calling applications to block on the host discovery.
*
- * @param networkAddress IP address encapsulated in InetAddress class
- * @return Future {@link org.opendaylight.controller.hosttracker.HostTrackerCallable}
+ * @param networkAddress
+ * IP address encapsulated in InetAddress class
+ * @return Future
+ * {@link org.opendaylight.controller.hosttracker.HostTrackerCallable}
*/
public Future<HostNodeConnector> discoverHost(InetAddress networkAddress);
/**
- * Returns the Network Hierarchy for a given Host. This API is typically used by
- * applications like Hadoop for Rack Awareness functionality.
+ * Returns the Network Hierarchy for a given Host. This API is typically
+ * used by applications like Hadoop for Rack Awareness functionality.
*
- * @param IP address of the Host encapsulated in InetAddress class
- * @return List of String ArrayList containing the Hierarchies.
+     * @param hostAddress
+     *            IP address of the Host encapsulated in InetAddress class
+ * @return List of String ArrayList containing the Hierarchies.
*/
public List<List<String>> getHostNetworkHierarchy(InetAddress hostAddress);
/**
- * Returns all the the Hosts either learned dynamically or added statically via
- * Northbound APIs.
+     * Returns all the Hosts either learned dynamically or added statically
+ * via Northbound APIs.
*
- * @return Set of {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}.
- * Class that contains the Host info such as its MAC address,
- * Switch ID, port, VLAN.
+ * @return Set of
+ * {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * . Class that contains the Host info such as its MAC address,
+ * Switch ID, port, VLAN.
*/
public Set<HostNodeConnector> getAllHosts();
/**
- * Returns all the "Active Hosts" learned "Statically" via Northbound APIs. These Hosts
- * are categorized as "Active" because the Switch and Port they are connected to, are in
- * up state.
+ * Returns all the "Active Hosts" learned "Statically" via Northbound APIs.
+ * These Hosts are categorized as "Active" because the Switch and Port they
+ * are connected to, are in up state.
*
- * @return Set of {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}.
- * Class that contains the Host info such as MAC address,
- * Switch ID, port, VLAN. If Host is not found, returns NULL
+ * @return Set of
+ * {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * . Class that contains the Host info such as MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
*/
public Set<HostNodeConnector> getActiveStaticHosts();
/**
- * Returns all the "Inactive Hosts" learned "Statically" via Northbound APIs. These Hosts
- * are categorized as "Inactive" because either the Switch or the Port they are connected
- * to, is in down state.
+ * Returns all the "Inactive Hosts" learned "Statically" via Northbound
+ * APIs. These Hosts are categorized as "Inactive" because either the Switch
+ * or the Port they are connected to, is in down state.
*
- * @return Set of HostNodeConnector {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}.
- * HostNodeConnector is Class that
- * contains the Host info such as its MAC address, OpenFlowNode
- * ID, port, VLAN.
+ * @return Set of HostNodeConnector
+ * {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * . HostNodeConnector is Class that contains the Host info such as
+ * its MAC address, OpenFlowNode ID, port, VLAN.
*/
public Set<HostNodeConnector> getInactiveStaticHosts();
/**
- * Hosts can be learned dynamically or added statically. This method allows the addition
- * of a Host to the local database statically.
+ * Hosts can be learned dynamically or added statically. This method allows
+ * the addition of a Host to the local database statically.
*
- * @param networkAddress IP Address of the Host
- * @param dataLayerAddress MAC Address of the Host
- * @param nc NodeConnector to which the host is attached
- * @param vlan VLAN the host belongs to
- * @return The status object as described in {@code Status}
- * indicating the result of this action.
+ * @param networkAddress
+ * IP Address of the Host
+ * @param dataLayerAddress
+ * MAC Address of the Host
+ * @param nc
+ * NodeConnector to which the host is attached
+ * @param vlan
+ * VLAN the host belongs to
+ * @return The status object as described in {@code Status} indicating the
+ * result of this action.
*/
public Status addStaticHost(String networkAddress, String dataLayerAddress,
- NodeConnector nc, String vlan);
+ NodeConnector nc, String vlan);
/**
* Allows the deletion of statically learned Host
*
* @param networkAddress
- * @return The status object as described in {@code Status}
- * indicating the result of this action.
+ * @return The status object as described in {@code Status} indicating the
+ * result of this action.
*/
public Status removeStaticHost(String networkAddress);
}
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
/**
- * This Interface defines the methods for client applications of
- * Host Tracker to get notifications when a new host is learned or
- * existing host is removed from the network.
+ * This Interface defines the methods for client applications of Host Tracker to
+ * get notifications when a new host is learned or existing host is removed from
+ * the network.
*
*/
public interface IfNewHostNotify {
/**
* Notifies the HostTracker Clients that a new Host has been learned
*
- * @param host Host Info encapsulated in HostNodeConnector class
+ * @param host
+ * Host Info encapsulated in HostNodeConnector class
*/
public void notifyHTClient(HostNodeConnector host);
/**
- * Notifies the HostTracker Clients that a Host which was learned in
- * the past has been removed either due to switch/port down event or
- * due to ARP Aging
+ * Notifies the HostTracker Clients that a Host which was learned in the
+ * past has been removed either due to switch/port down event or due to ARP
+ * Aging
*
- * @param host Host Info encapsulated in HostNodeConnector class
+ * @param host
+ * Host Info encapsulated in HostNodeConnector class
*/
public void notifyHTClientHostRemoved(HostNodeConnector host);
}
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.sal.core.NodeConnector;
import org.opendaylight.controller.sal.packet.address.EthernetAddress;
-@XmlRootElement(name="host")
+@XmlRootElement(name = "host")
@XmlAccessorType(XmlAccessType.NONE)
public class HostNodeConnector extends Host {
private static final long serialVersionUID = 1L;
return !Arrays.equals(emptyArray, macaddr);
}
- /* (non-Javadoc)
+ /*
+ * (non-Javadoc)
+ *
* @see java.lang.Object#toString()
*/
@Override
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
package org.opendaylight.controller.hosttracker.hostAware;
import java.net.InetAddress;
-import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
-
/**
- * This Interface defines the methods to trigger the discovery of
- * a Host and to probe if a learned Host is still in the network.
+ * This Interface defines the methods to trigger the discovery of a Host and to
+ * probe if a learned Host is still in the network.
*
*
*
*/
public interface IHostFinder {
/**
- * This method initiates the discovery of a host based on its IP address. This is triggered
- * by query of an application to the HostTracker. The requested IP address
- * doesn't exist in the local database at this point.
+ * This method initiates the discovery of a host based on its IP address.
+ * This is triggered by query of an application to the HostTracker. The
+ * requested IP address doesn't exist in the local database at this point.
*
- * @param networkAddress IP Address encapsulated in InetAddress class
+ * @param networkAddress
+ * IP Address encapsulated in InetAddress class
*
*/
public void find(InetAddress networkAddress);
/**
- * This method is called by HostTracker to see if a learned Host is still in the network.
- * Used mostly for ARP Aging.
+ * This method is called by HostTracker to see if a learned Host is still in
+ * the network. Used mostly for ARP Aging.
*
- * @param host The Host that needs to be probed
+ * @param host
+ * The Host that needs to be probed
*/
public void probe(HostNodeConnector host);
}
-\r
/*\r
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.\r
*\r
import org.opendaylight.controller.sal.core.NodeConnector;\r
import org.opendaylight.controller.sal.utils.NodeCreator;\r
\r
-\r
public class HostNodeConnectorTest extends TestCase {\r
\r
- @Test\r
- public void testHostNodeConnector() throws UnknownHostException {\r
- HostNodeConnector hostnodeconnector_1, hostnodeconnector_2, hostnodeconnector_3;\r
- InetAddress hostIP_1 = InetAddress.getByName("192.168.0.8");\r
- InetAddress hostIP_2 = InetAddress.getByName("2001:420:281:1004:e123:e688:d655:a1b0");\r
- InetAddress hostIP_3 = InetAddress.getByName("192.168.0.28");\r
- byte[] hostMAC_2 = new byte[]{(byte)0x11,(byte)0x22,(byte)0x33,(byte)0x22,(byte)0x22,(byte)0x22};\r
- byte[] hostMAC_3 = new byte[]{(byte)0x11,(byte)0x22,(byte)0x33,(byte)0x33,(byte)0x33,(byte)0x33};\r
- \r
- Node node = NodeCreator.createOFNode(1L);\r
- NodeConnector nc1 = NodeConnectorCreator.createOFNodeConnector((short) 2, node);\r
- NodeConnector nc2 = NodeConnectorCreator.createOFNodeConnector((short) 1, node);\r
- \r
- try {\r
- hostnodeconnector_1 = new HostNodeConnector(hostIP_1);\r
- Assert.assertTrue(hostnodeconnector_1.equalsByIP(hostIP_1));\r
- Assert.assertTrue(hostnodeconnector_1.isV4Host());\r
- Assert.assertTrue(hostnodeconnector_1.equalsByIP(hostIP_1));\r
- } catch (ConstructionException e) {\r
- Assert.assertTrue(false);\r
- }\r
- \r
- try {\r
- hostnodeconnector_2 = new HostNodeConnector(\r
- hostMAC_2, hostIP_2, nc1, (short)2);\r
- Assert.assertTrue(hostnodeconnector_2.isV6Host());\r
- Assert.assertTrue(hostnodeconnector_2.getnodeConnector().equals(nc1));\r
- Assert.assertTrue(hostnodeconnector_2.getnodeconnectorNode().equals(node));\r
- Assert.assertTrue(node.equals(hostnodeconnector_2.getnodeconnectorNode()));\r
- } catch (ConstructionException e) {\r
- Assert.assertTrue(false);\r
- }\r
- \r
- try {\r
- hostnodeconnector_3 = new HostNodeConnector(\r
- new EthernetAddress(hostMAC_3), hostIP_3, nc2, (short)3);\r
- byte[] hostMAC_3_rb = hostnodeconnector_3.getDataLayerAddressBytes();\r
- HostNodeConnector hostnodeconnector_3rb = new HostNodeConnector(\r
- new EthernetAddress(hostMAC_3_rb), hostIP_3, nc2, (short)3);\r
- Assert.assertTrue(hostnodeconnector_3.equals(hostnodeconnector_3rb));\r
- \r
- Assert.assertTrue(hostnodeconnector_3.getVlan() == (short)3);\r
- \r
- hostnodeconnector_3.setStaticHost(true);\r
- Assert.assertTrue(hostnodeconnector_3.isStaticHost());\r
- \r
- Assert.assertTrue(hostnodeconnector_3.isRewriteEnabled());\r
- \r
- hostnodeconnector_3.initArpSendCountDown().setArpSendCountDown((short) 10);\r
- Assert.assertTrue(hostnodeconnector_3.getArpSendCountDown() == (short)10);\r
- \r
- } catch (ConstructionException e) {\r
- Assert.assertTrue(false);\r
- }\r
- \r
- }\r
+ @Test\r
+ public void testHostNodeConnector() throws UnknownHostException {\r
+ HostNodeConnector hostnodeconnector_1, hostnodeconnector_2, hostnodeconnector_3;\r
+ InetAddress hostIP_1 = InetAddress.getByName("192.168.0.8");\r
+ InetAddress hostIP_2 = InetAddress\r
+ .getByName("2001:420:281:1004:e123:e688:d655:a1b0");\r
+ InetAddress hostIP_3 = InetAddress.getByName("192.168.0.28");\r
+ byte[] hostMAC_2 = new byte[] { (byte) 0x11, (byte) 0x22, (byte) 0x33,\r
+ (byte) 0x22, (byte) 0x22, (byte) 0x22 };\r
+ byte[] hostMAC_3 = new byte[] { (byte) 0x11, (byte) 0x22, (byte) 0x33,\r
+ (byte) 0x33, (byte) 0x33, (byte) 0x33 };\r
+\r
+ Node node = NodeCreator.createOFNode(1L);\r
+ NodeConnector nc1 = NodeConnectorCreator.createOFNodeConnector(\r
+ (short) 2, node);\r
+ NodeConnector nc2 = NodeConnectorCreator.createOFNodeConnector(\r
+ (short) 1, node);\r
+\r
+ try {\r
+ hostnodeconnector_1 = new HostNodeConnector(hostIP_1);\r
+ Assert.assertTrue(hostnodeconnector_1.equalsByIP(hostIP_1));\r
+ Assert.assertTrue(hostnodeconnector_1.isV4Host());\r
+ Assert.assertTrue(hostnodeconnector_1.equalsByIP(hostIP_1));\r
+ } catch (ConstructionException e) {\r
+ Assert.assertTrue(false);\r
+ }\r
+\r
+ try {\r
+ hostnodeconnector_2 = new HostNodeConnector(hostMAC_2, hostIP_2,\r
+ nc1, (short) 2);\r
+ Assert.assertTrue(hostnodeconnector_2.isV6Host());\r
+ Assert.assertTrue(hostnodeconnector_2.getnodeConnector()\r
+ .equals(nc1));\r
+ Assert.assertTrue(hostnodeconnector_2.getnodeconnectorNode()\r
+ .equals(node));\r
+ Assert.assertTrue(node.equals(hostnodeconnector_2\r
+ .getnodeconnectorNode()));\r
+ } catch (ConstructionException e) {\r
+ Assert.assertTrue(false);\r
+ }\r
+\r
+ try {\r
+ hostnodeconnector_3 = new HostNodeConnector(new EthernetAddress(\r
+ hostMAC_3), hostIP_3, nc2, (short) 3);\r
+ byte[] hostMAC_3_rb = hostnodeconnector_3\r
+ .getDataLayerAddressBytes();\r
+ HostNodeConnector hostnodeconnector_3rb = new HostNodeConnector(\r
+ new EthernetAddress(hostMAC_3_rb), hostIP_3, nc2, (short) 3);\r
+ Assert.assertTrue(hostnodeconnector_3.equals(hostnodeconnector_3rb));\r
+\r
+ Assert.assertTrue(hostnodeconnector_3.getVlan() == (short) 3);\r
+\r
+ hostnodeconnector_3.setStaticHost(true);\r
+ Assert.assertTrue(hostnodeconnector_3.isStaticHost());\r
+\r
+ Assert.assertTrue(hostnodeconnector_3.isRewriteEnabled());\r
+\r
+ hostnodeconnector_3.initArpSendCountDown().setArpSendCountDown(\r
+ (short) 10);\r
+ Assert.assertTrue(hostnodeconnector_3.getArpSendCountDown() == (short) 10);\r
+\r
+ } catch (ConstructionException e) {\r
+ Assert.assertTrue(false);\r
+ }\r
+\r
+ }\r
\r
}\r
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
package org.opendaylight.controller.hosttracker.internal;
import org.apache.felix.dm.Component;
-import org.opendaylight.controller.hosttracker.internal.HostTracker;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.hosttracker.IfHostListener;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.IfNewHostNotify;
import org.opendaylight.controller.hosttracker.hostAware.IHostFinder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
import org.opendaylight.controller.switchmanager.IInventoryListener;
import org.opendaylight.controller.switchmanager.ISwitchManager;
import org.opendaylight.controller.switchmanager.ISwitchManagerAware;
import org.opendaylight.controller.topologymanager.ITopologyManager;
import org.opendaylight.controller.topologymanager.ITopologyManagerAware;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class Activator extends ComponentActivatorAbstractBase {
protected static final Logger logger = LoggerFactory
.getLogger(Activator.class);
/**
- * Function called when the activator starts just after some
- * initializations are done by the
- * ComponentActivatorAbstractBase.
+ * Function called when the activator starts just after some initializations
+ * are done by the ComponentActivatorAbstractBase.
*
*/
public void init() {
}
/**
- * Function called when the activator stops just before the
- * cleanup done by ComponentActivatorAbstractBase
+ * Function called when the activator stops just before the cleanup done by
+ * ComponentActivatorAbstractBase
*
*/
public void destroy() {
}
/**
- * Function that is used to communicate to dependency manager the
- * list of known implementations for services inside a container
+ * Function that is used to communicate to dependency manager the list of
+ * known implementations for services inside a container
*
*
* @return An array containing all the CLASS objects that will be
- * instantiated in order to get an fully working implementation
- * Object
+     *         instantiated in order to get a fully working implementation
+ * Object
*/
public Object[] getImplementations() {
Object[] res = { HostTracker.class };
}
/**
- * Function that is called when configuration of the dependencies
- * is required.
+ * Function that is called when configuration of the dependencies is
+ * required.
*
- * @param c dependency manager Component object, used for
- * configuring the dependencies exported and imported
- * @param imp Implementation class that is being configured,
- * needed as long as the same routine can configure multiple
- * implementations
- * @param containerName The containerName being configured, this allow
- * also optional per-container different behavior if needed, usually
- * should not be the case though.
+ * @param c
+ * dependency manager Component object, used for configuring the
+ * dependencies exported and imported
+ * @param imp
+ * Implementation class that is being configured, needed as long
+ * as the same routine can configure multiple implementations
+ * @param containerName
+ * The containerName being configured, this allow also optional
+ * per-container different behavior if needed, usually should not
+ * be the case though.
*/
public void configureInstance(Component c, Object imp, String containerName) {
if (imp.equals(HostTracker.class)) {
// export the service
- c.setInterface(new String[] { ISwitchManagerAware.class.getName(),
- IInventoryListener.class.getName(),
- IfIptoHost.class.getName(), IfHostListener.class.getName(),
- ITopologyManagerAware.class.getName() }, null);
+ c.setInterface(
+ new String[] { ISwitchManagerAware.class.getName(),
+ IInventoryListener.class.getName(),
+ IfIptoHost.class.getName(),
+ IfHostListener.class.getName(),
+ ITopologyManagerAware.class.getName() }, null);
- c.add(createContainerServiceDependency(containerName).setService(
- ISwitchManager.class).setCallbacks("setSwitchManager",
- "unsetSwitchManager").setRequired(false));
- c.add(createContainerServiceDependency(containerName).setService(
- IClusterContainerServices.class).setCallbacks(
- "setClusterContainerService",
- "unsetClusterContainerService").setRequired(true));
- c.add(createContainerServiceDependency(containerName).setService(
- IHostFinder.class).setCallbacks("setArpHandler",
- "unsetArpHandler").setRequired(false));
- c.add(createContainerServiceDependency(containerName).setService(
- ITopologyManager.class).setCallbacks("setTopologyManager",
- "unsetTopologyManager").setRequired(false));
- c.add(createContainerServiceDependency(containerName).setService(
- IfNewHostNotify.class).setCallbacks("setnewHostNotify",
- "unsetnewHostNotify").setRequired(false));
+ c.add(createContainerServiceDependency(containerName)
+ .setService(ISwitchManager.class)
+ .setCallbacks("setSwitchManager", "unsetSwitchManager")
+ .setRequired(false));
+ c.add(createContainerServiceDependency(containerName)
+ .setService(IClusterContainerServices.class)
+ .setCallbacks("setClusterContainerService",
+ "unsetClusterContainerService").setRequired(true));
+ c.add(createContainerServiceDependency(containerName)
+ .setService(IHostFinder.class)
+ .setCallbacks("setArpHandler", "unsetArpHandler")
+ .setRequired(false));
+ c.add(createContainerServiceDependency(containerName)
+ .setService(ITopologyManager.class)
+ .setCallbacks("setTopologyManager", "unsetTopologyManager")
+ .setRequired(false));
+ c.add(createContainerServiceDependency(containerName)
+ .setService(IfNewHostNotify.class)
+ .setCallbacks("setnewHostNotify", "unsetnewHostNotify")
+ .setRequired(false));
}
}
/**
- * Method which tells how many Global implementations are
- * supported by the bundle. This way we can tune the number of
- * components created. This components will be created ONLY at the
- * time of bundle startup and will be destroyed only at time of
- * bundle destruction, this is the major difference with the
- * implementation retrieved via getImplementations where all of
- * them are assumed to be in a container !
+ * Method which tells how many Global implementations are supported by the
+ * bundle. This way we can tune the number of components created. This
+ * components will be created ONLY at the time of bundle startup and will be
+ * destroyed only at time of bundle destruction, this is the major
+ * difference with the implementation retrieved via getImplementations where
+ * all of them are assumed to be in a container !
*
*
- * @return The list of implementations the bundle will support,
- * in Global version
+ * @return The list of implementations the bundle will support, in Global
+ * version
*/
protected Object[] getGlobalImplementations() {
return null;
/**
* Configure the dependency for a given instance Global
*
- * @param c Component assigned for this instance, this will be
- * what will be used for configuration
- * @param imp implementation to be configured
- * @param containerName container on which the configuration happens
+ * @param c
+ * Component assigned for this instance, this will be what will
+ * be used for configuration
+ * @param imp
+ * implementation to be configured
+ * @param containerName
+ * container on which the configuration happens
*/
protected void configureGlobalInstance(Component c, Object imp) {
if (imp.equals(HostTracker.class)) {
import org.opendaylight.controller.clustering.services.CacheExistException;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.clustering.services.IClusterServices;
-import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
-import org.opendaylight.controller.hosttracker.hostAware.IHostFinder;
import org.opendaylight.controller.hosttracker.IfHostListener;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.IfNewHostNotify;
+import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
+import org.opendaylight.controller.hosttracker.hostAware.IHostFinder;
import org.opendaylight.controller.sal.core.ConstructionException;
import org.opendaylight.controller.sal.core.Edge;
import org.opendaylight.controller.sal.core.Host;
*/
removePendingARPFromList(i);
logger.debug("Host Removed from ARPPending List, IP: {}",
- networkAddr);
+ networkAddr);
return;
}
}
* there
*/
if (logger.isTraceEnabled()) {
- logger.trace(
- "ARP Probing ({}) for {}({})",
- new Object[] {
- arp_cntdown,
- host.getNetworkAddress().getHostAddress(),
- HexEncode.bytesToHexString(host
- .getDataLayerAddressBytes()) });
+ logger.trace(
+ "ARP Probing ({}) for {}({})",
+ new Object[] {
+ arp_cntdown,
+ host.getNetworkAddress()
+ .getHostAddress(),
+ HexEncode.bytesToHexString(host
+ .getDataLayerAddressBytes()) });
}
host.setArpSendCountDown(arp_cntdown);
hostFinder.probe(host);
for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.getnodeConnector().equals(nodeConnector)) {
- logger.debug(" NodeConnector: {} is down, remove from Hosts_DB", nodeConnector);
+ logger.debug(
+ " NodeConnector: {} is down, remove from Hosts_DB",
+ nodeConnector);
removeKnownHost(entry.getKey());
notifyHostLearnedOrRemoved(host, false);
}
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
/**
* This Class provides methods to discover Host through a blocking call
- * mechanism. Applications can make use of these methods if they don't
- * find a host in HostTracker's database and want to discover the host
+ * mechanism. Applications can make use of these methods if they don't
+ * find a host in HostTracker's database and want to discover the host
* in the same thread without being called by a callback function.
*/
import java.net.InetAddress;
-\r
/*\r
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.\r
*\r
\r
package org.opendaylight.controller.hosttracker.internal;\r
\r
-\r
import java.net.InetAddress;\r
import java.net.UnknownHostException;\r
import java.util.concurrent.Future;\r
\r
+import junit.framework.TestCase;\r
+\r
import org.junit.Assert;\r
import org.junit.Test;\r
-\r
-import junit.framework.TestCase;\r
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;\r
\r
public class HostTrackerTest extends TestCase {\r
\r
- @Test\r
- public void testHostTrackerCallable() throws UnknownHostException {\r
- \r
- HostTracker hostTracker = null;\r
- hostTracker = new HostTracker();\r
- Assert.assertFalse(hostTracker== null);\r
- \r
- InetAddress hostIP = InetAddress.getByName("192.168.0.8");\r
- \r
- HostTrackerCallable htCallable = new HostTrackerCallable (hostTracker, hostIP);\r
- Assert.assertTrue(htCallable.trackedHost.equals(hostIP));\r
- Assert.assertTrue(htCallable.hostTracker.equals(hostTracker));\r
-\r
- long count = htCallable.latch.getCount();\r
- htCallable.wakeup();\r
- Assert.assertTrue(htCallable.latch.getCount() == --count );\r
- } \r
- \r
- \r
- \r
- @Test\r
- public void testHostTracker() throws UnknownHostException {\r
- HostTracker hostTracker = null;\r
- hostTracker = new HostTracker();\r
- Assert.assertFalse(hostTracker== null);\r
- \r
- InetAddress hostIP_1 = InetAddress.getByName("192.168.0.8");\r
- InetAddress hostIP_2 = InetAddress.getByName("192.168.0.18");\r
- Future<HostNodeConnector> dschost = hostTracker.discoverHost(hostIP_1);\r
- dschost = hostTracker.discoverHost(hostIP_2);\r
- hostTracker.nonClusterObjectCreate();\r
- }\r
- \r
+ @Test\r
+ public void testHostTrackerCallable() throws UnknownHostException {\r
+\r
+ HostTracker hostTracker = null;\r
+ hostTracker = new HostTracker();\r
+ Assert.assertFalse(hostTracker == null);\r
+\r
+ InetAddress hostIP = InetAddress.getByName("192.168.0.8");\r
+\r
+ HostTrackerCallable htCallable = new HostTrackerCallable(hostTracker,\r
+ hostIP);\r
+ Assert.assertTrue(htCallable.trackedHost.equals(hostIP));\r
+ Assert.assertTrue(htCallable.hostTracker.equals(hostTracker));\r
+\r
+ long count = htCallable.latch.getCount();\r
+ htCallable.wakeup();\r
+ Assert.assertTrue(htCallable.latch.getCount() == --count);\r
+ }\r
+\r
+ @Test\r
+ public void testHostTracker() throws UnknownHostException {\r
+ HostTracker hostTracker = null;\r
+ hostTracker = new HostTracker();\r
+ Assert.assertFalse(hostTracker == null);\r
+\r
+ InetAddress hostIP_1 = InetAddress.getByName("192.168.0.8");\r
+ InetAddress hostIP_2 = InetAddress.getByName("192.168.0.18");\r
+ Future<HostNodeConnector> dschost = hostTracker.discoverHost(hostIP_1);\r
+ dschost = hostTracker.discoverHost(hostIP_2);\r
+ hostTracker.nonClusterObjectCreate();\r
+ }\r
\r
}\r
\r
@RunWith(PaxExam.class)\r
public class HostTrackerIT {\r
- private Logger log = LoggerFactory\r
- .getLogger(HostTrackerIT.class);\r
+ private Logger log = LoggerFactory.getLogger(HostTrackerIT.class);\r
// get the OSGI bundle context\r
@Inject\r
private BundleContext bc;\r
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>\r
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\r
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">\r
+ <modelVersion>4.0.0</modelVersion>\r
+ <parent>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>commons.opendaylight</artifactId>\r
+ <version>1.4.0-SNAPSHOT</version>\r
+ <relativePath>../../commons/opendaylight</relativePath>\r
+ </parent>\r
+ <artifactId>hosttracker_new</artifactId>\r
+ <version>0.4.0-SNAPSHOT</version>\r
+ <packaging>bundle</packaging>\r
+\r
+ <build>\r
+ <plugins>\r
+ <plugin>\r
+ <groupId>org.apache.felix</groupId>\r
+ <artifactId>maven-bundle-plugin</artifactId>\r
+ <version>2.3.6</version>\r
+ <extensions>true</extensions>\r
+ <configuration>\r
+ <instructions>\r
+ <Export-Package>\r
+ org.opendaylight.controller.hosttracker_new,\r
+ org.opendaylight.controller.hosttracker_new.hostAware\r
+ </Export-Package>\r
+ <Import-Package>\r
+ org.opendaylight.controller.sal.core,\r
+ org.opendaylight.controller.sal.utils,\r
+ org.opendaylight.controller.topologymanager,\r
+ org.opendaylight.controller.sal.packet.address,\r
+ org.opendaylight.controller.switchmanager,\r
+ org.opendaylight.controller.clustering.services,\r
+ javax.xml.bind.annotation,\r
+ javax.xml.bind,\r
+ org.apache.felix.dm,\r
+ org.apache.commons.lang3.builder,\r
+ org.osgi.service.component,\r
+ org.slf4j,\r
+ org.eclipse.osgi.framework.console,\r
+ org.osgi.framework\r
+ </Import-Package>\r
+ </instructions>\r
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>\r
+ </configuration>\r
+ </plugin>\r
+ </plugins>\r
+ </build>\r
+ <dependencies>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>topologymanager</artifactId>\r
+ <version>0.4.0-SNAPSHOT</version>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>switchmanager</artifactId>\r
+ <version>0.4.0-SNAPSHOT</version>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>clustering.services</artifactId>\r
+ <version>0.4.0-SNAPSHOT</version>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>sal</artifactId>\r
+ <version>0.5.0-SNAPSHOT</version>\r
+ </dependency>\r
+ </dependencies>\r
+</project>\r
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import java.util.Date;
+
+import org.opendaylight.controller.sal.core.NodeConnector;
+
+/**
+ * An entity on the network is a visible trace of a device that corresponds to a
+ * packet received from a particular interface on the edge of a network, with a
+ * particular VLAN tag, and a particular MAC address, along with any other
+ * packet characteristics we might want to consider as helpful for
+ * disambiguating devices.
+ *
+ * Entities are the most basic element of devices; devices consist of one or
+ * more entities. Entities are immutable once created, except for the last seen
+ * timestamp.
+ *
+ * @author readams
+ *
+ */
+public class Entity implements Comparable<Entity> {
+ /**
+ * Timeout for computing {@link Entity#activeSince}.
+ *
+ * @see {@link Entity#activeSince}
+ */
+ protected static int ACTIVITY_TIMEOUT = 30000;
+
+ /**
+ * The MAC address associated with this entity
+ */
+ protected long macAddress;
+
+ /**
+ * The IP address associated with this entity, or null if no IP learned from
+ * the network observation associated with this entity
+ */
+ protected Integer ipv4Address;
+
+ /**
+ * The VLAN tag on this entity, or null if untagged
+ */
+ protected Short vlan;
+
+ /**
+ * The attachment point for this entity
+ */
+ NodeConnector port;
+
+ /**
+ * The last time we observed this entity on the network
+ */
+ protected Date lastSeenTimestamp;
+
+ /**
+ * The time between {@link Entity#activeSince} and
+ * {@link Entity#lastSeenTimestamp} is a period of activity for this entity
+ * where it was observed repeatedly. If, when the entity is observed, the
+ * previous observation is longer ago than the activity timeout, {@link Entity#lastSeenTimestamp}
+ * and {@link Entity#activeSince} will be set to the current time.
+ */
+ protected Date activeSince;
+
+ private int hashCode = 0;
+
+ // ************
+ // Constructors
+ // ************
+
+ /**
+ * Create a new entity
+ *
+ * @param macAddress
+ * @param vlan
+ * @param ipv4Address
+ * @param switchDPID
+ * @param switchPort
+ * @param lastSeenTimestamp
+ */
+ public Entity(long macAddress, Short vlan, Integer ipv4Address,
+ NodeConnector port, Date lastSeenTimestamp) {
+ this.macAddress = macAddress;
+ this.ipv4Address = ipv4Address;
+ this.vlan = vlan;
+ this.port = port;
+ this.lastSeenTimestamp = lastSeenTimestamp;
+ this.activeSince = lastSeenTimestamp;
+ }
+
+ // ***************
+ // Getters/Setters
+ // ***************
+
+ // @JsonSerialize(using=MACSerializer.class)
+ public long getMacAddress() {
+ return macAddress;
+ }
+
+ // @JsonSerialize(using=IPv4Serializer.class)
+ public Integer getIpv4Address() {
+ return ipv4Address;
+ }
+
+ public Short getVlan() {
+ return vlan;
+ }
+
+ public NodeConnector getPort() {
+ return port;
+ }
+
+ // @JsonIgnore
+ public boolean hasSwitchPort() {
+ return port != null;
+ }
+
+ public Date getLastSeenTimestamp() {
+ return lastSeenTimestamp;
+ }
+
+ /**
+ * Set the last seen timestamp and also update {@link Entity#activeSince} if
+ * appropriate
+ *
+ * @param lastSeenTimestamp
+ * the new last seen timestamp
+ * @see {@link Entity#activeSince}
+ */
+ public void setLastSeenTimestamp(Date lastSeenTimestamp) {
+ if (activeSince == null
+ || (activeSince.getTime() + ACTIVITY_TIMEOUT) < lastSeenTimestamp
+ .getTime())
+ this.activeSince = lastSeenTimestamp;
+ this.lastSeenTimestamp = lastSeenTimestamp;
+ }
+
+ public Date getActiveSince() {
+ return activeSince;
+ }
+
+ public void setActiveSince(Date activeSince) {
+ this.activeSince = activeSince;
+ }
+
+ @Override
+ public int hashCode() {
+ if (hashCode != 0)
+ return hashCode;
+ final int prime = 31;
+ hashCode = 1;
+ hashCode = prime * hashCode
+ + ((ipv4Address == null) ? 0 : ipv4Address.hashCode());
+ hashCode = prime * hashCode + (int) (macAddress ^ (macAddress >>> 32));
+ hashCode = prime * hashCode + ((port == null) ? 0 : port.hashCode());
+ hashCode = prime * hashCode + ((vlan == null) ? 0 : vlan.hashCode());
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ Entity other = (Entity) obj;
+ if (ipv4Address == null) {
+ if (other.ipv4Address != null)
+ return false;
+ } else if (!ipv4Address.equals(other.ipv4Address))
+ return false;
+ if (macAddress != other.macAddress)
+ return false;
+ if (port == null) {
+ if (other.port != null)
+ return false;
+ } else if (!port.equals(other.port))
+ return false;
+ if (vlan == null) {
+ if (other.vlan != null)
+ return false;
+ } else if (!vlan.equals(other.vlan))
+ return false;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "Entity [macAddress=" + macAddress + ", ipv4Address="
+ + ipv4Address + ", vlan=" + vlan + ", port=" + port + "]";
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Override
+ public int compareTo(Entity o) {
+ int r;
+ if (port == null)
+ r = o.port == null ? 0 : -1;
+ else if (o.port == null)
+ r = 1;
+ else {
+ // XXX - the node id is only defined as an object rather
+ // than something useful. We're just going to have to
+ // blindly cast to Comparable and hope it works.
+ Comparable switchId = (Comparable) port.getNode().getID();
+ Comparable oswitchId = (Comparable) o.port.getNode().getID();
+ r = switchId.compareTo(oswitchId);
+ if (r != 0)
+ return r;
+
+ Comparable portId = (Comparable) port.getID();
+ Comparable oportId = (Comparable) o.port.getID();
+ r = portId.compareTo(oportId);
+ }
+ return r;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import java.util.Date;
+
+/**
+ * Represents an independent device on the network. A device consists of a set
+ * of entities, and all the information known about a given device comes only
+ * from merging all associated entities for that device.
+ *
+ * @author readams
+ */
+public interface IDevice {
+ /**
+ * Get the primary key for this device.
+ *
+ * @return the primary key
+ */
+ public Long getDeviceKey();
+
+ /**
+ * Get the MAC address of the device as a Long value.
+ *
+ * @return the MAC address for the device
+ */
+ public long getMACAddress();
+
+ /**
+ * Get the MAC address of the device as a String value.
+ *
+ * @return the MAC address for the device
+ */
+ public String getMACAddressString();
+
+ /**
+ * Get all unique VLAN IDs for the device. If the device has untagged
+ * entities, then the value -1 will be returned.
+ *
+ * @return an array containing all unique VLAN IDs for the device.
+ */
+ public Short[] getVlanId();
+
+ /**
+ * Get all unique IPv4 addresses associated with the device.
+ *
+ * @return an array containing the unique IPv4 addresses for the device.
+ */
+ public Integer[] getIPv4Addresses();
+
+ /**
+ * Get all unique attachment points associated with the device. This will
+ * not include any blocked attachment points.
+ *
+ * @return an array containing all unique attachment points for the device
+ */
+ public SwitchPort[] getAttachmentPoints();
+
+ /**
+ * Get all old attachment points associated with the device. This is used in
+ * host movement scenario.
+ *
+ * @return an array containing all unique old attachment points for the
+ * device
+ */
+ public SwitchPort[] getOldAP();
+
+ /**
+ * Get all unique attachment points associated with the device.
+ *
+ * @param includeError
+ * whether to include blocked attachment points. Blocked
+ * attachment points should not be used for forwarding, but could
+ * be useful to show to a user
+ * @return an array containing all unique attachment points for the device
+ */
+ public SwitchPort[] getAttachmentPoints(boolean includeError);
+
+ /**
+ * Returns all unique VLAN IDs for the device that were observed on the
+ * given switch port
+ *
+ * @param swp
+ * the switch port to query
+ * @return an array containing the unique VLAN IDs
+ */
+ public Short[] getSwitchPortVlanIds(SwitchPort swp);
+
+ /**
+ * Get the most recent timestamp for this device
+ *
+ * @return the last seen timestamp
+ */
+ public Date getLastSeen();
+
+ /**
+ * Get the entity class for the device.
+ *
+ * @return the entity class
+ * @see IEntityClassifierService
+ */
+ public IEntityClass getEntityClass();
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2011 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import org.opendaylight.controller.sal.utils.IListener;
+
+/**
+ * Implementors of this interface can receive updates from DeviceManager about
+ * the state of devices under its control.
+ *
+ * @author David Erickson (daviderickson@cs.stanford.edu)
+ */
+public interface IDeviceListener extends IListener<String> {
+ /**
+ * Called when a new Device is found
+ *
+ * @param device
+ * the device that changed
+ */
+ public void deviceAdded(IDevice device);
+
+ /**
+ * Called when a Device is removed, this typically occurs when the port the
+ * Device is attached to goes down, or the switch it is attached to is
+ * removed.
+ *
+ * @param device
+ * the device that changed
+ */
+ public void deviceRemoved(IDevice device);
+
+ /**
+ * Called when a Device has moved to a new location on the network. Note
+ * that either the switch or the port or both has changed.
+ *
+ * @param device
+ * the device that changed
+ */
+ public void deviceMoved(IDevice device);
+
+ /**
+ * Called when a network address has been added or removed from a device
+ *
+ * @param device
+ * the device that changed
+ */
+ public void deviceIPV4AddrChanged(IDevice device);
+
+ /**
+ * Called when a VLAN tag for the device has been added or removed
+ *
+ * @param device
+ * the device that changed
+ */
+ public void deviceVlanChanged(IDevice device);
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.Set;
+
+import org.opendaylight.controller.sal.core.NodeConnector;
+import org.osgi.service.device.Device;
+
+/**
+ * Device manager allows interacting with devices on the network. Note that
+ * under normal circumstances, {@link Device} objects should be retrieved from
+ * the {@link FloodlightContext} rather than from {@link IDeviceManager}.
+ */
+public interface IDeviceService {
+
+ /**
+ * Fields used in devices for indexes and querying
+ *
+ * @see IDeviceService#addIndex
+ */
+ enum DeviceField {
+ MAC, IPV4, VLAN, SWITCHPORT
+ }
+
+ /**
+ * The source device for the current packet-in, if applicable.
+ */
+ // public static final String CONTEXT_SRC_DEVICE =
+ // "net.floodlightcontroller.devicemanager.srcDevice";
+
+ /**
+ * The destination device for the current packet-in, if applicable.
+ */
+ // public static final String CONTEXT_DST_DEVICE =
+ // "net.floodlightcontroller.devicemanager.dstDevice";
+
+ /**
+ * The original destination device for the current packet-in
+ */
+ // public static final String CONTEXT_ORIG_DST_DEVICE =
+ // "net.floodlightcontroller.devicemanager.origDstDevice";
+
+ /**
+ * A FloodlightContextStore object that can be used to interact with the
+ * FloodlightContext information created by BVS manager.
+ */
+ // public static final FloodlightContextStore<IDevice> fcStore =
+ // new FloodlightContextStore<IDevice>();
+
+ /**
+ * Get the device with the given device key.
+ *
+ * @param deviceKey
+ * the key to search for
+ * @return the device associated with the key, or null if no such device
+ * @see IDevice#getDeviceKey()
+ */
+ public IDevice getDevice(Long deviceKey);
+
+ /**
+ * Search for a device exactly matching the provided device fields. This is
+ * the same lookup process that is used for packet_in processing and device
+ * learning. Thus, findDevice() can be used to match flow entries from
+ * switches to devices. Only the key fields as defined by the
+ * {@link IEntityClassifierService} will be important in this search. All
+ * key fields MUST be supplied.
+ *
+ * {@link queryDevices()} might be more appropriate!
+ *
+ * @param macAddress
+ * The MAC address
+ * @param vlan
+ * the VLAN. Null means no VLAN and is valid even if VLAN is a
+ * key field.
+ * @param ipv4Address
+ * the ipv4 address
+ * @param port
+ * the node connector
+ * @return an {@link IDevice} or null if no device is found.
+ * @see IDeviceManager#setEntityClassifier(IEntityClassifierService)
+ * @throws IllegalArgumentException
+ * if not all key fields of the current
+ * {@link IEntityClassifierService} are specified.
+ */
+ public IDevice findDevice(long macAddress, Short vlan, Integer ipv4Address,
+ NodeConnector port) throws IllegalArgumentException;
+
+ /**
+ * Get a destination device using entity fields that corresponds with the
+ * given source device. The source device is important since there could be
+ * ambiguity in the destination device without the attachment point
+ * information. Search for a device in a given entity class. This is the
+ * same as the lookup process for destination devices.
+ *
+ * Only the key fields as defined by the reference entity class will be
+ * important in this search. All key fields MUST be supplied.
+ *
+ * @param entityClass
+ * The entity class in which to perform the lookup.
+ * @param macAddress
+ * The MAC address for the destination
+ * @param vlan
+ * the VLAN if available
+ * @param ipv4Address
+ * The IP address if available.
+ * @return an {@link IDevice} or null if no device is found.
+ * @see IDeviceService#findDevice(long, Short, Integer, Long, Integer)
+ * @throws IllegalArgumentException
+ * if not all key fields of the source's {@link IEntityClass}
+ * are specified.
+ */
+ public IDevice findClassDevice(IEntityClass entityClass, long macAddress,
+ Short vlan, Integer ipv4Address) throws IllegalArgumentException;
+
+ /**
+ * Get an unmodifiable collection view over all devices currently known.
+ *
+ * @return the collection of all devices
+ */
+ public Collection<? extends IDevice> getAllDevices();
+
+ /**
+ * Create an index over a set of fields. This allows efficient lookup of
+ * devices when querying using the indexed set of specified fields. The
+ * index must be registered before any device learning takes place, or it
+ * may be incomplete. It's OK if this is called multiple times with the same
+ * fields; only one index will be created for each unique set of fields.
+ *
+ * @param perClass
+ * set to true if the index should be maintained for each entity
+ * class separately.
+ * @param keyFields
+ * the set of fields on which to index
+ */
+ public void addIndex(boolean perClass, EnumSet<DeviceField> keyFields);
+
+ /**
+ * Find devices that match the provided query. Any fields that are null will
+ * not be included in the query. If there is an index for the query, then it
+ * will be performed efficiently using the index. Otherwise, there will be a
+ * full scan of the device list.
+ *
+ * @param macAddress
+ * The MAC address
+ * @param vlan
+ * the VLAN
+ * @param ipv4Address
+ * the ipv4 address
+ * @param port
+ * the switch port
+ * @return an iterator over a set of devices matching the query
+ * @see IDeviceService#queryClassDevices(IEntityClass, Long, Short, Integer,
+ * Long, Integer)
+ */
+ public Iterator<? extends IDevice> queryDevices(Long macAddress,
+ Short vlan, Integer ipv4Address, NodeConnector port);
+
+ /**
+ * Find devices that match the provided query. Only the index for the
+ * specified class will be searched. Any fields that are null will not be
+ * included in the query. If there is an index for the query, then it will
+ * be performed efficiently using the index. Otherwise, there will be a full
+ * scan of the device list.
+ *
+ * @param entityClass
+ * The entity class in which to perform the query
+ * @param macAddress
+ * The MAC address
+ * @param vlan
+ * the VLAN
+ * @param ipv4Address
+ * the ipv4 address
+ * @param port
+ * the switch port
+ * @return an iterator over a set of devices matching the query
+ * @see IDeviceService#queryClassDevices(Long, Short, Integer, Long,
+ * Integer)
+ */
+ public Iterator<? extends IDevice> queryClassDevices(
+ IEntityClass entityClass, Long macAddress, Short vlan,
+ Integer ipv4Address, NodeConnector port);
+
+ /**
+ * Adds a listener to listen for IDeviceManagerServices notifications
+ *
+ * @param listener
+ * The listener that wants the notifications
+ * @param type
+ * The type of the listener
+ */
+ public void addListener(IDeviceListener listener);
+
+ /**
+ * Specify points in the network where attachment points are not to be
+ * learned.
+ *
+ * @param sw
+ * @param port
+ */
+ public void addSuppressAPs(NodeConnector port);
+
+ public void removeSuppressAPs(NodeConnector port);
+
+ public Set<SwitchPort> getSuppressAPs();
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import java.util.EnumSet;
+
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.osgi.service.device.Device;
+
+/**
+ * Entities within an entity class are grouped into {@link Device} objects based
+ * on the {@link IEntityClass}, and the key fields specified by the entity
+ * class. A set of entities are considered to be the same device if and only if
+ * they belong to the same entity class and they match on all key fields for
+ * that entity class. A field is effectively wildcarded by not including it in
+ * the list of key fields returned by {@link IEntityClassifierService} and/or
+ * {@link IEntityClass}.
+ *
+ * Note that if you're not using static objects, you'll need to override
+ * {@link Object#equals(Object)} and {@link Object#hashCode()}.
+ *
+ * @author readams
+ *
+ */
+public interface IEntityClass {
+ /**
+ * Return the set of key fields for this entity class. Entities belonging to
+ * this class that differ in fields not included in this collection will be
+ * considered the same device. The key fields for an entity class must not
+ * change unless associated with a flush of that entity class.
+ *
+ * @return a set containing the fields that should not be wildcarded. May be
+ * null to indicate that all fields are key fields.
+ */
+ EnumSet<DeviceField> getKeyFields();
+
+ /**
+ * Returns a user-friendly, unique name for this EntityClass
+ *
+ * @return the name of the entity class
+ */
+ String getName();
+}
--- /dev/null
+/*
+ * Copyright (c) 2011 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import java.util.Set;
+
+/**
+ * Implementors of this interface can receive updates from the Entity Classifier
+ * about the changes to entity Classes.
+ *
+ * @author Ananth Suryanarayana (Ananth.Suryanarayana@bigswitch.com)
+ */
+public interface IEntityClassListener {
+
+ /**
+ * Process entity classes change event.
+ *
+ * @param entityClassNames
+ * Set of entity classes changed
+ */
+ public void entityClassChanged(Set<String> entityClassNames);
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import java.util.Collection;
+import java.util.EnumSet;
+
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+
+/**
+ * A component that wishes to participate in entity classification needs to
+ * implement the IEntityClassifier interface, and register with the Device
+ * Manager as an entity classifier. An entity is classified by the classifier
+ * into an {@link IEntityClass}
+ *
+ * @author readams
+ */
+public interface IEntityClassifierService {
+ /**
+ * Classify the given entity into an IEntityClass. It is important that the
+ * key fields returned by {@link IEntityClassifierService#getKeyFields()} be
+ * sufficient for classifying entities. That is, if two entities are
+ * identical except for a field that is not a key field, they must be
+ * assigned the same class. Furthermore, entity classification must be
+ * transitive: For all entities x, y, z, if x and y belong to a class c, and
+ * y and z belong class c, then x and z must belong to class c.
+ *
+ * @param entity
+ * the entity to classify
+ * @return the IEntityClass resulting from the classification.
+ * @see IEntityClassifierService#getKeyFields()
+ */
+ IEntityClass classifyEntity(Entity entity);
+
+ /**
+ * Return the most general list of fields that should be used as key fields.
+ * If devices differ in any fields not listed here, they can never be
+ * considered a different device by any {@link IEntityClass} returned by
+ * {@link IEntityClassifierService#classifyEntity}. The key fields for an
+ * entity classifier must not change unless associated with a flush of all
+ * entity state. The list of key fields must be the union of all key fields
+ * that could be returned by {@link IEntityClass#getKeyFields()}.
+ *
+ * @return a set containing the fields that should not be wildcarded. May be
+ * null to indicate that all fields are key fields.
+ * @see {@link IEntityClass#getKeyFields()}
+ * @see {@link IEntityClassifierService#classifyEntity}
+ */
+ EnumSet<DeviceField> getKeyFields();
+
+ /**
+ * Reclassify the given entity into a class. When reclassifying entities, it
+ * can be helpful to take into account the current classification either as
+ * an optimization or to allow flushing any cached state tied to the key for
+ * that device. The entity will be assigned to a new device with a new
+ * object if the entity class returned is different from the entity class
+ * for curDevice.
+ *
+ * <p>
+ * Note that you must take steps to ensure you always return classes in some
+ * consistent ordering.
+ *
+ * @param curDevice
+ * the device currently associated with the entity
+ * @param entity
+ * the entity to reclassify
+ * @return the IEntityClass resulting from the classification
+ */
+ IEntityClass reclassifyEntity(IDevice curDevice, Entity entity);
+
+ /**
+ * Once reclassification is complete for a device, this method will be
+ * called. If any entities within the device changed their classification,
+ * it will split into one or more new devices for each of the entities. If
+ * two devices are merged because of a reclassification, then this will be
+ * called on each of the devices, with the same device in the newDevices
+ * collection.
+ *
+ * @param oldDevice
+ * the original device object
+ * @param newDevices
+ * all the new devices derived from the entities of the old
+ * device. If null, the old device was unchanged.
+ */
+ void deviceUpdate(IDevice oldDevice,
+ Collection<? extends IDevice> newDevices);
+
+ /**
+ * Adds a listener to listen for IEntityClassifierServices notifications
+ *
+ * @param listener
+ * The listener that wants the notifications
+ */
+ public void addListener(IEntityClassListener listener);
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
+
+/**
+ * This interface defines the method to notify detected Host on the network. The
+ * information includes Host's IP address, MAC address, switch ID, port, and
+ * VLAN.
+ *
+ */
+
+public interface IfHostListener {
+ /**
+ * Learns new Hosts. Called by ArpHandler and implemented in
+ * HostTracker.java. If a Host is learned for the first time then adds it to
+ * the local database and informs other applications of coming up a new
+ * Host. For the hosts which it has already learned, it refreshes them.
+ *
+ * @param host
+ * Host info encapsulated in HostNodeConnector class
+ */
+ public void hostListener(HostNodeConnector host);
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import java.net.InetAddress;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Future;
+
+import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
+import org.opendaylight.controller.sal.core.NodeConnector;
+import org.opendaylight.controller.sal.utils.Status;
+
+/**
+ * This interface defines the methods to retrieve information about learned
+ * Hosts. Also provides methods to statically add/remove Hosts from the local
+ * database.
+ *
+ */
+
+public interface IfIptoHost {
+ /**
+ * Applications call this interface methods to determine IP address to MAC
+ * binding and its connectivity to an OpenFlow switch in term of Node, Port,
+ * and VLAN. These bindings are learned dynamically as well as can be added
+ * statically through Northbound APIs. If a binding is unknown, then an ARP
+ * request is initiated immediately to discover the host.
+ *
+ * @param networkAddress
+ * IP Address of the Host encapsulated in class InetAddress
+ * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * Class that contains the Host info such as its MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
+ */
+ public HostNodeConnector hostFind(InetAddress networkAddress);
+
+ /**
+ * Checks the local Host Database to see if a Host has been learned for a
+ * given IP address.
+ *
+ * @param networkAddress
+ * IP Address of the Host encapsulated in class InetAddress
+ * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * Class that contains the Host info such as its MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
+ *
+ */
+ public HostNodeConnector hostQuery(InetAddress networkAddress);
+
+ /**
+ * Initiates an immediate discovery of the Host for a given IP address. This
+ * provides for the calling applications to block on the host discovery.
+ *
+ * @param networkAddress
+ * IP address encapsulated in InetAddress class
+ * @return Future
+ * {@link org.opendaylight.controller.hosttracker.HostTrackerCallable}
+ */
+ public Future<HostNodeConnector> discoverHost(InetAddress networkAddress);
+
+ /**
+ * Returns the Network Hierarchy for a given Host. This API is typically
+ * used by applications like Hadoop for Rack Awareness functionality.
+ *
+ * @param hostAddress
+ *            IP address of the Host encapsulated in InetAddress class
+ * @return List of String ArrayList containing the Hierarchies.
+ */
+ public List<List<String>> getHostNetworkHierarchy(InetAddress hostAddress);
+
+ /**
+ * Returns all the Hosts either learned dynamically or added statically
+ * via Northbound APIs.
+ *
+ * @return Set of
+ * {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * . Class that contains the Host info such as its MAC address,
+ * Switch ID, port, VLAN.
+ */
+ public Set<HostNodeConnector> getAllHosts();
+
+ /**
+ * Returns all the "Active Hosts" learned "Statically" via Northbound APIs.
+ * These Hosts are categorized as "Active" because the Switch and Port they
+ * are connected to, are in up state.
+ *
+ * @return Set of
+ * {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * . Class that contains the Host info such as MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
+ */
+ public Set<HostNodeConnector> getActiveStaticHosts();
+
+ /**
+ * Returns all the "Inactive Hosts" learned "Statically" via Northbound
+ * APIs. These Hosts are categorized as "Inactive" because either the Switch
+ * or the Port they are connected to, is in down state.
+ *
+ * @return Set of HostNodeConnector
+ * {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * . HostNodeConnector is Class that contains the Host info such as
+ * its MAC address, OpenFlowNode ID, port, VLAN.
+ */
+ public Set<HostNodeConnector> getInactiveStaticHosts();
+
+ /**
+ * Hosts can be learned dynamically or added statically. This method allows
+ * the addition of a Host to the local database statically.
+ *
+ * @param networkAddress
+ * IP Address of the Host
+ * @param dataLayerAddress
+ * MAC Address of the Host
+ * @param nc
+ * NodeConnector to which the host is attached
+ * @param vlan
+ * VLAN the host belongs to
+ * @return The status object as described in {@code Status} indicating the
+ * result of this action.
+ */
+ public Status addStaticHost(String networkAddress, String dataLayerAddress,
+ NodeConnector nc, String vlan);
+
+ /**
+ * Allows the deletion of statically learned Host
+ *
+ * @param networkAddress
+ * @return The status object as described in {@code Status} indicating the
+ * result of this action.
+ */
+ public Status removeStaticHost(String networkAddress);
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
+
+/**
+ * This Interface defines the methods for client applications of Host Tracker to
+ * get notifications when a new host is learned or existing host is removed from
+ * the network.
+ *
+ */
+public interface IfNewHostNotify {
+ /**
+ * Notifies the HostTracker Clients that a new Host has been learned
+ *
+ * @param host
+ * Host Info encapsulated in HostNodeConnector class
+ */
+ public void notifyHTClient(HostNodeConnector host);
+
+ /**
+ * Notifies the HostTracker Clients that a Host which was learned in the
+ * past has been removed either due to switch/port down event or due to ARP
+ * Aging
+ *
+ * @param host
+ * Host Info encapsulated in HostNodeConnector class
+ */
+ public void notifyHTClientHostRemoved(HostNodeConnector host);
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker;
+
+import org.opendaylight.controller.sal.core.NodeConnector;
+
+/**
+ * A simple switch DPID/port pair. This class is immutable.
+ *
+ * @author readams
+ *
+ */
+public class SwitchPort {
+ public enum ErrorStatus {
+ DUPLICATE_DEVICE("duplicate-device");
+
+ private String value;
+
+ ErrorStatus(String v) {
+ value = v;
+ }
+
+ @Override
+ public String toString() {
+ return value;
+ }
+
+ public static ErrorStatus fromString(String str) {
+ for (ErrorStatus m : ErrorStatus.values()) {
+ if (m.value.equals(str)) {
+ return m;
+ }
+ }
+ return null;
+ }
+ }
+
+ private final NodeConnector port;
+ private final ErrorStatus errorStatus;
+
+ /**
+ * Simple constructor
+ *
+ * @param switchDPID
+ * the dpid
+ * @param port
+ * the port
+ * @param errorStatus
+ * any error status for the switch port
+ */
+ public SwitchPort(NodeConnector port, ErrorStatus errorStatus) {
+ super();
+ this.port = port;
+ this.errorStatus = errorStatus;
+ }
+
+ /**
+ * Simple constructor
+ *
+ * @param switchDPID
+ * the dpid
+ * @param port
+ * the port
+ */
+ public SwitchPort(NodeConnector port) {
+ super();
+ this.port = port;
+ this.errorStatus = null;
+ }
+
+ // ***************
+ // Getters/Setters
+ // ***************
+
+ public NodeConnector getPort() {
+ return port;
+ }
+
+ public ErrorStatus getErrorStatus() {
+ return errorStatus;
+ }
+
+ // ******
+ // Object
+ // ******
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result
+ + ((errorStatus == null) ? 0 : errorStatus.hashCode());
+ result = prime * result + ((port == null) ? 0 : port.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ SwitchPort other = (SwitchPort) obj;
+ if (errorStatus != other.errorStatus)
+ return false;
+ if (port == null) {
+ if (other.port != null)
+ return false;
+ } else if (!port.equals(other.port))
+ return false;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "SwitchPort [port=" + port + ", errorStatus=" + errorStatus
+ + "]";
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.hosttracker.hostAware;
+
+import java.net.Inet4Address;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.util.Arrays;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.commons.lang3.builder.ReflectionToStringBuilder;
+import org.opendaylight.controller.sal.core.ConstructionException;
+import org.opendaylight.controller.sal.core.Host;
+import org.opendaylight.controller.sal.core.Node;
+import org.opendaylight.controller.sal.core.NodeConnector;
+import org.opendaylight.controller.sal.packet.address.EthernetAddress;
+
+@XmlRootElement(name = "host")
+@XmlAccessorType(XmlAccessType.NONE)
+public class HostNodeConnector extends Host {
+ private static final long serialVersionUID = 1L;
+ @XmlElement
+ private NodeConnector nodeConnector;
+ @XmlElement
+ private short vlan;
+ @XmlElement
+ private boolean staticHost;
+ private transient short arpSendCountDown;
+
+ /**
+ * Private constructor used for JAXB mapping
+ */
+ @SuppressWarnings("unused")
+ private HostNodeConnector() {
+ }
+
+ public HostNodeConnector(InetAddress ip) throws ConstructionException {
+ this(ip, null);
+ }
+
+ public HostNodeConnector(InetAddress ip, NodeConnector nc)
+ throws ConstructionException {
+ this(new EthernetAddress(new byte[] { (byte) 0x00, (byte) 0x00,
+ (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00 }), ip, nc,
+ (short) 0);
+ }
+
+ public HostNodeConnector(byte[] mac, InetAddress ip, NodeConnector nc,
+ short vlan) throws ConstructionException {
+ this(new EthernetAddress(mac.clone()), ip, nc, vlan);
+ }
+
+ public HostNodeConnector(EthernetAddress eaddr, InetAddress naddr,
+ NodeConnector nc, short vlan) throws ConstructionException {
+ super(eaddr, naddr);
+ this.nodeConnector = nc;
+ this.vlan = vlan;
+ }
+
+ /**
+ * @return the NodeConnector
+ */
+ public NodeConnector getnodeConnector() {
+ return this.nodeConnector;
+ }
+
+ /**
+ * @return the Node
+ */
+ public Node getnodeconnectorNode() {
+ return this.nodeConnector.getNode();
+ }
+
+ /**
+ * @return the NodeId
+ */
+ public Long getnodeconnectornodeId() {
+ return (Long) this.nodeConnector.getNode().getID();
+ }
+
+ /**
+ * @return the port
+ */
+ public Short getnodeconnectorportId() {
+ return (Short) this.nodeConnector.getID();
+ }
+
+ /**
+ * @return the DataLayerAddress
+ */
+ public byte[] getDataLayerAddressBytes() {
+ byte[] macaddr = null;
+ if (getDataLayerAddress() instanceof EthernetAddress) {
+ EthernetAddress e = (EthernetAddress) getDataLayerAddress();
+ macaddr = e.getValue();
+ }
+ return macaddr;
+ }
+
+ /**
+ * @return the vlan
+ */
+ public short getVlan() {
+ return this.vlan;
+ }
+
+ public boolean isStaticHost() {
+ return this.staticHost;
+ }
+
+ public HostNodeConnector setStaticHost(boolean statically_learned) {
+ this.staticHost = statically_learned;
+ return this;
+ }
+
+ public HostNodeConnector initArpSendCountDown() {
+ this.arpSendCountDown = 24;
+ return this;
+ }
+
+ public short getArpSendCountDown() {
+ return (this.arpSendCountDown);
+ }
+
+ public HostNodeConnector setArpSendCountDown(short cntdown) {
+ this.arpSendCountDown = cntdown;
+ return this;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = super.hashCode();
+ result = prime * result
+ + ((nodeConnector == null) ? 0 : nodeConnector.hashCode());
+ result = prime * result + (staticHost ? 1231 : 1237);
+ result = prime * result + vlan;
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (!super.equals(obj))
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ HostNodeConnector other = (HostNodeConnector) obj;
+ if (nodeConnector == null) {
+ if (other.nodeConnector != null)
+ return false;
+ } else if (!nodeConnector.equals(other.nodeConnector))
+ return false;
+ if (staticHost != other.staticHost)
+ return false;
+ if (vlan != other.vlan)
+ return false;
+ return true;
+ }
+
+ public boolean equalsByIP(InetAddress networkAddress) {
+ return (this.getNetworkAddress().equals(networkAddress));
+ }
+
+ public boolean isRewriteEnabled() {
+ byte[] emptyArray = new byte[] { (byte) 0x00, (byte) 0x00, (byte) 0x00,
+ (byte) 0x00, (byte) 0x00, (byte) 0x00 };
+ byte[] macaddr = null;
+ if (getDataLayerAddress() instanceof EthernetAddress) {
+ EthernetAddress e = (EthernetAddress) getDataLayerAddress();
+ macaddr = e.getValue();
+ }
+ if (macaddr == null)
+ return false;
+ return !Arrays.equals(emptyArray, macaddr);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return "HostNodeConnector[" + ReflectionToStringBuilder.toString(this)
+ + "]";
+ }
+
+ public boolean isV4Host() {
+ return (getNetworkAddress() instanceof Inet4Address);
+ }
+
+ public boolean isV6Host() {
+ return (getNetworkAddress() instanceof Inet6Address);
+ }
+
+ public String toJson() {
+ return "{\"host\":\"" + super.toString() + "\", " + "\"vlan\":\""
+ + String.valueOf(vlan) + "\",\"NodeConnector\":\""
+ + nodeConnector.toString() + "\"," + "\"static\":\""
+ + String.valueOf(isStaticHost()) + "\"}";
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.hosttracker.hostAware;
+
+import java.net.InetAddress;
+
+/**
+ * This Interface defines the methods to trigger the discovery of a Host and to
+ * probe if a learned Host is still in the network.
+ *
+ *
+ *
+ */
+public interface IHostFinder {
+ /**
+ * This method initiates the discovery of a host based on its IP address.
+ * This is triggered by query of an application to the HostTracker. The
+ * requested IP address doesn't exist in the local database at this point.
+ *
+ * @param networkAddress
+ * IP Address encapsulated in InetAddress class
+ *
+ */
+ public void find(InetAddress networkAddress);
+
+ /**
+ * This method is called by HostTracker to see if a learned Host is still in
+ * the network. Used mostly for ARP Aging.
+ *
+ * @param host
+ * The Host that needs to be probed
+ */
+ public void probe(HostNodeConnector host);
+}
--- /dev/null
+/*\r
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.\r
+ *\r
+ * This program and the accompanying materials are made available under the\r
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,\r
+ * and is available at http://www.eclipse.org/legal/epl-v10.html\r
+ */\r
+\r
+package org.opendaylight.controller.hosttracker.hostAware;\r
+\r
+import java.net.InetAddress;\r
+import java.net.UnknownHostException;\r
+\r
+import org.junit.Assert;\r
+import org.junit.Test;\r
+\r
+import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;\r
+import org.opendaylight.controller.sal.core.ConstructionException;\r
+import org.opendaylight.controller.sal.core.Node;\r
+\r
+import junit.framework.TestCase;\r
+\r
+import org.opendaylight.controller.sal.packet.address.EthernetAddress;\r
+\r
+import org.opendaylight.controller.sal.utils.NodeConnectorCreator;\r
+import org.opendaylight.controller.sal.core.NodeConnector;\r
+import org.opendaylight.controller.sal.utils.NodeCreator;\r
+\r
+public class HostNodeConnectorTest extends TestCase {\r
+\r
+ @Test\r
+ public void testHostNodeConnector() throws UnknownHostException {\r
+ HostNodeConnector hostnodeconnector_1, hostnodeconnector_2, hostnodeconnector_3;\r
+ InetAddress hostIP_1 = InetAddress.getByName("192.168.0.8");\r
+ InetAddress hostIP_2 = InetAddress\r
+ .getByName("2001:420:281:1004:e123:e688:d655:a1b0");\r
+ InetAddress hostIP_3 = InetAddress.getByName("192.168.0.28");\r
+ byte[] hostMAC_2 = new byte[] { (byte) 0x11, (byte) 0x22, (byte) 0x33,\r
+ (byte) 0x22, (byte) 0x22, (byte) 0x22 };\r
+ byte[] hostMAC_3 = new byte[] { (byte) 0x11, (byte) 0x22, (byte) 0x33,\r
+ (byte) 0x33, (byte) 0x33, (byte) 0x33 };\r
+\r
+ Node node = NodeCreator.createOFNode(1L);\r
+ NodeConnector nc1 = NodeConnectorCreator.createOFNodeConnector(\r
+ (short) 2, node);\r
+ NodeConnector nc2 = NodeConnectorCreator.createOFNodeConnector(\r
+ (short) 1, node);\r
+\r
+ try {\r
+ hostnodeconnector_1 = new HostNodeConnector(hostIP_1);\r
+ Assert.assertTrue(hostnodeconnector_1.equalsByIP(hostIP_1));\r
+ Assert.assertTrue(hostnodeconnector_1.isV4Host());\r
+ Assert.assertTrue(hostnodeconnector_1.equalsByIP(hostIP_1));\r
+ } catch (ConstructionException e) {\r
+ Assert.assertTrue(false);\r
+ }\r
+\r
+ try {\r
+ hostnodeconnector_2 = new HostNodeConnector(hostMAC_2, hostIP_2,\r
+ nc1, (short) 2);\r
+ Assert.assertTrue(hostnodeconnector_2.isV6Host());\r
+ Assert.assertTrue(hostnodeconnector_2.getnodeConnector()\r
+ .equals(nc1));\r
+ Assert.assertTrue(hostnodeconnector_2.getnodeconnectorNode()\r
+ .equals(node));\r
+ Assert.assertTrue(node.getID().equals(\r
+ hostnodeconnector_2.getnodeconnectornodeId()));\r
+ Assert.assertTrue(hostnodeconnector_2.getnodeconnectorportId()\r
+ .equals((short) 2));\r
+ } catch (ConstructionException e) {\r
+ Assert.assertTrue(false);\r
+ }\r
+\r
+ try {\r
+ hostnodeconnector_3 = new HostNodeConnector(new EthernetAddress(\r
+ hostMAC_3), hostIP_3, nc2, (short) 3);\r
+ byte[] hostMAC_3_rb = hostnodeconnector_3\r
+ .getDataLayerAddressBytes();\r
+ HostNodeConnector hostnodeconnector_3rb = new HostNodeConnector(\r
+ new EthernetAddress(hostMAC_3_rb), hostIP_3, nc2, (short) 3);\r
+ Assert.assertTrue(hostnodeconnector_3.equals(hostnodeconnector_3rb));\r
+\r
+ Assert.assertTrue(hostnodeconnector_3.getVlan() == (short) 3);\r
+\r
+ hostnodeconnector_3.setStaticHost(true);\r
+ Assert.assertTrue(hostnodeconnector_3.isStaticHost());\r
+\r
+ Assert.assertTrue(hostnodeconnector_3.isRewriteEnabled());\r
+\r
+ hostnodeconnector_3.initArpSendCountDown().setArpSendCountDown(\r
+ (short) 10);\r
+ Assert.assertTrue(hostnodeconnector_3.getArpSendCountDown() == (short) 10);\r
+\r
+ } catch (ConstructionException e) {\r
+ Assert.assertTrue(false);\r
+ }\r
+\r
+ }\r
+\r
+}\r
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+ xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.opendaylight</artifactId>
+ <version>1.4.0-SNAPSHOT</version>
+ <relativePath>../../commons/opendaylight</relativePath>
+ </parent>
+ <artifactId>hosttracker_new.implementation</artifactId>
+ <version>0.4.0-SNAPSHOT</version>
+ <packaging>bundle</packaging>
+
+ <properties>
+ <!-- Sonar properties using jacoco to retrieve integration test results -->
+ <sonar.java.coveragePlugin>jacoco</sonar.java.coveragePlugin>
+ <sonar.dynamicAnalysis>reuseReports</sonar.dynamicAnalysis>
+    <sonar.jacoco.reportPath>target/jacoco.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.itReportPath>target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <sonar.language>java</sonar.language>
+ </properties>
+
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <version>0.5.3.201107060350</version>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>2.3.6</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>
+ </Export-Package>
+ <Import-Package>
+ org.opendaylight.controller.sal.core,
+ org.opendaylight.controller.sal.utils,
+ org.opendaylight.controller.sal.topology,
+ org.opendaylight.controller.sal.packet,
+ org.opendaylight.controller.hosttracker,
+ org.opendaylight.controller.topologymanager,
+ org.opendaylight.controller.sal.packet.address,
+ org.opendaylight.controller.switchmanager,
+ org.opendaylight.controller.clustering.services,
+              org.opendaylight.controller.hosttracker.hostAware,
+ javax.xml.bind.annotation,
+ javax.xml.bind,
+ org.apache.felix.dm,
+ org.apache.commons.lang3.builder,
+ org.osgi.service.component,
+ org.slf4j,
+ org.eclipse.osgi.framework.console,
+ org.osgi.framework
+ </Import-Package>
+ <Bundle-Activator>
+ org.opendaylight.controller.hosttracker.internal.Activator
+ </Bundle-Activator>
+ <Service-Component>
+ </Service-Component>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <configuration>
+ <includes>org.opendaylight.controller.*</includes>
+ </configuration>
+ <executions>
+ <execution>
+ <id>pre-test</id>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>post-test</id>
+ <phase>test</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>topologymanager</artifactId>
+ <version>0.4.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>switchmanager</artifactId>
+ <version>0.4.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>clustering.services</artifactId>
+ <version>0.4.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal</artifactId>
+ <version>0.5.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>hosttracker_new</artifactId>
+ <version>0.4.0-SNAPSHOT</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.Dictionary;
+import java.util.Hashtable;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.hosttracker.IDeviceService;
+import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
+import org.opendaylight.controller.sal.packet.IDataPacketService;
+import org.opendaylight.controller.sal.packet.IListenDataPacket;
+import org.opendaylight.controller.switchmanager.ISwitchManager;
+import org.opendaylight.controller.topologymanager.ITopologyManager;
+import org.opendaylight.controller.topologymanager.ITopologyManagerAware;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Activator extends ComponentActivatorAbstractBase {
+    protected static final Logger logger = LoggerFactory
+            .getLogger(Activator.class);
+
+    @Override
+    protected void init() {
+        // Nothing to set up before the components are created.
+    }
+
+    @Override
+    protected void destroy() {
+        // Nothing to tear down; the base class handles component cleanup.
+    }
+
+    /**
+     * Function that is used to communicate to dependency manager the list of
+     * known implementations for services inside a container
+     *
+     *
+     * @return An array containing all the CLASS objects that will be
+     *         instantiated in order to get an fully working implementation
+     *         Object
+     */
+    @Override
+    public Object[] getImplementations() {
+        Object[] res = { DeviceManagerImpl.class };
+        return res;
+    }
+
+    /**
+     * Function that is called when configuration of the dependencies is
+     * required.
+     *
+     * @param c
+     *            dependency manager Component object, used for configuring the
+     *            dependencies exported and imported
+     * @param imp
+     *            Implementation class that is being configured, needed as long
+     *            as the same routine can configure multiple implementations
+     * @param containerName
+     *            The containerName being configured, this allow also optional
+     *            per-container different behavior if needed, usually should not
+     *            be the case though.
+     */
+    @Override
+    public void configureInstance(Component c, Object imp, String containerName) {
+        if (imp.equals(DeviceManagerImpl.class)) {
+            // export the service
+            // XXX - TODO merge with existing APIs
+            Dictionary<String, String> props = new Hashtable<String, String>();
+            props.put("salListenerName", "devicemanager");
+
+            c.setInterface(new String[] { IDeviceService.class.getName(),
+                    IListenDataPacket.class.getName(),
+                    ITopologyManagerAware.class.getName() }, props);
+
+            c.add(createContainerServiceDependency(containerName)
+                    .setService(ISwitchManager.class)
+                    .setCallbacks("setSwitchManager", "unsetSwitchManager")
+                    .setRequired(false));
+
+            // Register the data-packet service dependency exactly once.
+            // NOTE(review): the original code added this identical dependency
+            // a second time below, which would make the dependency manager
+            // invoke setDataPacketService/unsetDataPacketService twice.
+            c.add(createContainerServiceDependency(containerName)
+                    .setService(IDataPacketService.class)
+                    .setCallbacks("setDataPacketService",
+                            "unsetDataPacketService").setRequired(true));
+
+            // c.add(createContainerServiceDependency(containerName).setService(
+            // IClusterContainerServices.class).setCallbacks(
+            // "setClusterContainerService",
+            // "unsetClusterContainerService").setRequired(true));
+            c.add(createContainerServiceDependency(containerName)
+                    .setService(ITopologyManager.class)
+                    .setCallbacks("setTopologyManager", "unsetTopologyManager")
+                    .setRequired(false));
+        }
+    }
+
+    /**
+     * Method which tells how many Global implementations are supported by the
+     * bundle. This way we can tune the number of components created. This
+     * components will be created ONLY at the time of bundle startup and will be
+     * destroyed only at time of bundle destruction, this is the major
+     * difference with the implementation retrieved via getImplementations where
+     * all of them are assumed to be in a container !
+     *
+     *
+     * @return The list of implementations the bundle will support, in Global
+     *         version
+     */
+    @Override
+    protected Object[] getGlobalImplementations() {
+        return null;
+    }
+
+    /**
+     * Configure the dependency for a given instance Global
+     *
+     * @param c
+     *            Component assigned for this instance, this will be what will
+     *            be used for configuration
+     * @param imp
+     *            implementation to be configured
+     */
+    @Override
+    protected void configureGlobalInstance(Component c, Object imp) {
+        // No global components are published by this bundle.
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+/**
+ * @author Srini
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import org.opendaylight.controller.sal.core.NodeConnector;
+
+/**
+ * Records the switch port (NodeConnector) where a host was observed, together
+ * with when the observation first became active and when it was last seen.
+ * Equality and hashing are based on the port only, so list lookups find the
+ * entry for a given port regardless of its timestamps.
+ */
+public class AttachmentPoint {
+    NodeConnector port;
+    long activeSince;
+    long lastSeen;
+
+    // Timeout for moving attachment points from OF/broadcast
+    // domain to another.
+    public static final long INACTIVITY_INTERVAL = 30000; // 30 seconds
+    public static final long EXTERNAL_TO_EXTERNAL_TIMEOUT = 5000; // 5 seconds
+    public static final long OPENFLOW_TO_EXTERNAL_TIMEOUT = 30000; // 30 seconds
+    public static final long CONSISTENT_TIMEOUT = 30000; // 30 seconds
+
+    public AttachmentPoint(NodeConnector port, long activeSince, long lastSeen) {
+        this.port = port;
+        this.activeSince = activeSince;
+        this.lastSeen = lastSeen;
+    }
+
+    // Convenience constructor: a point seen now is also active since now.
+    public AttachmentPoint(NodeConnector port, long lastSeen) {
+        this.port = port;
+        this.lastSeen = lastSeen;
+        this.activeSince = lastSeen;
+    }
+
+    // Copy constructor (shallow copy: the NodeConnector reference is shared).
+    public AttachmentPoint(AttachmentPoint ap) {
+        this.port = ap.port;
+        this.activeSince = ap.activeSince;
+        this.lastSeen = ap.lastSeen;
+    }
+
+    public NodeConnector getPort() {
+        return port;
+    }
+
+    public void setPort(NodeConnector port) {
+        this.port = port;
+    }
+
+    public long getActiveSince() {
+        return activeSince;
+    }
+
+    public void setActiveSince(long activeSince) {
+        this.activeSince = activeSince;
+    }
+
+    public long getLastSeen() {
+        return lastSeen;
+    }
+
+    // Advances lastSeen monotonically (older timestamps are ignored). If the
+    // gap since the previous sighting exceeds INACTIVITY_INTERVAL, the point
+    // is treated as newly active and activeSince is reset as well.
+    public void setLastSeen(long lastSeen) {
+        if (this.lastSeen + INACTIVITY_INTERVAL < lastSeen)
+            this.activeSince = lastSeen;
+        if (this.lastSeen < lastSeen)
+            this.lastSeen = lastSeen;
+    }
+
+    // hashCode/equals deliberately consider only the port, not the timestamps.
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((port == null) ? 0 : port.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        AttachmentPoint other = (AttachmentPoint) obj;
+        if (port == null) {
+            if (other.port != null)
+                return false;
+        } else if (!port.equals(other.port))
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "AttachmentPoint [port=" + port + ", activeSince=" + activeSince
+                + ", lastSeen=" + lastSeen + "]";
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.Collection;
+import java.util.EnumSet;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDevice;
+import org.opendaylight.controller.hosttracker.IDeviceService;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.IEntityClassListener;
+import org.opendaylight.controller.hosttracker.IEntityClassifierService;
+
+/**
+ * This is a default entity classifier that simply classifies all entities into
+ * a fixed entity class, with key fields of MAC and VLAN.
+ *
+ * @author readams
+ */
+public class DefaultEntityClassifier implements IEntityClassifierService {
+    /**
+     * A default fixed entity class
+     */
+    protected static class DefaultEntityClass implements IEntityClass {
+        String name;
+
+        public DefaultEntityClass(String name) {
+            this.name = name;
+        }
+
+        // All instances share the classifier-wide key fields (MAC + VLAN).
+        @Override
+        public EnumSet<IDeviceService.DeviceField> getKeyFields() {
+            return keyFields;
+        }
+
+        @Override
+        public String getName() {
+            return name;
+        }
+    }
+
+    // Key fields used to distinguish devices: MAC address and VLAN only.
+    protected static EnumSet<DeviceField> keyFields;
+    static {
+        keyFields = EnumSet.of(DeviceField.MAC, DeviceField.VLAN);
+    }
+    // Single shared class instance: every entity classifies to this class.
+    protected static DefaultEntityClass entityClass = new DefaultEntityClass(
+            "DefaultEntityClass");
+
+    // Every entity maps to the one fixed class, unconditionally.
+    @Override
+    public IEntityClass classifyEntity(Entity entity) {
+        return entityClass;
+    }
+
+    // Reclassification never changes anything: same fixed class.
+    @Override
+    public IEntityClass reclassifyEntity(IDevice curDevice, Entity entity) {
+        return entityClass;
+    }
+
+    @Override
+    public void deviceUpdate(IDevice oldDevice,
+            Collection<? extends IDevice> newDevices) {
+        // no-op
+    }
+
+    @Override
+    public EnumSet<DeviceField> getKeyFields() {
+        return keyFields;
+    }
+
+    // Listeners are never notified because the classification never changes.
+    @Override
+    public void addListener(IEntityClassListener listener) {
+        // no-op
+
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDevice;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.SwitchPort;
+import org.opendaylight.controller.hosttracker.SwitchPort.ErrorStatus;
+import org.opendaylight.controller.sal.core.NodeConnector;
+import org.opendaylight.controller.sal.utils.HexEncode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Concrete implementation of {@link IDevice}
+ *
+ * @author readams
+ */
+public class Device implements IDevice {
+ protected static Logger log = LoggerFactory.getLogger(Device.class);
+ public static final short VLAN_UNTAGGED = (short) 0xffff;
+
+ private final Long deviceKey;
+ protected final DeviceManagerImpl deviceManager;
+
+ protected final Entity[] entities;
+ private final IEntityClass entityClass;
+
+ protected final String macAddressString;
+ // the vlan Ids from the entities of this device
+ protected final Short[] vlanIds;
+ protected volatile String dhcpClientName;
+
+ /**
+ * These are the old attachment points for the device that were valid no
+ * more than INACTIVITY_TIME ago.
+ */
+ protected volatile List<AttachmentPoint> oldAPs;
+ /**
+ * The current attachment points for the device.
+ */
+ protected volatile List<AttachmentPoint> attachmentPoints;
+
+ // ************
+ // Constructors
+ // ************
+
+    /**
+     * Create a device from an entities
+     *
+     * @param deviceManager
+     *            the device manager for this device
+     * @param deviceKey
+     *            the unique identifier for this device object
+     * @param entity
+     *            the initial entity for the device
+     * @param entityClass
+     *            the entity classes associated with the entity
+     */
+    public Device(DeviceManagerImpl deviceManager, Long deviceKey,
+            Entity entity, IEntityClass entityClass) {
+        this.deviceManager = deviceManager;
+        this.deviceKey = deviceKey;
+        this.entities = new Entity[] { entity };
+        this.macAddressString = HexEncode.longToHexString(entity
+                .getMacAddress());
+        this.entityClass = entityClass;
+        Arrays.sort(this.entities);
+
+        this.dhcpClientName = null;
+        this.oldAPs = null;
+        this.attachmentPoints = null;
+
+        // Seed the attachment-point list from the entity's port, if any and
+        // if the device manager considers it a valid attachment point.
+        if (entity.getPort() != null) {
+            NodeConnector port = entity.getPort();
+
+            if (deviceManager.isValidAttachmentPoint(port)) {
+                AttachmentPoint ap;
+                ap = new AttachmentPoint(port, entity.getLastSeenTimestamp()
+                        .getTime());
+
+                this.attachmentPoints = new ArrayList<AttachmentPoint>();
+                this.attachmentPoints.add(ap);
+            }
+        }
+        vlanIds = computeVlandIds();
+    }
+
+    /**
+     * Create a device from a set of entities
+     *
+     * @param deviceManager
+     *            the device manager for this device
+     * @param deviceKey
+     *            the unique identifier for this device object
+     * @param entities
+     *            the initial entities for the device
+     * @param entityClass
+     *            the entity class associated with the entities
+     */
+    public Device(DeviceManagerImpl deviceManager, Long deviceKey,
+            String dhcpClientName, Collection<AttachmentPoint> oldAPs,
+            Collection<AttachmentPoint> attachmentPoints,
+            Collection<Entity> entities, IEntityClass entityClass) {
+        this.deviceManager = deviceManager;
+        this.deviceKey = deviceKey;
+        this.dhcpClientName = dhcpClientName;
+        this.entities = entities.toArray(new Entity[entities.size()]);
+        this.oldAPs = null;
+        this.attachmentPoints = null;
+        // Defensive copies: the AP lists are replaced wholesale elsewhere, so
+        // each Device keeps its own ArrayList rather than the caller's.
+        if (oldAPs != null) {
+            this.oldAPs = new ArrayList<AttachmentPoint>(oldAPs);
+        }
+        if (attachmentPoints != null) {
+            this.attachmentPoints = new ArrayList<AttachmentPoint>(
+                    attachmentPoints);
+        }
+        // MAC string is derived from the first entity after sorting below;
+        // the code assumes all entities of a device share one MAC.
+        this.macAddressString = HexEncode.longToHexString(this.entities[0]
+                .getMacAddress());
+        this.entityClass = entityClass;
+        Arrays.sort(this.entities);
+        vlanIds = computeVlandIds();
+    }
+
+    /**
+     * Construct a new device consisting of the entities from the old device
+     * plus an additional entity. The caller needs to ensure that the additional
+     * entity is not already present in the array
+     *
+     * @param device
+     *            the old device object
+     * @param newEntity
+     *            the entity to add. newEntity must be have the same entity
+     *            class as device
+     * @param insertionpoint
+     *            if positive indicates the index in the entities array were
+     *            the new entity should be inserted. If negative we will
+     *            compute the correct insertion point
+     */
+    public Device(Device device, Entity newEntity, int insertionpoint) {
+        this.deviceManager = device.deviceManager;
+        this.deviceKey = device.deviceKey;
+        this.dhcpClientName = device.dhcpClientName;
+
+        this.entities = new Entity[device.entities.length + 1];
+        if (insertionpoint < 0) {
+            // binarySearch on a sorted array returns -(insertion point) - 1
+            // for a missing key; invert that to get the insertion index.
+            insertionpoint = -(Arrays.binarySearch(device.entities, newEntity) + 1);
+        }
+        if (insertionpoint > 0) {
+            // insertion point is not the beginning:
+            // copy up to insertion point
+            System.arraycopy(device.entities, 0, this.entities, 0,
+                    insertionpoint);
+        }
+        if (insertionpoint < device.entities.length) {
+            // insertion point is not the end
+            // copy from insertion point
+            System.arraycopy(device.entities, insertionpoint, this.entities,
+                    insertionpoint + 1, device.entities.length - insertionpoint);
+        }
+        this.entities[insertionpoint] = newEntity;
+        /*
+         * this.entities = Arrays.<Entity>copyOf(device.entities,
+         * device.entities.length + 1); this.entities[this.entities.length - 1]
+         * = newEntity; Arrays.sort(this.entities);
+         */
+        // Copy the AP lists so this device and the old one don't share state.
+        this.oldAPs = null;
+        if (device.oldAPs != null) {
+            this.oldAPs = new ArrayList<AttachmentPoint>(device.oldAPs);
+        }
+        this.attachmentPoints = null;
+        if (device.attachmentPoints != null) {
+            this.attachmentPoints = new ArrayList<AttachmentPoint>(
+                    device.attachmentPoints);
+        }
+
+        this.macAddressString = HexEncode.longToHexString(this.entities[0]
+                .getMacAddress());
+
+        this.entityClass = device.entityClass;
+        vlanIds = computeVlandIds();
+    }
+
+    // Collect the distinct VLAN ids across all entities, sorted ascending.
+    // A null VLAN on an entity is represented by -1 (untagged marker).
+    private Short[] computeVlandIds() {
+        // Fast path: a single entity needs no set machinery.
+        if (entities.length == 1) {
+            if (entities[0].getVlan() != null) {
+                return new Short[] { entities[0].getVlan() };
+            } else {
+                return new Short[] { Short.valueOf((short) -1) };
+            }
+        }
+
+        // TreeSet gives both de-duplication and sorted order.
+        TreeSet<Short> vals = new TreeSet<Short>();
+        for (Entity e : entities) {
+            if (e.getVlan() == null)
+                vals.add((short) -1);
+            else
+                vals.add(e.getVlan());
+        }
+        return vals.toArray(new Short[vals.size()]);
+    }
+
+    /**
+     * Given a list of attachment points (apList), the procedure would return a
+     * map of attachment points for each L2 domain. L2 domain id is the key.
+     *
+     * @param apList the candidate attachment points; may be null
+     * @return a map of one "best" attachment point per L2 domain id, or null
+     *         when the input is null or no valid point remains
+     */
+    private Map<Long, AttachmentPoint> getAPMap(List<AttachmentPoint> apList) {
+
+        if (apList == null)
+            return null;
+        // ITopologyService topology = deviceManager.topology;
+
+        // Get the old attachment points and sort them.
+        List<AttachmentPoint> oldAP = new ArrayList<AttachmentPoint>();
+        if (apList != null)
+            oldAP.addAll(apList);
+
+        // Remove invalid attachment points before sorting.
+        List<AttachmentPoint> tempAP = new ArrayList<AttachmentPoint>();
+        for (AttachmentPoint ap : oldAP) {
+            if (deviceManager.isValidAttachmentPoint(ap.getPort())) {
+                tempAP.add(ap);
+            }
+        }
+        oldAP = tempAP;
+
+        // Sort so that later map.put calls let the "winning" point per
+        // domain overwrite earlier ones, per the manager's comparator.
+        Collections.sort(oldAP, deviceManager.apComparator);
+
+        // Map of attachment point by L2 domain Id.
+        Map<Long, AttachmentPoint> apMap = new HashMap<Long, AttachmentPoint>();
+
+        for (int i = 0; i < oldAP.size(); ++i) {
+            AttachmentPoint ap = oldAP.get(i);
+            // if this is not a valid attachment point, continue
+            if (!deviceManager.isValidAttachmentPoint(ap.getPort()))
+                continue;
+
+            // long id = topology.getL2DomainId(ap.getSw());
+            // XXX - Missing functionality: with topology absent, every point
+            // falls into the single domain id 0, so only one AP survives.
+            long id = 0;
+
+            apMap.put(id, ap);
+        }
+
+        if (apMap.isEmpty())
+            return null;
+        return apMap;
+    }
+
+    /**
+     * Remove all attachment points that are older than INACTIVITY_INTERVAL from
+     * the list.
+     *
+     * @param apList the list to prune in place; may be null
+     * @return true if at least one entry was removed
+     */
+    private boolean removeExpiredAttachmentPoints(List<AttachmentPoint> apList) {
+
+        List<AttachmentPoint> expiredAPs = new ArrayList<AttachmentPoint>();
+
+        if (apList == null)
+            return false;
+
+        // Collect first, then removeAll: avoids mutating while iterating.
+        for (AttachmentPoint ap : apList) {
+            if (ap.getLastSeen() + AttachmentPoint.INACTIVITY_INTERVAL < System
+                    .currentTimeMillis())
+                expiredAPs.add(ap);
+        }
+        if (expiredAPs.size() > 0) {
+            apList.removeAll(expiredAPs);
+            return true;
+        } else
+            return false;
+    }
+
+    /**
+     * Get a list of duplicate attachment points, given a list of old attachment
+     * points and one attachment point per L2 domain. Given a true attachment
+     * point in the L2 domain, say trueAP, another attachment point in the same
+     * L2 domain, say ap, is duplicate if: 1. ap is inconsistent with trueAP,
+     * and 2. active time of ap is after that of trueAP; and 3. last seen time
+     * of ap is within the last INACTIVITY_INTERVAL
+     *
+     * @param oldAPList old attachment points to test; may be null
+     * @param apMap one true attachment point per L2 domain id; may be null
+     * @return the duplicates found; empty list when either input is null
+     */
+    List<AttachmentPoint> getDuplicateAttachmentPoints(
+            List<AttachmentPoint> oldAPList, Map<Long, AttachmentPoint> apMap) {
+        // ITopologyService topology = deviceManager.topology;
+        List<AttachmentPoint> dupAPs = new ArrayList<AttachmentPoint>();
+        long timeThreshold = System.currentTimeMillis()
+                - AttachmentPoint.INACTIVITY_INTERVAL;
+
+        if (oldAPList == null || apMap == null)
+            return dupAPs;
+
+        for (AttachmentPoint ap : oldAPList) {
+            // XXX - Missing functionality
+            // long id = topology.getL2DomainId(ap.getSw());
+            long id = 0;
+            AttachmentPoint trueAP = apMap.get(id);
+
+            if (trueAP == null)
+                continue;
+            // XXX - Missing functionality: without topology, consistency is
+            // hard-coded true, so !c below is always false and no duplicates
+            // are ever reported in the current state of the code.
+            // boolean c = (topology.isConsistent(trueAP.getSw(),
+            // trueAP.getPort(),
+            // ap.getSw(), ap.getPort()));
+            boolean c = true;
+            boolean active = (ap.getActiveSince() > trueAP.getActiveSince());
+            boolean last = ap.getLastSeen() > timeThreshold;
+            if (!c && active && last) {
+                dupAPs.add(ap);
+            }
+        }
+
+        return dupAPs;
+    }
+
+    /**
+     * Update the known attachment points. This method is called whenever
+     * topology changes. The method returns true if there's any change to the
+     * list of attachment points -- which indicates a possible device move.
+     *
+     * @return true when the set of attachment points changed (possible move)
+     */
+    protected boolean updateAttachmentPoint() {
+        boolean moved = false;
+        // Remember the current APs as "old" before recomputing.
+        this.oldAPs = attachmentPoints;
+        if (attachmentPoints == null || attachmentPoints.isEmpty())
+            return false;
+
+        List<AttachmentPoint> apList = new ArrayList<AttachmentPoint>();
+        if (attachmentPoints != null)
+            apList.addAll(attachmentPoints);
+        // Re-derive one AP per L2 domain; a size change means some points
+        // were invalidated or merged by the topology change.
+        Map<Long, AttachmentPoint> newMap = getAPMap(apList);
+        if (newMap == null || newMap.size() != apList.size()) {
+            moved = true;
+        }
+
+        // Prepare the new attachment point list.
+        if (moved) {
+            log.info("updateAttachmentPoint: ap {} newmap {} ",
+                    attachmentPoints, newMap);
+            List<AttachmentPoint> newAPList = new ArrayList<AttachmentPoint>();
+            if (newMap != null)
+                newAPList.addAll(newMap.values());
+            this.attachmentPoints = newAPList;
+        }
+
+        // Note: oldAPs was already updated to the previous AP list above.
+        return moved;
+    }
+
+    /**
+     * Update the list of attachment points given that a new packet-in was seen
+     * from (sw, port) at time (lastSeen). The return value is true if there was
+     * any change to the list of attachment points for the device -- which
+     * indicates a device move.
+     *
+     * @param port the node connector the packet-in arrived on
+     * @param lastSeen observation timestamp in milliseconds
+     * @return true if the attachment-point list changed (device move)
+     */
+    protected boolean updateAttachmentPoint(NodeConnector port, long lastSeen) {
+        // ITopologyService topology = deviceManager.topology;
+        List<AttachmentPoint> oldAPList;
+        List<AttachmentPoint> apList;
+        // Tracks whether newAP was resurrected from the oldAPs list.
+        boolean oldAPFlag = false;
+
+        if (!deviceManager.isValidAttachmentPoint(port))
+            return false;
+        AttachmentPoint newAP = new AttachmentPoint(port, lastSeen);
+        // Copy the oldAP and ap list.
+        apList = new ArrayList<AttachmentPoint>();
+        if (attachmentPoints != null)
+            apList.addAll(attachmentPoints);
+        oldAPList = new ArrayList<AttachmentPoint>();
+        if (oldAPs != null)
+            oldAPList.addAll(oldAPs);
+
+        // if the sw, port is in old AP, remove it from there
+        // and update the lastSeen in that object.
+        // (equality is port-only, so contains/indexOf match by port)
+        if (oldAPList.contains(newAP)) {
+            int index = oldAPList.indexOf(newAP);
+            newAP = oldAPList.remove(index);
+            newAP.setLastSeen(lastSeen);
+            this.oldAPs = oldAPList;
+            oldAPFlag = true;
+        }
+
+        // newAP now contains the new attachment point.
+
+        // Get the APMap is null or empty.
+        Map<Long, AttachmentPoint> apMap = getAPMap(apList);
+        if (apMap == null || apMap.isEmpty()) {
+            apList.add(newAP);
+            attachmentPoints = apList;
+            // there are no old attachment points - since the device exists,
+            // this
+            // may be because the host really moved (so the old AP port went
+            // down);
+            // or it may be because the switch restarted (so old APs were
+            // nullified).
+            // For now we will treat both cases as host moved.
+            return true;
+        }
+
+        // XXX - Missing functionality
+        // long id = topology.getL2DomainId(sw);
+        long id = 0;
+        AttachmentPoint oldAP = apMap.get(id);
+
+        if (oldAP == null) // No attachment on this L2 domain.
+        {
+            apList = new ArrayList<AttachmentPoint>();
+            apList.addAll(apMap.values());
+            apList.add(newAP);
+            this.attachmentPoints = apList;
+            return true; // new AP found on an L2 island.
+        }
+
+        // There is already a known attachment point on the same L2 island.
+        // we need to compare oldAP and newAP.
+        if (oldAP.equals(newAP)) {
+            // nothing to do here. just the last seen has to be changed.
+            if (newAP.lastSeen > oldAP.lastSeen) {
+                oldAP.setLastSeen(newAP.lastSeen);
+            }
+            this.attachmentPoints = new ArrayList<AttachmentPoint>(
+                    apMap.values());
+            return false; // nothing to do here.
+        }
+
+        // Different port on the same island: let the comparator decide
+        // which one wins as the current attachment point.
+        int x = deviceManager.apComparator.compare(oldAP, newAP);
+        if (x < 0) {
+            // newAP replaces oldAP.
+            apMap.put(id, newAP);
+            this.attachmentPoints = new ArrayList<AttachmentPoint>(
+                    apMap.values());
+
+            oldAPList = new ArrayList<AttachmentPoint>();
+            if (oldAPs != null)
+                oldAPList.addAll(oldAPs);
+            oldAPList.add(oldAP);
+            this.oldAPs = oldAPList;
+            // XXX - Missing functionality
+            // if (!topology.isInSameBroadcastDomain(oldAP.getSw(),
+            // oldAP.getPort(),
+            // newAP.getSw(), newAP.getPort()))
+            // return true; // attachment point changed.
+            return true;
+        } else if (oldAPFlag) {
+            // retain oldAP as is. Put the newAP in oldAPs for flagging
+            // possible duplicates.
+            oldAPList = new ArrayList<AttachmentPoint>();
+            if (oldAPs != null)
+                oldAPList.addAll(oldAPs);
+            // Add to oldAPList only if it was picked up from the oldAPList
+            oldAPList.add(newAP);
+            this.oldAPs = oldAPList;
+            // XXX - Missing functionality
+            // if (!topology.isInSameBroadcastDomain(oldAP.getSw(),
+            // oldAP.getPort(),
+            // newAP.getSw(), newAP.getPort()))
+            // return true; // attachment point changed.
+            return true;
+        }
+        return false;
+    }
+
+    /**
+     * Delete (sw,port) from the list of list of attachment points and oldAPs.
+     *
+     * @param port the node connector whose attachment points are removed
+     * @return true if an entry was removed from the current attachment points
+     *         (removals from oldAPs alone return false)
+     */
+    public boolean deleteAttachmentPoint(NodeConnector port) {
+        // AttachmentPoint equality is port-only, so this probe matches any
+        // entry for the port regardless of timestamps.
+        AttachmentPoint ap = new AttachmentPoint(port, 0);
+
+        if (this.oldAPs != null) {
+            ArrayList<AttachmentPoint> apList = new ArrayList<AttachmentPoint>();
+            apList.addAll(this.oldAPs);
+            int index = apList.indexOf(ap);
+            // Bug fix: indexOf returns 0 for the first element; the original
+            // "index > 0" check silently skipped a match at position 0.
+            if (index >= 0) {
+                apList.remove(index);
+                this.oldAPs = apList;
+            }
+        }
+
+        if (this.attachmentPoints != null) {
+            ArrayList<AttachmentPoint> apList = new ArrayList<AttachmentPoint>();
+            apList.addAll(this.attachmentPoints);
+            int index = apList.indexOf(ap);
+            if (index >= 0) {
+                apList.remove(index);
+                this.attachmentPoints = apList;
+                return true;
+            }
+        }
+        return false;
+    }
+
+ // *******
+ // IDevice
+ // *******
+
+    // Returns the non-expired old attachment points as SwitchPorts; never
+    // returns null (empty array when there are none).
+    @Override
+    public SwitchPort[] getOldAP() {
+        List<SwitchPort> sp = new ArrayList<SwitchPort>();
+        SwitchPort[] returnSwitchPorts = new SwitchPort[] {};
+        if (oldAPs == null)
+            return returnSwitchPorts;
+        if (oldAPs.isEmpty())
+            return returnSwitchPorts;
+
+        // copy ap list.
+        List<AttachmentPoint> oldAPList;
+        oldAPList = new ArrayList<AttachmentPoint>();
+
+        if (oldAPs != null)
+            oldAPList.addAll(oldAPs);
+        // Prune expired entries from the local copy only; this.oldAPs is
+        // intentionally left untouched here.
+        removeExpiredAttachmentPoints(oldAPList);
+
+        if (oldAPList != null) {
+            for (AttachmentPoint ap : oldAPList) {
+                SwitchPort swport = new SwitchPort(ap.getPort());
+                sp.add(swport);
+            }
+        }
+        return sp.toArray(new SwitchPort[sp.size()]);
+    }
+
+    // Convenience overload: current attachment points without error entries.
+    @Override
+    public SwitchPort[] getAttachmentPoints() {
+        return getAttachmentPoints(false);
+    }
+
+    // Returns the current attachment points; when includeError is true, also
+    // appends suspected-duplicate points flagged with DUPLICATE_DEVICE.
+    // Never returns null (empty array when there are none).
+    @Override
+    public SwitchPort[] getAttachmentPoints(boolean includeError) {
+        List<SwitchPort> sp = new ArrayList<SwitchPort>();
+        SwitchPort[] returnSwitchPorts = new SwitchPort[] {};
+        if (attachmentPoints == null)
+            return returnSwitchPorts;
+        if (attachmentPoints.isEmpty())
+            return returnSwitchPorts;
+
+        // copy ap list.
+        List<AttachmentPoint> apList = attachmentPoints;
+
+        if (apList != null) {
+            for (AttachmentPoint ap : apList) {
+                SwitchPort swport = new SwitchPort(ap.getPort());
+                sp.add(swport);
+            }
+        }
+
+        if (!includeError)
+            return sp.toArray(new SwitchPort[sp.size()]);
+
+        List<AttachmentPoint> oldAPList;
+        oldAPList = new ArrayList<AttachmentPoint>();
+
+        if (oldAPs != null)
+            oldAPList.addAll(oldAPs);
+
+        // Persist the pruned old-AP list only if something actually expired.
+        if (removeExpiredAttachmentPoints(oldAPList))
+            this.oldAPs = oldAPList;
+
+        List<AttachmentPoint> dupList;
+        // get AP map.
+        Map<Long, AttachmentPoint> apMap = getAPMap(apList);
+        dupList = this.getDuplicateAttachmentPoints(oldAPList, apMap);
+        if (dupList != null) {
+            for (AttachmentPoint ap : dupList) {
+                SwitchPort swport = new SwitchPort(ap.getPort(),
+                        ErrorStatus.DUPLICATE_DEVICE);
+                sp.add(swport);
+            }
+        }
+        return sp.toArray(new SwitchPort[sp.size()]);
+    }
+
+    /** @return the unique key identifying this device */
+    @Override
+    public Long getDeviceKey() {
+        return deviceKey;
+    }
+
+    /** @return the MAC address taken from the first entity */
+    @Override
+    public long getMACAddress() {
+        // we assume only one MAC per device for now.
+        // NOTE(review): throws ArrayIndexOutOfBoundsException if entities is
+        // empty -- presumably a device always has at least one entity; verify.
+        return entities[0].getMacAddress();
+    }
+
+    /** @return the cached string form of this device's MAC address */
+    @Override
+    public String getMACAddressString() {
+        return macAddressString;
+    }
+
+    /** Return a defensive copy of the VLAN IDs known for this device. */
+    @Override
+    public Short[] getVlanId() {
+        return vlanIds.clone();
+    }
+
+    // Field set used when querying the per-class index for IPv4 collisions.
+    static final EnumSet<DeviceField> ipv4Fields = EnumSet.of(DeviceField.IPV4);
+
+    /**
+     * Compute the IPv4 addresses currently owned by this device. An address
+     * is included only if no other device in the same entity class has a
+     * more recently seen entity carrying that same address.
+     *
+     * @return the owned IPv4 addresses, in ascending order (TreeSet-backed)
+     */
+    @Override
+    public Integer[] getIPv4Addresses() {
+        // XXX - TODO we can cache this result. Let's find out if this
+        // is really a performance bottleneck first though.
+
+        TreeSet<Integer> vals = new TreeSet<Integer>();
+        for (Entity e : entities) {
+            if (e.getIpv4Address() == null)
+                continue;
+
+            // We have an IP address only if among the devices within the class
+            // we have the most recent entity with that IP.
+            boolean validIP = true;
+            Iterator<Device> devices = deviceManager.queryClassByEntity(
+                    entityClass, ipv4Fields, e);
+            while (devices.hasNext()) {
+                Device d = devices.next();
+                // Skip this device itself.
+                if (deviceKey.equals(d.getDeviceKey()))
+                    continue;
+                for (Entity se : d.entities) {
+                    // NOTE(review): compareTo NPEs if e's last-seen timestamp
+                    // is null -- presumably entities always carry one; verify.
+                    if (se.getIpv4Address() != null
+                            && se.getIpv4Address().equals(e.getIpv4Address())
+                            && se.getLastSeenTimestamp() != null
+                            && 0 < se.getLastSeenTimestamp().compareTo(
+                                    e.getLastSeenTimestamp())) {
+                        validIP = false;
+                        break;
+                    }
+                }
+                if (!validIP)
+                    break;
+            }
+
+            if (validIP)
+                vals.add(e.getIpv4Address());
+        }
+
+        return vals.toArray(new Integer[vals.size()]);
+    }
+
+ @Override
+ public Short[] getSwitchPortVlanIds(SwitchPort swp) {
+ TreeSet<Short> vals = new TreeSet<Short>();
+ for (Entity e : entities) {
+ if (e.getPort().equals(swp.getPort())) {
+ if (e.getVlan() == null)
+ vals.add(VLAN_UNTAGGED);
+ else
+ vals.add(e.getVlan());
+ }
+ }
+ return vals.toArray(new Short[vals.size()]);
+ }
+
+    /** Return the most recent last-seen timestamp across all entities. */
+    @Override
+    public Date getLastSeen() {
+        Date latest = null;
+        for (Entity entity : entities) {
+            Date seen = entity.getLastSeenTimestamp();
+            if (latest == null || seen.compareTo(latest) > 0)
+                latest = seen;
+        }
+        return latest;
+    }
+
+    // ***************
+    // Getters/Setters
+    // ***************
+
+    /** @return the entity class this device was classified into */
+    @Override
+    public IEntityClass getEntityClass() {
+        return entityClass;
+    }
+
+    // NOTE(review): exposes the internal entities array directly;
+    // callers must not mutate the returned array.
+    public Entity[] getEntities() {
+        return entities;
+    }
+
+    /** @return the DHCP client name for this device, or null if unknown */
+    public String getDHCPClientName() {
+        return dhcpClientName;
+    }
+
+    // ***************
+    // Utility Methods
+    // ***************
+
+    /**
+     * Check whether the device contains the specified entity.
+     *
+     * NOTE(review): relies on Arrays.binarySearch, which requires the
+     * entities array to be kept sorted -- confirm against the constructor.
+     *
+     * @param entity
+     *            the entity to search for
+     * @return the index of the entity, or <0 if not found
+     */
+    protected int entityIndex(Entity entity) {
+        return Arrays.binarySearch(entities, entity);
+    }
+
+    // ******
+    // Object
+    // ******
+
+    /** Hash derived solely from the entity array (see equals). */
+    @Override
+    public int hashCode() {
+        // Equivalent to prime * 1 + Arrays.hashCode(entities) with prime=31.
+        return 31 + Arrays.hashCode(entities);
+    }
+
+    /**
+     * Two devices are equal when they share the same device key and the
+     * same entity array contents.
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null || getClass() != obj.getClass())
+            return false;
+        Device that = (Device) obj;
+        return deviceKey.equals(that.deviceKey)
+                && Arrays.equals(entities, that.entities);
+    }
+
+    /** Render the device key, entity class, MAC, IPs and attachment points. */
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder("Device [deviceKey=");
+        sb.append(deviceKey);
+        sb.append(", entityClass=").append(entityClass.getName());
+        sb.append(", MAC=").append(macAddressString);
+        sb.append(", IPs=[");
+        String sep = "";
+        for (Integer ip : getIPv4Addresses()) {
+            sb.append(sep);
+            sep = ", ";
+            // builder.append(IPv4.fromIPv4Address(ip));
+            sb.append(ip);
+        }
+        sb.append("], APs=");
+        sb.append(Arrays.toString(getAttachmentPoints(true)));
+        sb.append("]");
+        return sb.toString();
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.Iterator;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+
+/**
+ * An index that maps key fields of an entity to device keys
+ */
+public abstract class DeviceIndex {
+    /**
+     * The key fields for this index
+     */
+    protected EnumSet<DeviceField> keyFields;
+
+    /**
+     * Construct a new device index using the provided key fields
+     *
+     * @param keyFields
+     *            the key fields to use
+     */
+    public DeviceIndex(EnumSet<DeviceField> keyFields) {
+        super();
+        this.keyFields = keyFields;
+    }
+
+    /**
+     * Find all device keys in the index that match the given entity on all the
+     * key fields for this index
+     *
+     * @param entity
+     *            the entity to search for
+     * @return an iterator over device keys
+     */
+    public abstract Iterator<Long> queryByEntity(Entity entity);
+
+    /**
+     * Get all device keys in the index. If certain devices exist multiple
+     * times, then these devices may be returned multiple times
+     *
+     * @return an iterator over device keys
+     */
+    public abstract Iterator<Long> getAll();
+
+    /**
+     * Attempt to update an index with the entities in the provided
+     * {@link Device}. If the update fails because of a concurrent update, will
+     * return false.
+     *
+     * @param device
+     *            the device to update
+     * @param deviceKey
+     *            the device key for the device
+     * @return true if the update succeeded, false otherwise.
+     */
+    public abstract boolean updateIndex(Device device, Long deviceKey);
+
+    /**
+     * Add a mapping from the given entity to the given device key. This update
+     * will not fail because of a concurrent update
+     *
+     * @param entity
+     *            the entity to map from
+     * @param deviceKey
+     *            the device key to map to
+     */
+    public abstract void updateIndex(Entity entity, Long deviceKey);
+
+    /**
+     * Remove the entry for the given entity
+     *
+     * @param entity
+     *            the entity to remove
+     */
+    public abstract void removeEntity(Entity entity);
+
+    /**
+     * Remove the given device key from the index for the given entity
+     *
+     * @param entity
+     *            the entity to search for
+     * @param deviceKey
+     *            the key to remove
+     */
+    public abstract void removeEntity(Entity entity, Long deviceKey);
+
+    /**
+     * Remove the given device key from the index for the given entity, but
+     * only if the collection of other entities does not contain an entity that
+     * is identical on all the key fields for this index.
+     *
+     * @param entity
+     *            the entity to search for
+     * @param deviceKey
+     *            the key to remove
+     * @param others
+     *            the other entities against which to check
+     */
+    public void removeEntityIfNeeded(Entity entity, Long deviceKey,
+            Collection<Entity> others) {
+        // If some other entity shares this entity's index key, the index
+        // entry is still needed -- do not remove it.
+        IndexedEntity ie = new IndexedEntity(keyFields, entity);
+        for (Entity o : others) {
+            IndexedEntity oio = new IndexedEntity(keyFields, o);
+            if (oio.equals(ie))
+                return;
+        }
+
+        // Remove the mapping only if it actually points at this device key.
+        Iterator<Long> keyiter = this.queryByEntity(entity);
+        while (keyiter.hasNext()) {
+            Long key = keyiter.next();
+            if (key.equals(deviceKey)) {
+                removeEntity(entity, deviceKey);
+                break;
+            }
+        }
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.Iterator;
+
+/**
+ * An iterator over {@link Device} objects that resolves device keys from an
+ * underlying key iterator through the device manager's device map.
+ *
+ * NOTE(review): the class name is misspelled ("Interator" instead of
+ * "Iterator"); it is kept unchanged for compatibility with existing callers.
+ */
+public class DeviceIndexInterator implements Iterator<Device> {
+    // Both collaborators are fixed at construction time.
+    private final DeviceManagerImpl deviceManager;
+    private final Iterator<Long> subIterator;
+
+    /**
+     * Construct a new device index iterator referring to a device manager
+     * instance and an iterator over device keys.
+     *
+     * @param deviceManager
+     *            the device manager used to resolve keys to devices
+     * @param subIterator
+     *            an iterator over device keys
+     */
+    public DeviceIndexInterator(DeviceManagerImpl deviceManager,
+            Iterator<Long> subIterator) {
+        super();
+        this.deviceManager = deviceManager;
+        this.subIterator = subIterator;
+    }
+
+    /** @return true while the underlying key iterator has more keys */
+    @Override
+    public boolean hasNext() {
+        return subIterator.hasNext();
+    }
+
+    /**
+     * Resolve and return the device for the next key. May return null if the
+     * key was removed from the device map concurrently.
+     */
+    @Override
+    public Device next() {
+        Long next = subIterator.next();
+        return deviceManager.deviceMap.get(next);
+    }
+
+    /** Remove the current key via the underlying key iterator. */
+    @Override
+    public void remove() {
+        subIterator.remove();
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.Arrays;
+import java.util.Iterator;
+
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.SwitchPort;
+import org.opendaylight.controller.sal.core.NodeConnector;
+import org.opendaylight.controller.sal.utils.FilterIterator;
+
+/**
+ * An iterator for handling device queries: filters an underlying iterator of
+ * devices by entity class, MAC address, VLAN, IPv4 address and attachment
+ * port. A null filter field means "match any".
+ */
+public class DeviceIterator extends FilterIterator<Device> {
+    private final IEntityClass[] entityClasses;
+
+    private final Long macAddress;
+    private final Short vlan;
+    private final Integer ipv4Address;
+    private final NodeConnector port;
+
+    /**
+     * Construct a new device iterator over the key fields
+     *
+     * @param subIterator
+     *            an iterator over the full data structure to scan
+     * @param entityClasses
+     *            the entity classes to search for
+     * @param macAddress
+     *            The MAC address
+     * @param vlan
+     *            the VLAN
+     * @param ipv4Address
+     *            the ipv4 address
+     * @param port
+     *            the attachment port
+     */
+    public DeviceIterator(Iterator<Device> subIterator,
+            IEntityClass[] entityClasses, Long macAddress, Short vlan,
+            Integer ipv4Address, NodeConnector port) {
+        // The superclass stores subIterator; the original also reassigned it
+        // redundantly here.
+        super(subIterator);
+        this.entityClasses = entityClasses;
+        this.macAddress = macAddress;
+        this.vlan = vlan;
+        this.ipv4Address = ipv4Address;
+        this.port = port;
+    }
+
+    /** Return true when the device matches every non-null filter field. */
+    @Override
+    protected boolean matches(Device value) {
+        boolean match;
+        if (entityClasses != null) {
+            IEntityClass clazz = value.getEntityClass();
+            if (clazz == null)
+                return false;
+
+            match = false;
+            for (IEntityClass entityClass : entityClasses) {
+                if (clazz.equals(entityClass)) {
+                    match = true;
+                    break;
+                }
+            }
+            if (!match)
+                return false;
+        }
+        if (macAddress != null) {
+            if (macAddress.longValue() != value.getMACAddress())
+                return false;
+        }
+        if (vlan != null) {
+            Short[] vlans = value.getVlanId();
+            if (Arrays.binarySearch(vlans, vlan) < 0)
+                return false;
+        }
+        if (ipv4Address != null) {
+            Integer[] ipv4Addresses = value.getIPv4Addresses();
+            if (Arrays.binarySearch(ipv4Addresses, ipv4Address) < 0)
+                return false;
+        }
+        if (port != null) {
+            SwitchPort[] sps = value.getAttachmentPoints();
+            if (sps == null)
+                return false;
+
+            match = false;
+            for (SwitchPort sp : sps) {
+                // BUG FIX: the original compared sp.getPort() to itself
+                // (always true), which made the port filter a no-op; compare
+                // against the requested port instead.
+                if (sp.getPort().equals(port)) {
+                    match = true;
+                    break;
+                }
+            }
+            if (!match)
+                return false;
+        }
+        return true;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import static org.opendaylight.controller.hosttracker.internal.DeviceManagerImpl.DeviceUpdate.Change.ADD;
+import static org.opendaylight.controller.hosttracker.internal.DeviceManagerImpl.DeviceUpdate.Change.CHANGE;
+import static org.opendaylight.controller.hosttracker.internal.DeviceManagerImpl.DeviceUpdate.Change.DELETE;
+
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDevice;
+import org.opendaylight.controller.hosttracker.IDeviceListener;
+import org.opendaylight.controller.hosttracker.IDeviceService;
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.IEntityClassListener;
+import org.opendaylight.controller.hosttracker.IEntityClassifierService;
+import org.opendaylight.controller.hosttracker.SwitchPort;
+import org.opendaylight.controller.sal.core.Edge;
+import org.opendaylight.controller.sal.core.NodeConnector;
+import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
+import org.opendaylight.controller.sal.packet.ARP;
+import org.opendaylight.controller.sal.packet.Ethernet;
+import org.opendaylight.controller.sal.packet.IDataPacketService;
+import org.opendaylight.controller.sal.packet.IListenDataPacket;
+import org.opendaylight.controller.sal.packet.Packet;
+import org.opendaylight.controller.sal.packet.PacketResult;
+import org.opendaylight.controller.sal.packet.RawPacket;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
+import org.opendaylight.controller.sal.utils.ListenerDispatcher;
+import org.opendaylight.controller.sal.utils.MultiIterator;
+import org.opendaylight.controller.sal.utils.SingletonTask;
+import org.opendaylight.controller.switchmanager.ISwitchManager;
+import org.opendaylight.controller.topologymanager.ITopologyManager;
+import org.opendaylight.controller.topologymanager.ITopologyManagerAware;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * DeviceManager creates Devices based upon MAC addresses seen in the network.
+ * It tracks any network addresses mapped to the Device, and its location within
+ * the network.
+ *
+ * @author readams
+ */
+public class DeviceManagerImpl implements IDeviceService, IEntityClassListener,
+ IListenDataPacket, ITopologyManagerAware {
+ protected static Logger logger = LoggerFactory
+ .getLogger(DeviceManagerImpl.class);
+
+ public static final String MODULE_NAME = "devicemanager";
+
+ // protected ITopologyService topology;
+ // protected IStorageSourceService storageSource;
+ // protected IRestApiService restApi;
+ // protected IThreadPoolService threadPool;
+ // protected IFlowReconcileService flowReconcileMgr;
+ // protected IFlowReconcileEngineService flowReconcileEngine;
+ // protected IDebugCounterService debugCounters;
+ // private ISyncService syncService;
+ // private IStoreClient<String,DeviceSyncRepresentation> storeClient;
+ // private DeviceSyncManager deviceSyncManager;
+
+ private ITopologyManager topology;
+ private ISwitchManager switchManager = null;
+ private IDataPacketService dataPacketService = null;
+
+ public static final String CNT_INCOMING = MODULE_NAME + "-incoming";
+ public static final String CNT_RECONCILE_REQUEST = MODULE_NAME
+ + "-reconcileRequest";
+ public static final String CNT_RECONCILE_NO_SOURCE = MODULE_NAME
+ + "-reconcileNoSourceDevice";
+ public static final String CNT_RECONCILE_NO_DEST = MODULE_NAME
+ + "-reconcileNoDestDevice";
+ public static final String CNT_BROADCAST_SOURCE = MODULE_NAME
+ + "-broadcastSource";
+ public static final String CNT_NO_SOURCE = MODULE_NAME + "-noSourceDevice";
+ public static final String CNT_NO_DEST = MODULE_NAME + "-noDestDevice";
+ public static final String CNT_DHCP_CLIENT_NAME_SNOOPED = MODULE_NAME
+ + "-dhcpClientNameSnooped";
+ public static final String CNT_DEVICE_ON_INTERAL_PORT_NOT_LEARNED = MODULE_NAME
+ + "-deviceOnInternalPortNotLearned";
+ public static final String CNT_PACKET_NOT_ALLOWED = MODULE_NAME
+ + "-packetNotAllowed";
+ public static final String CNT_NEW_DEVICE = MODULE_NAME + "-newDevice";
+ public static final String CNT_PACKET_ON_INTERNAL_PORT_FOR_KNOWN_DEVICE = MODULE_NAME
+ + "-packetOnInternalPortForKnownDevice";
+ public static final String CNT_NEW_ENTITY = MODULE_NAME + "-newEntity";
+ public static final String CNT_DEVICE_CHANGED = MODULE_NAME
+ + "-deviceChanged";
+ public static final String CNT_DEVICE_MOVED = MODULE_NAME + "-deviceMoved";
+ public static final String CNT_CLEANUP_ENTITIES_RUNS = MODULE_NAME
+ + "-cleanupEntitiesRuns";
+ public static final String CNT_ENTITY_REMOVED_TIMEOUT = MODULE_NAME
+ + "-entityRemovedTimeout";
+ public static final String CNT_DEVICE_DELETED = MODULE_NAME
+ + "-deviceDeleted";
+ public static final String CNT_DEVICE_RECLASSIFY_DELETE = MODULE_NAME
+ + "-deviceReclassifyDelete";
+ public static final String CNT_DEVICE_STORED = MODULE_NAME
+ + "-deviceStored";
+ public static final String CNT_DEVICE_STORE_THROTTLED = MODULE_NAME
+ + "-deviceStoreThrottled";
+ public static final String CNT_DEVICE_REMOVED_FROM_STORE = MODULE_NAME
+ + "-deviceRemovedFromStore";
+ public static final String CNT_SYNC_EXCEPTION = MODULE_NAME
+ + "-syncException";
+ public static final String CNT_DEVICES_FROM_STORE = MODULE_NAME
+ + "-devicesFromStore";
+ public static final String CNT_CONSOLIDATE_STORE_RUNS = MODULE_NAME
+ + "-consolidateStoreRuns";
+ public static final String CNT_CONSOLIDATE_STORE_DEVICES_REMOVED = MODULE_NAME
+ + "-consolidateStoreDevicesRemoved";
+
+ static final String DEVICE_SYNC_STORE_NAME = DeviceManagerImpl.class
+ .getCanonicalName() + ".stateStore";
+
+ /**
+ * Time interval between writes of entries for the same device to the sync
+ * store.
+ */
+ // static final int DEFAULT_SYNC_STORE_WRITE_INTERVAL_MS =
+ // 5*60*1000; // 5 min
+ // private int syncStoreWriteIntervalMs =
+ // DEFAULT_SYNC_STORE_WRITE_INTERVAL_MS;
+
+ /**
+ * Time after SLAVE->MASTER until we run the consolidate store code.
+ */
+ // static final int DEFAULT_INITIAL_SYNC_STORE_CONSOLIDATE_MS =
+ // 15*1000; // 15 sec
+ // private int initialSyncStoreConsolidateMs =
+ // DEFAULT_INITIAL_SYNC_STORE_CONSOLIDATE_MS;
+
+ /**
+ * Time interval between consolidate store runs.
+ */
+ // static final int DEFAULT_SYNC_STORE_CONSOLIDATE_INTERVAL_MS =
+ // 75*60*1000; // 75 min
+ // private final int syncStoreConsolidateIntervalMs =
+ // DEFAULT_SYNC_STORE_CONSOLIDATE_INTERVAL_MS;
+
+ /**
+ * Time in milliseconds before entities will expire
+ */
+ protected static final int ENTITY_TIMEOUT = 60 * 60 * 1000;
+
+ /**
+ * Time in seconds between cleaning up old entities/devices
+ */
+ protected static final int ENTITY_CLEANUP_INTERVAL = 60 * 60;
+
+ /**
+ * This is the master device map that maps device IDs to {@link Device}
+ * objects.
+ */
+ protected ConcurrentHashMap<Long, Device> deviceMap;
+
+ /**
+ * Counter used to generate device keys
+ */
+ protected long deviceKeyCounter = 0;
+
+ /**
+ * Lock for incrementing the device key counter
+ */
+ protected Object deviceKeyLock = new Object();
+
+ /**
+ * This is the primary entity index that contains all entities
+ */
+ protected DeviceUniqueIndex primaryIndex;
+
+ /**
+ * This stores secondary indices over the fields in the devices
+ */
+ protected Map<EnumSet<DeviceField>, DeviceIndex> secondaryIndexMap;
+
+ /**
+ * This map contains state for each of the {@ref IEntityClass} that exist
+ */
+ protected ConcurrentHashMap<String, ClassState> classStateMap;
+
+ /**
+ * This is the list of indices we want on a per-class basis
+ */
+ protected Set<EnumSet<DeviceField>> perClassIndices;
+
+ /**
+ * The entity classifier currently in use
+ */
+ protected IEntityClassifierService entityClassifier;
+
+    /**
+     * Used to cache state about specific entity classes
+     */
+    protected class ClassState {
+
+        /**
+         * Per-class index over the class's key fields; left null when the
+         * class key fields equal the primary key fields (the primary index
+         * suffices in that case).
+         */
+        protected DeviceUniqueIndex classIndex;
+
+        /**
+         * This stores secondary indices over the fields in the device for the
+         * class
+         */
+        protected Map<EnumSet<DeviceField>, DeviceIndex> secondaryIndexMap;
+
+        /**
+         * Allocate a new {@link ClassState} object for the class
+         *
+         * @param clazz
+         *            the class to use for the state
+         */
+        public ClassState(IEntityClass clazz) {
+            EnumSet<DeviceField> keyFields = clazz.getKeyFields();
+            EnumSet<DeviceField> primaryKeyFields = entityClassifier
+                    .getKeyFields();
+            boolean keyFieldsMatchPrimary = primaryKeyFields.equals(keyFields);
+
+            // Only build a dedicated class index when the class keys differ
+            // from the primary keys.
+            if (!keyFieldsMatchPrimary)
+                classIndex = new DeviceUniqueIndex(keyFields);
+
+            // One secondary multi-index per globally requested field set.
+            secondaryIndexMap = new HashMap<EnumSet<DeviceField>, DeviceIndex>();
+            for (EnumSet<DeviceField> fields : perClassIndices) {
+                secondaryIndexMap.put(fields, new DeviceMultiIndex(fields));
+            }
+        }
+    }
+
+ /**
+ * Device manager event listeners reclassifyDeviceListeners are notified
+ * first before reconcileDeviceListeners. This is to make sure devices are
+ * correctly reclassified before reconciliation.
+ */
+ protected ListenerDispatcher<String, IDeviceListener> deviceListeners;
+
+    /**
+     * A device update event to be dispatched
+     */
+    protected static class DeviceUpdate {
+        /** The kind of change being reported. */
+        public enum Change {
+            ADD, DELETE, CHANGE;
+        }
+
+        /**
+         * The affected device
+         */
+        protected Device device;
+
+        /**
+         * The change that was made
+         */
+        protected Change change;
+
+        /**
+         * If not added, then this is the list of fields changed
+         */
+        protected EnumSet<DeviceField> fieldsChanged;
+
+        /**
+         * @param device
+         *            the affected device
+         * @param change
+         *            the kind of change
+         * @param fieldsChanged
+         *            fields changed, for CHANGE events
+         */
+        public DeviceUpdate(Device device, Change change,
+                EnumSet<DeviceField> fieldsChanged) {
+            super();
+            this.device = device;
+            this.change = change;
+            this.fieldsChanged = fieldsChanged;
+        }
+
+        /** Human-readable form: entity class, MAC, change and fields. */
+        @Override
+        public String toString() {
+            String devIdStr = device.getEntityClass().getName() + "::"
+                    + device.getMACAddressString();
+            return "DeviceUpdate [device=" + devIdStr + ", change=" + change
+                    + ", fieldsChanged=" + fieldsChanged + "]";
+        }
+
+    }
+
+    /**
+     * AttachmentPointComparator
+     *
+     * Compares two attachment points and returns the latest one. It is assumed
+     * that the two attachment points are in the same L2 domain.
+     *
+     * @author srini
+     */
+    protected class AttachmentPointComparator implements
+            Comparator<AttachmentPoint> {
+        public AttachmentPointComparator() {
+            super();
+        }
+
+        /**
+         * Order two attachment points; a negative result orders oldAP before
+         * newAP, a positive result orders newAP before oldAP.
+         */
+        @Override
+        public int compare(AttachmentPoint oldAP, AttachmentPoint newAP) {
+            // First compare based on L2 domain ID;
+
+            // XXX - missing functionality -- need topology
+            // long oldDomain = topology.getL2DomainId(oldSw);
+            // boolean oldBD = topology.isBroadcastDomainPort(oldSw, oldPort);
+            long oldDomain = 0;
+            boolean oldBD = false;
+
+            // XXX - missing functionality -- need topology
+            // long newDomain = topology.getL2DomainId(newSw);
+            // boolean newBD = topology.isBroadcastDomainPort(newSw, newPort);
+            long newDomain = 0;
+            boolean newBD = false;
+
+            // NOTE(review): with the domains hard-coded to 0 and the BD flags
+            // hard-coded to false above, the domain comparison and all the
+            // broadcast-domain branches below are currently inert; they only
+            // become live once topology integration is restored.
+            if (oldDomain < newDomain)
+                return -1;
+            else if (oldDomain > newDomain)
+                return 1;
+
+            // Give preference to OFPP_LOCAL always
+            if (!oldAP.getPort().getType().equals(NodeConnectorIDType.SWSTACK)
+                    && newAP.getPort().getType()
+                            .equals(NodeConnectorIDType.SWSTACK)) {
+                return -1;
+            } else if (oldAP.getPort().getType()
+                    .equals(NodeConnectorIDType.SWSTACK)
+                    && !newAP.getPort().getType()
+                            .equals(NodeConnectorIDType.SWSTACK)) {
+                return 1;
+            }
+
+            // We expect that the last seen of the new AP is higher than
+            // old AP, if it is not, just reverse and send the negative
+            // of the result.
+            if (oldAP.getActiveSince() > newAP.getActiveSince())
+                return -compare(newAP, oldAP);
+
+            long activeOffset = 0;
+            // XXX - missing functionality -- need topology
+            // if (!topology.isConsistent(oldSw, oldPort, newSw, newPort)) {
+            if (!newBD && oldBD) {
+                return -1;
+            }
+            if (newBD && oldBD) {
+                activeOffset = AttachmentPoint.EXTERNAL_TO_EXTERNAL_TIMEOUT;
+            } else if (newBD && !oldBD) {
+                activeOffset = AttachmentPoint.OPENFLOW_TO_EXTERNAL_TIMEOUT;
+            }
+
+            // } else {
+            // // The attachment point is consistent.
+            // activeOffset = AttachmentPoint.CONSISTENT_TIMEOUT;
+            // }
+
+            // Final tie-break on activity/last-seen times with the offset
+            // computed above (currently always 0 -- see NOTE above).
+            if ((newAP.getActiveSince() > oldAP.getLastSeen() + activeOffset)
+                    || (newAP.getLastSeen() > oldAP.getLastSeen()
+                            + AttachmentPoint.INACTIVITY_INTERVAL)) {
+                return -1;
+            }
+            return 1;
+        }
+    }
+
+    /**
+     * Comparator used to order attachment points (see
+     * AttachmentPointComparator); the original "by cluster ID" comment did
+     * not match the comparator's implementation.
+     */
+    public AttachmentPointComparator apComparator;
+
+    /**
+     * Switch ports where attachment points shouldn't be learned
+     */
+    private Set<SwitchPort> suppressAPs;
+
+    /**
+     * Periodic task to clean up expired entities
+     */
+    public SingletonTask entityCleanupTask;
+
+    // ********************
+    // Dependency injection
+    // ********************
+
+    /** Dependency-injection setter for the data packet service. */
+    void setDataPacketService(IDataPacketService s) {
+        this.dataPacketService = s;
+    }
+
+    /** Clears the data packet service, but only if it is the one injected. */
+    void unsetDataPacketService(IDataPacketService s) {
+        if (this.dataPacketService == s) {
+            this.dataPacketService = null;
+        }
+    }
+
+    /** Dependency-injection setter for the topology manager. */
+    public void setTopologyManager(ITopologyManager s) {
+        this.topology = s;
+    }
+
+    /** Clears the topology manager, but only if it is the one injected. */
+    public void unsetTopologyManager(ITopologyManager s) {
+        if (this.topology == s) {
+            logger.debug("Topology Manager Service removed!");
+            this.topology = null;
+        }
+    }
+
+    // True while the component is stopped; checked by the periodic cleanup
+    // task to avoid rescheduling itself after shutdown.
+    private volatile boolean stopped = true;
+    // Executor backing the entity cleanup task; created in start().
+    private ScheduledExecutorService ses;
+
+    /** Stop the component: flag shutdown and cancel the cleanup executor. */
+    public void stop() {
+        stopped = true;
+        if (ses != null)
+            ses.shutdownNow();
+    }
+
+    /**
+     * Initialize all indexing/listener state and kick off the periodic
+     * entity-cleanup task. Several integrations (topology listener, REST,
+     * sync store) are commented out pending missing functionality.
+     */
+    public void start() {
+        this.perClassIndices = new HashSet<EnumSet<DeviceField>>();
+
+        // XXX - TODO need to make it possible to register a non-default
+        // classifier
+        entityClassifier = new DefaultEntityClassifier();
+        this.deviceListeners = new ListenerDispatcher<String, IDeviceListener>();
+        this.suppressAPs = Collections
+                .newSetFromMap(new ConcurrentHashMap<SwitchPort, Boolean>());
+        primaryIndex = new DeviceUniqueIndex(entityClassifier.getKeyFields());
+        secondaryIndexMap = new HashMap<EnumSet<DeviceField>, DeviceIndex>();
+
+        deviceMap = new ConcurrentHashMap<Long, Device>();
+        classStateMap = new ConcurrentHashMap<String, ClassState>();
+        apComparator = new AttachmentPointComparator();
+
+        // Always maintain a per-class secondary index on IPv4 address.
+        addIndex(true, EnumSet.of(DeviceField.IPV4));
+
+        // floodlightProvider.addOFMessageListener(OFType.PACKET_IN, this);
+        // floodlightProvider.addHAListener(this.haListenerDelegate);
+        // if (topology != null)
+        // topology.addListener(this);
+        // flowReconcileMgr.addFlowReconcileListener(this);
+        // entityClassifier.addListener(this);
+
+        stopped = false;
+        // XXX - Should use a common threadpool but this doesn't currently exist
+        ses = Executors.newScheduledThreadPool(1);
+        // Self-rescheduling cleanup: runs, then re-arms itself unless the
+        // component has been stopped in the meantime.
+        Runnable ecr = new Runnable() {
+            @Override
+            public void run() {
+                cleanupEntities();
+                if (!stopped)
+                    entityCleanupTask.reschedule(ENTITY_CLEANUP_INTERVAL,
+                            TimeUnit.SECONDS);
+            }
+        };
+        entityCleanupTask = new SingletonTask(ses, ecr);
+        entityCleanupTask.reschedule(ENTITY_CLEANUP_INTERVAL, TimeUnit.SECONDS);
+
+        /*
+         * XXX Missing functionality if (restApi != null) {
+         * restApi.addRestletRoutable(new DeviceRoutable()); } else {
+         * logger.debug("Could not instantiate REST API"); }
+         */
+
+        registerDeviceManagerDebugCounters();
+
+        /*
+         * XXX Missing functionality try {
+         * this.syncService.registerStore(DEVICE_SYNC_STORE_NAME, Scope.LOCAL);
+         * this.storeClient = this.syncService
+         * .getStoreClient(DEVICE_SYNC_STORE_NAME, String.class,
+         * DeviceSyncRepresentation.class); } catch (SyncException e) { throw
+         * new FloodlightModuleException("Error while setting up sync service",
+         * e); }
+         *
+         * Runnable consolidateStoreRunner = new Runnable() {
+         *
+         * @Override public void run() { deviceSyncManager.consolidateStore();
+         * storeConsolidateTask.reschedule(syncStoreConsolidateIntervalMs,
+         * TimeUnit.MILLISECONDS); debugCounters.flushCounters(); } };
+         * storeConsolidateTask = new SingletonTask(ses,
+         * consolidateStoreRunner); if (isMaster)
+         * storeConsolidateTask.reschedule(syncStoreConsolidateIntervalMs,
+         * TimeUnit.MILLISECONDS);
+         */
+    }
+
+ /**
+ * Periodic task to consolidate entries in the store. I.e., delete entries
+ * in the store that are not known to DeviceManager
+ */
+ // XXX - Missing functionality
+ // private SingletonTask storeConsolidateTask;
+
+ // *********************
+ // IDeviceManagerService
+ // *********************
+
+    @Override
+    public IDevice getDevice(Long deviceKey) {
+        // Direct lookup in the primary device map by internal device key.
+        // NOTE(review): a null deviceKey will NPE in ConcurrentHashMap.get —
+        // callers are presumed to pass a non-null key; confirm.
+        return deviceMap.get(deviceKey);
+    }
+
+    /**
+     * Find a device by fully-specified entity fields. Non-positive VLANs and
+     * a zero IPv4 address are normalized to null (treated as unspecified).
+     *
+     * @throws IllegalArgumentException
+     *             if the resulting entity is missing a key field required by
+     *             the active entity classifier
+     */
+    @Override
+    public IDevice findDevice(long macAddress, Short vlan, Integer ipv4Address,
+            NodeConnector port) throws IllegalArgumentException {
+        if (vlan != null && vlan.shortValue() <= 0)
+            vlan = null;
+        if (ipv4Address != null && ipv4Address == 0)
+            ipv4Address = null;
+        Entity e = new Entity(macAddress, vlan, ipv4Address, port, null);
+        if (!allKeyFieldsPresent(e, entityClassifier.getKeyFields())) {
+            throw new IllegalArgumentException("Not all key fields specified."
+                    + " Required fields: " + entityClassifier.getKeyFields());
+        }
+        return findDeviceByEntity(e);
+    }
+
+    /**
+     * Find a device within the given entity class by entity fields (no
+     * attachment point). Non-positive VLANs and a zero IPv4 address are
+     * normalized to null.
+     *
+     * @throws IllegalArgumentException
+     *             if entityClass is null or a key field of that class is
+     *             missing from the entity
+     */
+    @Override
+    public IDevice findClassDevice(IEntityClass entityClass, long macAddress,
+            Short vlan, Integer ipv4Address) throws IllegalArgumentException {
+        if (vlan != null && vlan.shortValue() <= 0)
+            vlan = null;
+        if (ipv4Address != null && ipv4Address == 0)
+            ipv4Address = null;
+        Entity e = new Entity(macAddress, vlan, ipv4Address, null, null);
+        if (entityClass == null
+                || !allKeyFieldsPresent(e, entityClass.getKeyFields())) {
+            // Report the key fields of the class actually checked, not the
+            // default classifier's — they may differ.
+            throw new IllegalArgumentException("Not all key fields and/or "
+                    + " no source device specified. Required fields: "
+                    + ((entityClass == null) ? entityClassifier.getKeyFields()
+                            : entityClass.getKeyFields()));
+        }
+        return findDestByEntity(entityClass, e);
+    }
+
+    /** @return an unmodifiable live view over every currently known device */
+    @Override
+    public Collection<? extends IDevice> getAllDevices() {
+        Collection<Device> knownDevices = deviceMap.values();
+        return Collections.unmodifiableCollection(knownDevices);
+    }
+
+    /**
+     * Register a secondary index over the given key fields, either applied
+     * per entity class or globally across all devices.
+     */
+    @Override
+    public void addIndex(boolean perClass, EnumSet<DeviceField> keyFields) {
+        if (!perClass) {
+            secondaryIndexMap.put(keyFields, new DeviceMultiIndex(keyFields));
+        } else {
+            perClassIndices.add(keyFields);
+        }
+    }
+
+    /**
+     * Query devices by any combination of fields (null = wildcard). The
+     * index-vs-full-scan decision lives in getDeviceIteratorForQuery(); this
+     * method previously duplicated that logic line for line, so delegate to
+     * keep a single copy.
+     */
+    @Override
+    public Iterator<? extends IDevice> queryDevices(Long macAddress,
+            Short vlan, Integer ipv4Address, NodeConnector port) {
+        return getDeviceIteratorForQuery(macAddress, vlan, ipv4Address, port);
+    }
+
+    /**
+     * Query devices restricted to one entity class. Uses, in order of
+     * preference: a per-class secondary index matching exactly the non-null
+     * query fields, the class index, or a filtered full-table scan.
+     */
+    @Override
+    public Iterator<? extends IDevice> queryClassDevices(
+            IEntityClass entityClass, Long macAddress, Short vlan,
+            Integer ipv4Address, NodeConnector port) {
+        ArrayList<Iterator<Device>> iterators = new ArrayList<Iterator<Device>>();
+        ClassState classState = getClassState(entityClass);
+
+        DeviceIndex index = null;
+        if (classState.secondaryIndexMap.size() > 0) {
+            EnumSet<DeviceField> keys = getEntityKeys(macAddress, vlan,
+                    ipv4Address, port);
+            index = classState.secondaryIndexMap.get(keys);
+        }
+
+        Iterator<Device> iter;
+        if (index == null) {
+            index = classState.classIndex;
+            if (index == null) {
+                // scan all devices, filtering by entity class in the iterator
+                return new DeviceIterator(deviceMap.values().iterator(),
+                        new IEntityClass[] { entityClass }, macAddress, vlan,
+                        ipv4Address, port);
+            } else {
+                // scan the entire class
+                iter = new DeviceIndexInterator(this, index.getAll());
+            }
+        } else {
+            // index lookup
+            Entity entity = new Entity((macAddress == null ? 0 : macAddress),
+                    vlan, ipv4Address, port, null);
+            iter = new DeviceIndexInterator(this, index.queryByEntity(entity));
+        }
+        iterators.add(iter);
+
+        return new MultiIterator<Device>(iterators.iterator());
+    }
+
+    /**
+     * Build a device iterator for the given query fields (null = wildcard):
+     * uses a secondary index whose key set exactly matches the non-null
+     * fields when one exists, otherwise falls back to a full table scan. The
+     * returned DeviceIterator re-checks each candidate against the query.
+     */
+    protected Iterator<Device> getDeviceIteratorForQuery(Long macAddress,
+            Short vlan, Integer ipv4Address, NodeConnector port) {
+        DeviceIndex index = null;
+        if (secondaryIndexMap.size() > 0) {
+            EnumSet<DeviceField> keys = getEntityKeys(macAddress, vlan,
+                    ipv4Address, port);
+            index = secondaryIndexMap.get(keys);
+        }
+
+        Iterator<Device> deviceIterator = null;
+        if (index == null) {
+            // Do a full table scan
+            deviceIterator = deviceMap.values().iterator();
+        } else {
+            // index lookup
+            Entity entity = new Entity((macAddress == null ? 0 : macAddress),
+                    vlan, ipv4Address, port, null);
+            deviceIterator = new DeviceIndexInterator(this,
+                    index.queryByEntity(entity));
+        }
+
+        DeviceIterator di = new DeviceIterator(deviceIterator, null,
+                macAddress, vlan, ipv4Address, port);
+        return di;
+    }
+
+    @Override
+    public void addListener(IDeviceListener listener) {
+        // All device listeners share the single "device" dispatcher category.
+        deviceListeners.addListener("device", listener);
+        logListeners();
+    }
+
+ @Override
+ public void addSuppressAPs(NodeConnector port) {
+ suppressAPs.add(new SwitchPort(port));
+ }
+
+ @Override
+ public void removeSuppressAPs(NodeConnector port) {
+ suppressAPs.remove(new SwitchPort(port));
+ }
+
+    /** @return a read-only view of the suppressed attachment points */
+    @Override
+    public Set<SwitchPort> getSuppressAPs() {
+        Set<SwitchPort> readOnlyView = Collections.unmodifiableSet(suppressAPs);
+        return readOnlyView;
+    }
+
+    /** Emit a debug line listing the currently registered device listeners. */
+    private void logListeners() {
+        List<IDeviceListener> listeners = deviceListeners.getOrderedListeners();
+        // Skip building the string entirely when debug logging is off.
+        if (listeners != null && logger.isDebugEnabled()) {
+            // StringBuilder: the buffer is method-local, no synchronization
+            // needed (StringBuffer was overkill here).
+            StringBuilder sb = new StringBuilder();
+            sb.append("DeviceListeners: ");
+            for (IDeviceListener l : listeners) {
+                sb.append(l.getName());
+                sb.append(",");
+            }
+            logger.debug(sb.toString());
+        }
+    }
+
+ // ***************
+ // IFlowReconcileListener
+ // ***************
+ /*
+ * XXX - Missing functionality
+ *
+ * @Override public Command reconcileFlows(ArrayList<OFMatchReconcile>
+ * ofmRcList) { ListIterator<OFMatchReconcile> iter =
+ * ofmRcList.listIterator(); while (iter.hasNext()) { OFMatchReconcile ofm =
+ * iter.next();
+ *
+ * // Remove the STOPPed flow. if (Command.STOP == reconcileFlow(ofm)) {
+ * iter.remove(); } }
+ *
+ * if (ofmRcList.size() > 0) { return Command.CONTINUE; } else { return
+ * Command.STOP; } }
+ *
+ * protected Command reconcileFlow(OFMatchReconcile ofm) {
+ * debugCounters.updateCounter(CNT_RECONCILE_REQUEST); // Extract source
+ * entity information Entity srcEntity =
+ * getEntityFromFlowMod(ofm.ofmWithSwDpid, true); if (srcEntity == null) {
+ * debugCounters.updateCounter(CNT_RECONCILE_NO_SOURCE); return
+ * Command.STOP; }
+ *
+ * // Find the device by source entity Device srcDevice =
+ * findDeviceByEntity(srcEntity); if (srcDevice == null) {
+ * debugCounters.updateCounter(CNT_RECONCILE_NO_SOURCE); return
+ * Command.STOP; } // Store the source device in the context
+ * fcStore.put(ofm.cntx, CONTEXT_SRC_DEVICE, srcDevice);
+ *
+ * // Find the device matching the destination from the entity // classes of
+ * the source. Entity dstEntity = getEntityFromFlowMod(ofm.ofmWithSwDpid,
+ * false); Device dstDevice = null; if (dstEntity != null) { dstDevice =
+ * findDestByEntity(srcDevice.getEntityClass(), dstEntity); if (dstDevice !=
+ * null) fcStore.put(ofm.cntx, CONTEXT_DST_DEVICE, dstDevice); else
+ * debugCounters.updateCounter(CNT_RECONCILE_NO_DEST); } else {
+ * debugCounters.updateCounter(CNT_RECONCILE_NO_DEST); } if
+ * (logger.isTraceEnabled()) {
+ * logger.trace("Reconciling flow: match={}, srcEntity={}, srcDev={}, " +
+ * "dstEntity={}, dstDev={}", new Object[] {ofm.ofmWithSwDpid.getOfMatch(),
+ * srcEntity, srcDevice, dstEntity, dstDevice } ); } return
+ * Command.CONTINUE; }
+ */
+
+ // *****************
+ // IListenDataPacket
+ // *****************
+
+    /**
+     * Process an inbound data packet: extract the source entity, learn or
+     * refresh the corresponding device, and let the packet continue up the
+     * chain.
+     *
+     * @param inPkt
+     *            the raw inbound packet; null is tolerated and ignored
+     * @return IGNORED for null/non-Ethernet packets, CONSUME when no source
+     *         device can be determined, KEEP_PROCESSING otherwise
+     */
+    @Override
+    public PacketResult receiveDataPacket(RawPacket inPkt) {
+        // XXX - Can this really pass in null? Why would you ever want that?
+        if (inPkt == null) {
+            return PacketResult.IGNORED;
+        }
+        // (Removed leftover debug scaffolding that threw and caught a
+        // "Sample" exception to log a stack trace on every packet.)
+
+        Packet formattedPak = this.dataPacketService.decodeDataPacket(inPkt);
+        Ethernet eth;
+        if (formattedPak instanceof Ethernet) {
+            eth = (Ethernet) formattedPak;
+        } else {
+            return PacketResult.IGNORED;
+        }
+
+        // Extract source entity information
+        NodeConnector inPort = inPkt.getIncomingNodeConnector();
+        Entity srcEntity = getSourceEntityFromPacket(eth, inPort);
+        if (srcEntity == null) {
+            // debugCounters.updateCounter(CNT_BROADCAST_SOURCE);
+            return PacketResult.CONSUME;
+        }
+
+        // Learn from ARP packet for special VRRP settings.
+        // In VRRP settings, the source MAC address and sender MAC
+        // addresses can be different. In such cases, we need to learn
+        // the IP to MAC mapping of the VRRP IP address. The source
+        // entity will not have that information. Hence, a separate call
+        // to learn devices in such cases.
+        learnDeviceFromArpResponseData(eth, inPort);
+
+        // Learn/lookup device information
+        Device srcDevice = learnDeviceByEntity(srcEntity);
+        if (srcDevice == null) {
+            // debugCounters.updateCounter(CNT_NO_SOURCE);
+            return PacketResult.CONSUME;
+        }
+        logger.trace("Saw packet from device {}", srcDevice);
+
+        // // Store the source device in the context
+        // fcStore.put(cntx, CONTEXT_SRC_DEVICE, srcDevice);
+        //
+        // // Find the device matching the destination from the entity
+        // // classes of the source.
+        // Entity dstEntity = getDestEntityFromPacket(eth);
+        // Device dstDevice = null;
+        // if (dstEntity != null) {
+        // dstDevice =
+        // findDestByEntity(srcDevice.getEntityClass(), dstEntity);
+        // if (dstDevice != null)
+        // fcStore.put(cntx, CONTEXT_DST_DEVICE, dstDevice);
+        // //else
+        // //debugCounters.updateCounter(CNT_NO_DEST);
+        // } else {
+        // //debugCounters.updateCounter(CNT_NO_DEST);
+        // }
+        //
+        // if (logger.isTraceEnabled()) {
+        // logger.trace("Received PI: {} on switch {}, port {} *** eth={}" +
+        // " *** srcDev={} *** dstDev={} *** ",
+        // new Object[] { pi, sw.getStringId(), pi.getInPort(), eth,
+        // srcDevice, dstDevice });
+        // }
+        //
+        // snoopDHCPClientName(eth, srcDevice);
+
+        return PacketResult.KEEP_PROCESSING;
+    }
+
+ // ****************
+ // Internal methods
+ // ****************
+
+ /**
+ * Snoop and record client-provided host name from DHCP requests
+ *
+ * @param eth
+ * @param srcDevice
+ */
+ // private void snoopDHCPClientName(Ethernet eth, Device srcDevice) {
+ // if (! (eth.getPayload() instanceof IPv4) )
+ // return;
+ // IPv4 ipv4 = (IPv4) eth.getPayload();
+ // if (! (ipv4.getPayload() instanceof UDP) )
+ // return;
+ // UDP udp = (UDP) ipv4.getPayload();
+ // if (!(udp.getPayload() instanceof DHCP))
+ // return;
+ // DHCP dhcp = (DHCP) udp.getPayload();
+ // byte opcode = dhcp.getOpCode();
+ // if (opcode == DHCP.OPCODE_REQUEST) {
+ // DHCPOption dhcpOption = dhcp.getOption(
+ // DHCPOptionCode.OptionCode_Hostname);
+ // if (dhcpOption != null) {
+ // debugCounters.updateCounter(CNT_DHCP_CLIENT_NAME_SNOOPED);
+ // srcDevice.dhcpClientName = new String(dhcpOption.getData());
+ // }
+ // }
+ // }
+
+    /**
+     * Check whether the given attachment point is valid given the current
+     * topology
+     *
+     * @param port
+     *            the node connector to validate
+     * @return true if it's a valid attachment point
+     */
+    public boolean isValidAttachmentPoint(NodeConnector port) {
+        // XXX - missing functionality -- need topology module
+        // if (topology.isAttachmentPointPort(port) == false)
+        // return false;
+        // NOTE(review): assumes topology and switchManager have been wired;
+        // a lifecycle race would NPE here — confirm service-binding order.
+        if (topology.isInternal(port))
+            return false;
+        if (!switchManager.isNodeConnectorEnabled(port))
+            return false;
+        if (suppressAPs.contains(new SwitchPort(port)))
+            return false;
+
+        return true;
+    }
+
+    /**
+     * Get the sender IP address from the packet if it is an ARP packet whose
+     * sender hardware address matches the given source MAC.
+     *
+     * @param eth
+     *            the decoded Ethernet frame
+     * @param dlAddr
+     *            the frame's source MAC as a long
+     * @return the sender IPv4 address, or 0 if not an ARP/IP match
+     */
+    private int getSrcNwAddr(Ethernet eth, long dlAddr) {
+        if (eth.getPayload() instanceof ARP) {
+            ARP arp = (ARP) eth.getPayload();
+            if ((arp.getProtocolType() == ARP.PROTO_TYPE_IP)
+                    && (toLong(arp.getSenderHardwareAddress()) == dlAddr)) {
+                return toIPv4Address(arp.getSenderProtocolAddress());
+            }
+        }
+        return 0;
+    }
+
+    /**
+     * Parse a source entity from an {@link Ethernet} packet.
+     *
+     * @param eth
+     *            the packet to parse
+     * @param port
+     *            the node connector on which the packet arrived
+     * @return the entity from the packet, or null for broadcast/multicast
+     *         source MACs
+     */
+    protected Entity getSourceEntityFromPacket(Ethernet eth, NodeConnector port) {
+        byte[] dlAddrArr = eth.getSourceMACAddress();
+        long dlAddr = toLong(dlAddrArr);
+
+        // Ignore broadcast/multicast source
+        if ((dlAddrArr[0] & 0x1) != 0)
+            return null;
+
+        // XXX missing functionality
+        // short vlan = 0;
+        int nwSrc = getSrcNwAddr(eth, dlAddr);
+        // 0 means "no IP learned" and is stored as null on the entity.
+        return new Entity(dlAddr, null, ((nwSrc != 0) ? nwSrc : null), port,
+                new Date());
+    }
+
+    /**
+     * Learn a device from ARP payload data when the Ethernet source MAC
+     * differs from the ARP sender hardware address (e.g. VRRP virtual MACs),
+     * since the regular source entity cannot capture that mapping.
+     */
+    protected void learnDeviceFromArpResponseData(Ethernet eth,
+            NodeConnector port) {
+
+        if (!(eth.getPayload() instanceof ARP))
+            return;
+        ARP arp = (ARP) eth.getPayload();
+
+        long frameSourceMac = toLong(eth.getSourceMACAddress());
+        byte[] senderMacArr = arp.getSenderHardwareAddress();
+        long senderMac = toLong(senderMacArr);
+
+        // Nothing extra to learn when the frame source already matches the
+        // ARP sender.
+        if (frameSourceMac == senderMac)
+            return;
+
+        // Ignore broadcast/multicast sender hardware addresses.
+        if ((senderMacArr[0] & 0x1) != 0)
+            return;
+
+        // short vlan = eth.getVlanID();
+        int senderIp = toIPv4Address(arp.getSenderProtocolAddress());
+
+        Entity arpEntity = new Entity(senderMac, null,
+                ((senderIp != 0) ? senderIp : null), port, new Date());
+
+        learnDeviceByEntity(arpEntity);
+    }
+
+ /**
+ * Get a (partial) entity for the destination from the packet.
+ *
+ * @param eth
+ * @return
+ */
+ // protected Entity getDestEntityFromPacket(Ethernet eth) {
+ // byte[] dlAddrArr = eth.getDestinationMACAddress();
+ // long dlAddr = Ethernet.toLong(dlAddrArr);
+ // short vlan = eth.getVlanID();
+ // int nwDst = 0;
+ //
+ // // Ignore broadcast/multicast destination
+ // if ((dlAddrArr[0] & 0x1) != 0)
+ // return null;
+ //
+ // if (eth.getPayload() instanceof IPv4) {
+ // IPv4 ipv4 = (IPv4) eth.getPayload();
+ // nwDst = ipv4.getDestinationAddress();
+ // }
+ //
+ // return new Entity(dlAddr,
+ // ((vlan >= 0) ? vlan : null),
+ // ((nwDst != 0) ? nwDst : null),
+ // null,
+ // null,
+ // null);
+ // }
+
+ /**
+ * Parse an entity from an OFMatchWithSwDpid.
+ *
+ * @param ofmWithSwDpid
+ * @return the entity from the packet
+ */
+ // private Entity getEntityFromFlowMod(OFMatchWithSwDpid ofmWithSwDpid,
+ // boolean isSource) {
+ // byte[] dlAddrArr = ofmWithSwDpid.getOfMatch().getDataLayerSource();
+ // int nwSrc = ofmWithSwDpid.getOfMatch().getNetworkSource();
+ // if (!isSource) {
+ // dlAddrArr = ofmWithSwDpid.getOfMatch().getDataLayerDestination();
+ // nwSrc = ofmWithSwDpid.getOfMatch().getNetworkDestination();
+ // }
+ //
+ // long dlAddr = Ethernet.toLong(dlAddrArr);
+ //
+ // // Ignore broadcast/multicast source
+ // if ((dlAddrArr[0] & 0x1) != 0)
+ // return null;
+ //
+ // Long swDpid = null;
+ // Short inPort = null;
+ //
+ // if (isSource) {
+ // swDpid = ofmWithSwDpid.getSwitchDataPathId();
+ // inPort = ofmWithSwDpid.getOfMatch().getInputPort();
+ // }
+ //
+ // /**for the new flow cache design, the flow mods retrived are not always
+ // from the source, learn AP should be disabled --meiyang*/
+ // boolean learnap = false;
+ // /**
+ // * if (swDpid == null ||
+ // inPort == null ||
+ // !isValidAttachmentPoint(swDpid, inPort)) {
+ // // If this is an internal port or we otherwise don't want
+ // // to learn on these ports. In the future, we should
+ // // handle this case by labeling flows with something that
+ // // will give us the entity class. For now, we'll do our
+ // // best assuming attachment point information isn't used
+ // // as a key field.
+ // learnap = false;
+ // }
+ // */
+ //
+ // short vlan = ofmWithSwDpid.getOfMatch().getDataLayerVirtualLan();
+ // return new Entity(dlAddr,
+ // ((vlan >= 0) ? vlan : null),
+ // ((nwSrc != 0) ? nwSrc : null),
+ // (learnap ? swDpid : null),
+ // (learnap ? (int)inPort : null),
+ // new Date());
+ // }
+
+    /**
+     * Look up a {@link Device} based on the provided {@link Entity}. We first
+     * check the primary index. If we do not find an entry there we classify the
+     * device into its IEntityClass and query the classIndex. This implies that
+     * all key field of the current IEntityClassifier must be present in the
+     * entity for the lookup to succeed!
+     *
+     * @param entity
+     *            the entity to search for
+     * @return The {@link Device} object if found
+     */
+    protected Device findDeviceByEntity(Entity entity) {
+        // Look up the fully-qualified entity to see if it already
+        // exists in the primary entity index.
+        Long deviceKey = primaryIndex.findByEntity(entity);
+        IEntityClass entityClass = null;
+
+        if (deviceKey == null) {
+            // If the entity does not exist in the primary entity index,
+            // use the entity classifier for find the classes for the
+            // entity. Look up the entity in the returned class'
+            // class entity index.
+            entityClass = entityClassifier.classifyEntity(entity);
+            if (entityClass == null) {
+                return null;
+            }
+            ClassState classState = getClassState(entityClass);
+
+            if (classState.classIndex != null) {
+                deviceKey = classState.classIndex.findByEntity(entity);
+            }
+        }
+        if (deviceKey == null)
+            return null;
+        // Key found in either index: resolve it through the device map.
+        return deviceMap.get(deviceKey);
+    }
+
+    /**
+     * Get a destination device using entity fields that corresponds with the
+     * given source device. The source device is important since there could be
+     * ambiguity in the destination device without the attachment point
+     * information.
+     *
+     * @param reference
+     *            the source device's entity class. The returned destination
+     *            will be in the same entity class as the source.
+     * @param dstEntity
+     *            the entity to look up
+     * @return an {@link Device} or null if no device is found.
+     */
+    protected Device findDestByEntity(IEntityClass reference, Entity dstEntity) {
+
+        // Look up the fully-qualified entity to see if it
+        // exists in the primary entity index
+        Long deviceKey = primaryIndex.findByEntity(dstEntity);
+
+        if (deviceKey == null) {
+            // This could happen because:
+            // 1) no destination known, or a broadcast destination
+            // 2) if we have attachment point key fields since
+            // attachment point information isn't available for
+            // destination devices.
+            // For the second case, we'll need to match up the
+            // destination device with the class of the source
+            // device.
+            ClassState classState = getClassState(reference);
+            if (classState.classIndex == null) {
+                return null;
+            }
+            deviceKey = classState.classIndex.findByEntity(dstEntity);
+        }
+        if (deviceKey == null)
+            return null;
+        return deviceMap.get(deviceKey);
+    }
+
+ /**
+ * Look up a {@link Device} within a particular entity class based on the
+ * provided {@link Entity}.
+ *
+ * @param clazz
+ * the entity class to search for the entity
+ * @param entity
+ * the entity to search for
+ * @return The {@link Device} object if found private Device
+ * findDeviceInClassByEntity(IEntityClass clazz, Entity entity) { //
+ * XXX - TODO throw new UnsupportedOperationException(); }
+ */
+
+    /**
+     * Look up a {@link Device} based on the provided {@link Entity}. Also
+     * learns based on the new entity, and will update existing devices as
+     * required.
+     *
+     * @param entity
+     *            the {@link Entity}
+     * @return The {@link Device} object if found
+     */
+    protected Device learnDeviceByEntity(Entity entity) {
+        // Per-packet hot path: trace, not info (was logger.info, which
+        // flooded the log at default levels).
+        logger.trace("Primary index {}", primaryIndex);
+        ArrayList<Long> deleteQueue = null;
+        LinkedList<DeviceUpdate> deviceUpdates = null;
+        Device device = null;
+
+        // we may need to restart the learning process if we detect
+        // concurrent modification. Note that we ensure that at least
+        // one thread should always succeed so we don't get into infinite
+        // starvation loops
+        while (true) {
+            deviceUpdates = null;
+
+            // Look up the fully-qualified entity to see if it already
+            // exists in the primary entity index.
+            Long deviceKey = primaryIndex.findByEntity(entity);
+            IEntityClass entityClass = null;
+
+            if (deviceKey == null) {
+                // If the entity does not exist in the primary entity index,
+                // use the entity classifier for find the classes for the
+                // entity. Look up the entity in the returned class'
+                // class entity index.
+                entityClass = entityClassifier.classifyEntity(entity);
+                if (entityClass == null) {
+                    // could not classify entity. No device
+                    device = null;
+                    break;
+                }
+                ClassState classState = getClassState(entityClass);
+
+                if (classState.classIndex != null) {
+                    deviceKey = classState.classIndex.findByEntity(entity);
+                }
+            }
+            if (deviceKey != null) {
+                // If the primary or secondary index contains the entity
+                // use resulting device key to look up the device in the
+                // device map, and use the referenced Device below.
+                device = deviceMap.get(deviceKey);
+                if (device == null) {
+                    // This can happen due to concurrent modification
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("No device for deviceKey {} while "
+                                + "while processing entity {}", deviceKey,
+                                entity);
+                    }
+                    // if so, then try again till we don't even get the device
+                    // key
+                    // and so we recreate the device
+                    continue;
+                }
+            } else {
+                // If the secondary index does not contain the entity,
+                // create a new Device object containing the entity, and
+                // generate a new device ID if the the entity is on an
+                // attachment point port. Otherwise ignore.
+                if (entity.hasSwitchPort()
+                        && !isValidAttachmentPoint(entity.getPort())) {
+                    // debugCounters.updateCounter(CNT_DEVICE_ON_INTERAL_PORT_NOT_LEARNED);
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("Not learning new device on internal"
+                                + " link: {}", entity);
+                    }
+                    device = null;
+                    break;
+                }
+                // Before we create the new device also check if
+                // the entity is allowed (e.g., for spoofing protection)
+                if (!isEntityAllowed(entity, entityClass)) {
+                    // debugCounters.updateCounter(CNT_PACKET_NOT_ALLOWED);
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("PacketIn is not allowed {} {}",
+                                entityClass.getName(), entity);
+                    }
+                    device = null;
+                    break;
+                }
+                synchronized (deviceKeyLock) {
+                    deviceKey = Long.valueOf(deviceKeyCounter++);
+                }
+                device = allocateDevice(deviceKey, entity, entityClass);
+
+                // Add the new device to the primary map with a simple put
+                deviceMap.put(deviceKey, device);
+
+                // update indices
+                if (!updateIndices(device, deviceKey)) {
+                    if (deleteQueue == null)
+                        deleteQueue = new ArrayList<Long>();
+                    deleteQueue.add(deviceKey);
+                    continue;
+                }
+
+                updateSecondaryIndices(entity, entityClass, deviceKey);
+
+                // We need to count and log here. If we log earlier we could
+                // hit a concurrent modification and restart the dev creation
+                // and potentially count the device twice.
+                // debugCounters.updateCounter(CNT_NEW_DEVICE);
+                if (logger.isDebugEnabled()) {
+                    logger.debug(
+                            "New device created: {} deviceKey={}, entity={}",
+                            new Object[] { device, deviceKey, entity });
+                }
+                // generate new device update
+                deviceUpdates = updateUpdates(deviceUpdates, new DeviceUpdate(
+                        device, ADD, null));
+
+                break;
+            }
+            // if it gets here, we have a pre-existing Device for this Entity
+            if (!isEntityAllowed(entity, device.getEntityClass())) {
+                // debugCounters.updateCounter(CNT_PACKET_NOT_ALLOWED);
+                if (logger.isDebugEnabled()) {
+                    // debug, not info: must match the isDebugEnabled() guard
+                    logger.debug("PacketIn is not allowed {} {}", device
+                            .getEntityClass().getName(), entity);
+                }
+                return null;
+            }
+            // If this is not an attachment point port we don't learn the new
+            // entity
+            // and don't update indexes. But we do allow the device to continue
+            // up
+            // the chain.
+            if (entity.hasSwitchPort()
+                    && !isValidAttachmentPoint(entity.getPort())) {
+                // debugCounters.updateCounter(CNT_PACKET_ON_INTERNAL_PORT_FOR_KNOWN_DEVICE);
+                break;
+            }
+            int entityindex = -1;
+            if ((entityindex = device.entityIndex(entity)) >= 0) {
+                // Entity already exists
+                // update timestamp on the found entity
+                Date lastSeen = entity.getLastSeenTimestamp();
+                if (lastSeen == null) {
+                    lastSeen = new Date();
+                    entity.setLastSeenTimestamp(lastSeen);
+                }
+                device.entities[entityindex].setLastSeenTimestamp(lastSeen);
+                // we break the loop after checking for changes to the AP
+            } else {
+                // New entity for this device
+                // compute the insertion point for the entity.
+                // see Arrays.binarySearch()
+                entityindex = -(entityindex + 1);
+                Device newDevice = allocateDevice(device, entity, entityindex);
+
+                // generate updates
+                EnumSet<DeviceField> changedFields = findChangedFields(device,
+                        entity);
+
+                // update the device map with a replace call
+                boolean res = deviceMap.replace(deviceKey, device, newDevice);
+                // If replace returns false, restart the process from the
+                // beginning (this implies another thread concurrently
+                // modified this Device).
+                if (!res)
+                    continue;
+
+                device = newDevice;
+                // update indices
+                if (!updateIndices(device, deviceKey)) {
+                    continue;
+                }
+                updateSecondaryIndices(entity, device.getEntityClass(),
+                        deviceKey);
+
+                // We need to count here after all the possible "continue"
+                // statements in this branch
+                // debugCounters.updateCounter(CNT_NEW_ENTITY);
+                if (changedFields.size() > 0) {
+                    // debugCounters.updateCounter(CNT_DEVICE_CHANGED);
+                    deviceUpdates = updateUpdates(deviceUpdates,
+                            new DeviceUpdate(newDevice, CHANGE, changedFields));
+                }
+                // we break the loop after checking for changed AP
+            }
+            // Update attachment point (will only be hit if the device
+            // already existed and no concurrent modification)
+            if (entity.hasSwitchPort()) {
+                boolean moved = device.updateAttachmentPoint(entity.getPort(),
+                        entity.getLastSeenTimestamp().getTime());
+                // TODO: use update mechanism instead of sending the
+                // notification directly
+                if (moved) {
+                    // we count device moved events in
+                    // sendDeviceMovedNotification()
+                    sendDeviceMovedNotification(device);
+                    if (logger.isTraceEnabled()) {
+                        logger.trace("Device moved: attachment points {},"
+                                + "entities {}", device.attachmentPoints,
+                                device.entities);
+                    }
+                } else {
+                    if (logger.isTraceEnabled()) {
+                        logger.trace("Device attachment point updated: "
+                                + "attachment points {}," + "entities {}",
+                                device.attachmentPoints, device.entities);
+                    }
+                }
+            }
+            break;
+        }
+
+        // Roll back any devices whose index update lost a race above.
+        if (deleteQueue != null) {
+            for (Long l : deleteQueue) {
+                Device dev = deviceMap.get(l);
+                this.deleteDevice(dev);
+            }
+        }
+
+        processUpdates(deviceUpdates);
+        // deviceSyncManager.storeDeviceThrottled(device);
+
+        return device;
+    }
+
+    // Extension hook (e.g. for spoofing protection): subclasses may veto
+    // learning an entity. The default implementation allows everything.
+    protected boolean isEntityAllowed(Entity entity, IEntityClass entityClass) {
+        return true;
+    }
+
+    /**
+     * Compute which device fields (IPV4, VLAN, SWITCHPORT) would change if
+     * {@code newEntity} were added to {@code device}: a field counts as
+     * changed when the new entity carries a value for it that no existing
+     * entity of the device already has.
+     */
+    protected EnumSet<DeviceField> findChangedFields(Device device,
+            Entity newEntity) {
+        EnumSet<DeviceField> changedFields = EnumSet.of(DeviceField.IPV4,
+                DeviceField.VLAN, DeviceField.SWITCHPORT);
+
+        // Fields absent from the new entity cannot change anything.
+        if (newEntity.getIpv4Address() == null)
+            changedFields.remove(DeviceField.IPV4);
+        if (newEntity.getVlan() == null)
+            changedFields.remove(DeviceField.VLAN);
+        if (newEntity.getPort() == null)
+            changedFields.remove(DeviceField.SWITCHPORT);
+
+        if (changedFields.size() == 0)
+            return changedFields;
+
+        // Drop a field once any existing entity already carries the value.
+        for (Entity entity : device.getEntities()) {
+            if (newEntity.getIpv4Address() == null
+                    || (entity.getIpv4Address() != null && entity
+                            .getIpv4Address()
+                            .equals(newEntity.getIpv4Address())))
+                changedFields.remove(DeviceField.IPV4);
+            if (newEntity.getVlan() == null
+                    || (entity.getVlan() != null && entity.getVlan().equals(
+                            newEntity.getVlan())))
+                changedFields.remove(DeviceField.VLAN);
+            if (newEntity.getPort() == null
+                    || (entity.getPort() != null && entity.getPort().equals(
+                            newEntity.getPort())))
+                changedFields.remove(DeviceField.SWITCHPORT);
+        }
+
+        return changedFields;
+    }
+
+    /**
+     * Drain the update queue and deliver each update to the registered
+     * listeners.
+     *
+     * @param updates
+     *            the updates to process; null is a no-op.
+     */
+    protected void processUpdates(Queue<DeviceUpdate> updates) {
+        if (updates == null) {
+            return;
+        }
+        for (DeviceUpdate pending = updates.poll(); pending != null;
+                pending = updates.poll()) {
+            if (logger.isTraceEnabled()) {
+                logger.trace("Dispatching device update: {}", pending);
+            }
+            // if (update.change == DeviceUpdate.Change.DELETE)
+            // deviceSyncManager.removeDevice(update.device);
+            // else
+            // deviceSyncManager.storeDevice(update.device);
+            notifyListeners(deviceListeners.getOrderedListeners(), pending);
+        }
+    }
+
+    // Fan a single update out to every listener, mapping the update kind
+    // (and, for CHANGE, each changed field) onto the listener callback API.
+    protected void notifyListeners(List<IDeviceListener> listeners,
+            DeviceUpdate update) {
+        if (listeners == null) {
+            return;
+        }
+        for (IDeviceListener listener : listeners) {
+            switch (update.change) {
+            case ADD:
+                listener.deviceAdded(update.device);
+                break;
+            case DELETE:
+                listener.deviceRemoved(update.device);
+                break;
+            case CHANGE:
+                for (DeviceField field : update.fieldsChanged) {
+                    switch (field) {
+                    case IPV4:
+                        listener.deviceIPV4AddrChanged(update.device);
+                        break;
+                    case SWITCHPORT:
+                        // Move notifications are sent elsewhere (see
+                        // sendDeviceMovedNotification), hence disabled here.
+                        // listener.deviceMoved(update.device);
+                        break;
+                    case VLAN:
+                        listener.deviceVlanChanged(update.device);
+                        break;
+                    default:
+                        logger.debug("Unknown device field changed {}",
+                                update.fieldsChanged.toString());
+                        break;
+                    }
+                }
+                break;
+            }
+        }
+    }
+
+    /**
+     * Check whether the entity has every field in {@code keyFields}
+     * populated.
+     *
+     * @param e
+     *            entity to check
+     * @param keyFields
+     *            the key fields to check e against
+     * @return true if all required key fields are present
+     */
+    protected boolean allKeyFieldsPresent(Entity e,
+            EnumSet<DeviceField> keyFields) {
+        for (DeviceField keyField : keyFields) {
+            switch (keyField) {
+            case MAC:
+                // The MAC address is always present on an entity.
+                break;
+            case VLAN:
+                // FIXME: vlan==null is ambiguous: it can mean: not present
+                // or untagged
+                // if (e.vlan == null) return false;
+                break;
+            case IPV4:
+                if (e.getIpv4Address() == null) {
+                    return false;
+                }
+                break;
+            case SWITCHPORT:
+                if (e.getPort() == null) {
+                    return false;
+                }
+                break;
+            default:
+                // Unreachable unless DeviceField gains new members.
+                throw new IllegalStateException();
+            }
+        }
+        return true;
+    }
+
+    /** Append {@code update} to {@code list}, lazily allocating the list. */
+    private LinkedList<DeviceUpdate> updateUpdates(
+            LinkedList<DeviceUpdate> list, DeviceUpdate update) {
+        if (update == null) {
+            return list;
+        }
+        LinkedList<DeviceUpdate> result =
+                (list != null) ? list : new LinkedList<DeviceUpdate>();
+        result.add(update);
+        return result;
+    }
+
+    /**
+     * Get (or lazily create) the per-class state for an entity class. Safe
+     * under concurrent creation: if another thread wins the putIfAbsent race,
+     * that thread's instance is returned — this never returns null.
+     *
+     * @param clazz
+     *            the class for the index
+     * @return the shared ClassState for the class
+     */
+    private ClassState getClassState(IEntityClass clazz) {
+        String className = clazz.getName();
+        ClassState existing = classStateMap.get(className);
+        if (existing != null) {
+            return existing;
+        }
+        ClassState created = new ClassState(clazz);
+        ClassState raced = classStateMap.putIfAbsent(className, created);
+        // Non-null means another thread registered first; use its instance.
+        return (raced != null) ? raced : created;
+    }
+
+    /**
+     * Update both the primary and class indices for the provided device.
+     *
+     * @param device
+     *            the device to update
+     * @param deviceKey
+     *            the device key for the device
+     * @return true if the update succeeded; false on a concurrent-update
+     *         conflict (caller should retry or roll back).
+     */
+    private boolean updateIndices(Device device, Long deviceKey) {
+        if (!primaryIndex.updateIndex(device, deviceKey)) {
+            return false;
+        }
+        ClassState classState = getClassState(device.getEntityClass());
+        DeviceIndex classIndex = classState.classIndex;
+        // A class without its own index needs no further bookkeeping.
+        return (classIndex == null)
+                || classIndex.updateIndex(device, deviceKey);
+    }
+
+ /**
+ * Update the secondary indices for the given entity and associated entity
+ * classes
+ *
+ * @param entity
+ * the entity to update
+ * @param entityClass
+ * the entity class for the entity
+ * @param deviceKey
+ * the device key to set up
+ */
+ private void updateSecondaryIndices(Entity entity,
+ IEntityClass entityClass, Long deviceKey) {
+ // global secondary indices
+ for (DeviceIndex index : secondaryIndexMap.values()) {
+ index.updateIndex(entity, deviceKey);
+ }
+ // per-entity-class secondary indices
+ ClassState state = getClassState(entityClass);
+ for (DeviceIndex index : state.secondaryIndexMap.values()) {
+ index.updateIndex(entity, deviceKey);
+ }
+ }
+
+ /**
+ * Clean up expired entities/devices. Entities whose last-seen time is
+ * older than ENTITY_TIMEOUT ms are dropped; a device that loses all of
+ * its entities is deleted (DELETE update), otherwise a CHANGE update
+ * is emitted for any fields that changed.
+ */
+ protected void cleanupEntities() {
+ // debugCounters.updateCounter(CNT_CLEANUP_ENTITIES_RUNS);
+
+ // entities last seen before this instant are expired
+ Calendar c = Calendar.getInstance();
+ c.add(Calendar.MILLISECOND, -ENTITY_TIMEOUT);
+ Date cutoff = c.getTime();
+
+ ArrayList<Entity> toRemove = new ArrayList<Entity>();
+ ArrayList<Entity> toKeep = new ArrayList<Entity>();
+
+ Iterator<Device> diter = deviceMap.values().iterator();
+ LinkedList<DeviceUpdate> deviceUpdates = new LinkedList<DeviceUpdate>();
+
+ while (diter.hasNext()) {
+ Device d = diter.next();
+
+ // retry loop: re-runs when a concurrent deviceMap update is
+ // detected (replace/remove below fails)
+ while (true) {
+ deviceUpdates.clear();
+ toRemove.clear();
+ toKeep.clear();
+ for (Entity e : d.getEntities()) {
+ // compareTo < 0 means the entity's last-seen time is
+ // before the cutoff, i.e. it has expired
+ if (e.getLastSeenTimestamp() != null
+ && 0 > e.getLastSeenTimestamp().compareTo(cutoff)) {
+ // individual entity needs to be removed
+ toRemove.add(e);
+ } else {
+ toKeep.add(e);
+ }
+ }
+ if (toRemove.size() == 0) {
+ break;
+ }
+
+ // debugCounters.updateCounter(CNT_ENTITY_REMOVED_TIMEOUT);
+ for (Entity e : toRemove) {
+ removeEntity(e, d.getEntityClass(), d.getDeviceKey(),
+ toKeep);
+ }
+
+ if (toKeep.size() > 0) {
+ // some entities survive: build a replacement Device from
+ // the surviving entities and swap it into the map
+ Device newDevice = allocateDevice(d.getDeviceKey(),
+ d.getDHCPClientName(), d.oldAPs,
+ d.attachmentPoints, toKeep, d.getEntityClass());
+
+ EnumSet<DeviceField> changedFields = EnumSet
+ .noneOf(DeviceField.class);
+ for (Entity e : toRemove) {
+ changedFields.addAll(findChangedFields(newDevice, e));
+ }
+ DeviceUpdate update = null;
+ if (changedFields.size() > 0) {
+ update = new DeviceUpdate(d, CHANGE, changedFields);
+ }
+
+ // conditional replace: only swap if the map still holds
+ // exactly the instance d we examined
+ if (!deviceMap.replace(newDevice.getDeviceKey(), d,
+ newDevice)) {
+ // concurrent modification; try again
+ // need to use device that is the map now for the next
+ // iteration
+ d = deviceMap.get(d.getDeviceKey());
+ if (null != d)
+ continue;
+ // NOTE(review): when the device vanished (d == null)
+ // we fall through and may still publish the CHANGE
+ // update below - confirm this is intended
+ }
+ if (update != null) {
+ // need to count after all possibly continue stmts in
+ // this branch
+ // debugCounters.updateCounter(CNT_DEVICE_CHANGED);
+ deviceUpdates.add(update);
+ }
+ } else {
+ // no entities left: delete the device entirely
+ DeviceUpdate update = new DeviceUpdate(d, DELETE, null);
+ if (!deviceMap.remove(d.getDeviceKey(), d)) {
+ // concurrent modification; try again
+ // need to use device that is the map now for the next
+ // iteration
+ d = deviceMap.get(d.getDeviceKey());
+ if (null != d)
+ continue;
+ // debugCounters.updateCounter(CNT_DEVICE_DELETED);
+ }
+ deviceUpdates.add(update);
+ }
+ processUpdates(deviceUpdates);
+ break;
+ }
+ }
+ }
+
+ /**
+ * Remove an entity from all indices: global secondary, per-class
+ * secondary, primary, and the optional per-class unique index. The
+ * remaining entities ("others") are passed along so each index can
+ * presumably keep mappings still covered by them - see
+ * DeviceIndex.removeEntityIfNeeded.
+ *
+ * @param removed the entity being removed
+ * @param entityClass the entity class of the owning device
+ * @param deviceKey the owning device's key
+ * @param others the device's remaining entities
+ */
+ protected void removeEntity(Entity removed, IEntityClass entityClass,
+ Long deviceKey, Collection<Entity> others) {
+ // Don't count in this method. This method CAN BE called to clean-up
+ // after concurrent device adds/updates and thus counting here
+ // is misleading
+ for (DeviceIndex index : secondaryIndexMap.values()) {
+ index.removeEntityIfNeeded(removed, deviceKey, others);
+ }
+ ClassState classState = getClassState(entityClass);
+ for (DeviceIndex index : classState.secondaryIndexMap.values()) {
+ index.removeEntityIfNeeded(removed, deviceKey, others);
+ }
+
+ primaryIndex.removeEntityIfNeeded(removed, deviceKey, others);
+
+ if (classState.classIndex != null) {
+ classState.classIndex.removeEntityIfNeeded(removed, deviceKey,
+ others);
+ }
+ }
+
+ /**
+ * method to delete a given device, remove all entities first and then
+ * finally delete the device itself.
+ *
+ * @param device
+ * the device to remove from all indices and the device map
+ */
+ protected void deleteDevice(Device device) {
+ // Don't count in this method. This method CAN BE called to clean-up
+ // after concurrent device adds/updates and thus counting here
+ // is misleading
+ ArrayList<Entity> emptyToKeep = new ArrayList<Entity>();
+ for (Entity entity : device.getEntities()) {
+ this.removeEntity(entity, device.getEntityClass(),
+ device.getDeviceKey(), emptyToKeep);
+ }
+ // conditional remove: only delete if the map still holds this exact
+ // device instance (guards against concurrent replacement)
+ if (!deviceMap.remove(device.getDeviceKey(), device)) {
+ if (logger.isDebugEnabled())
+ logger.debug("device map does not have this device -"
+ + device.toString());
+ }
+ }
+
+ /**
+ * Compute the set of key fields that are non-null in the given lookup
+ * arguments; used to select a matching index for a query.
+ *
+ * @param macAddress MAC to match, or null
+ * @param vlan VLAN to match, or null
+ * @param ipv4Address IPv4 address to match, or null
+ * @param port switch port to match, or null
+ * @return the EnumSet of fields that were provided (non-null)
+ */
+ private EnumSet<DeviceField> getEntityKeys(Long macAddress, Short vlan,
+ Integer ipv4Address, NodeConnector port) {
+ // FIXME: vlan==null is a valid search. Need to handle this
+ // case correctly. Note that the code will still work correctly.
+ // But we might do a full device search instead of using an index.
+ EnumSet<DeviceField> keys = EnumSet.noneOf(DeviceField.class);
+ if (macAddress != null)
+ keys.add(DeviceField.MAC);
+ if (vlan != null)
+ keys.add(DeviceField.VLAN);
+ if (ipv4Address != null)
+ keys.add(DeviceField.IPV4);
+ if (port != null)
+ keys.add(DeviceField.SWITCHPORT);
+ return keys;
+ }
+
+ /**
+ * Query the per-class secondary index identified by keyFields for
+ * devices matching the given entity.
+ *
+ * @param clazz the entity class whose index to query
+ * @param keyFields identifies which secondary index of the class to use
+ * @param entity the entity to match
+ * @return an iterator over matching devices; empty when no secondary
+ * index exists for these key fields
+ */
+ protected Iterator<Device> queryClassByEntity(IEntityClass clazz,
+ EnumSet<DeviceField> keyFields, Entity entity) {
+ ClassState classState = getClassState(clazz);
+ DeviceIndex index = classState.secondaryIndexMap.get(keyFields);
+ if (index == null)
+ return Collections.<Device> emptySet().iterator();
+ return new DeviceIndexInterator(this, index.queryByEntity(entity));
+ }
+
+ /**
+ * Factory hook: create a Device from a single entity. Protected so
+ * subclasses (presumably test mocks) can substitute Device variants.
+ */
+ protected Device allocateDevice(Long deviceKey, Entity entity,
+ IEntityClass entityClass) {
+ return new Device(this, deviceKey, entity, entityClass);
+ }
+
+ // TODO: FIX THIS.
+ /**
+ * Factory hook: create a Device from an existing entity collection and
+ * attachment-point lists (used when rebuilding a device in place).
+ */
+ protected Device allocateDevice(Long deviceKey, String dhcpClientName,
+ List<AttachmentPoint> aps, List<AttachmentPoint> trueAPs,
+ Collection<Entity> entities, IEntityClass entityClass) {
+ return new Device(this, deviceKey, dhcpClientName, aps, trueAPs,
+ entities, entityClass);
+ }
+
+ /**
+ * Factory hook: create a copy of device with entity inserted at the
+ * given insertion point.
+ */
+ protected Device allocateDevice(Device device, Entity entity,
+ int insertionpoint) {
+ return new Device(device, entity, insertionpoint);
+ }
+
+ // not used
+ /**
+ * Build a replacement Device for the given entity set, keeping only
+ * those old attachment points whose port still appears in an entity.
+ */
+ protected Device allocateDevice(Device device, Set<Entity> entities) {
+ List<AttachmentPoint> newPossibleAPs = new ArrayList<AttachmentPoint>();
+ List<AttachmentPoint> newAPs = new ArrayList<AttachmentPoint>();
+ for (Entity entity : entities) {
+ if (entity.getPort() != null) {
+ // second ctor arg presumably a timestamp; the instance is
+ // only used for the contains() match below. Assumes
+ // AttachmentPoint equality ignores it - TODO confirm
+ AttachmentPoint aP = new AttachmentPoint(entity.getPort(), 0);
+ newPossibleAPs.add(aP);
+ }
+ }
+ if (device.attachmentPoints != null) {
+ for (AttachmentPoint oldAP : device.attachmentPoints) {
+ if (newPossibleAPs.contains(oldAP)) {
+ newAPs.add(oldAP);
+ }
+ }
+ }
+ if (newAPs.isEmpty())
+ newAPs = null;
+ Device d = new Device(this, device.getDeviceKey(),
+ device.getDHCPClientName(), newAPs, null, entities,
+ device.getEntityClass());
+ d.updateAttachmentPoint();
+ return d;
+ }
+
+ // *********************
+ // ITopologyManagerAware
+ // *********************
+
+ /**
+ * Topology edges changed: re-evaluate every device's attachment point
+ * and notify listeners for each device that moved.
+ * NOTE(review): the edge list itself is not inspected; all devices
+ * are rescanned on any topology change.
+ */
+ @Override
+ public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
+ Iterator<Device> diter = deviceMap.values().iterator();
+
+ while (diter.hasNext()) {
+ Device d = diter.next();
+ if (d.updateAttachmentPoint()) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Attachment point changed for device: {}", d);
+ }
+ sendDeviceMovedNotification(d);
+ }
+ }
+ }
+
+ // Topology callback: link over-utilization is ignored by this module.
+ @Override
+ public void edgeOverUtilized(Edge edge) {
+ // nothing to do
+ }
+
+ // Topology callback: utilization recovery is ignored by this module.
+ @Override
+ public void edgeUtilBackToNormal(Edge edge) {
+ // nothing to do
+ }
+
+ // *********************
+ // IEntityClassListener
+ // *********************
+
+ @Override
+ public void entityClassChanged(Set<String> entityClassNames) {
+ /*
+ * iterate through the devices, reclassify the devices that belong to
+ * these entity class names
+ */
+ Iterator<Device> diter = deviceMap.values().iterator();
+ while (diter.hasNext()) {
+ Device d = diter.next();
+ // devices with no entity class are always re-checked
+ if (d.getEntityClass() == null
+ || entityClassNames.contains(d.getEntityClass().getName()))
+ reclassifyDevice(d);
+ }
+ }
+
+ // *************
+ // Local methods
+ // *************
+ /**
+ * Send a deviceMoved notification for the given device to all
+ * registered device listeners, in listener order.
+ *
+ * @param d
+ * the device that moved
+ */
+ protected void sendDeviceMovedNotification(Device d) {
+ // debugCounters.updateCounter(CNT_DEVICE_MOVED);
+ // deviceSyncManager.storeDevice(d);
+ List<IDeviceListener> listeners = deviceListeners.getOrderedListeners();
+ if (listeners != null) {
+ for (IDeviceListener listener : listeners) {
+ listener.deviceMoved(d);
+ }
+ }
+ }
+
+ /**
+ * this method will reclassify and reconcile a device - possibilities are -
+ * create new device(s), remove entities from this device. If the device
+ * entity class did not change then it returns false else true.
+ *
+ * @param device
+ * the device to reclassify (null is tolerated and ignored)
+ * @return true if the device was deleted and its entities re-learned,
+ * false if no reclassification was needed
+ */
+ protected boolean reclassifyDevice(Device device) {
+ // first classify all entities of this device
+ if (device == null) {
+ logger.debug("In reclassify for null device");
+ return false;
+ }
+ boolean needToReclassify = false;
+ for (Entity entity : device.entities) {
+ IEntityClass entityClass = this.entityClassifier
+ .classifyEntity(entity);
+ // reclassify when the classifier disagrees with the device's
+ // current class (or either side has no class) for ANY entity
+ if (entityClass == null || device.getEntityClass() == null) {
+ needToReclassify = true;
+ break;
+ }
+ if (!entityClass.getName()
+ .equals(device.getEntityClass().getName())) {
+ needToReclassify = true;
+ break;
+ }
+ }
+ if (needToReclassify == false) {
+ return false;
+ }
+
+ // debugCounters.updateCounter(CNT_DEVICE_RECLASSIFY_DELETE);
+ LinkedList<DeviceUpdate> deviceUpdates = new LinkedList<DeviceUpdate>();
+ // delete this device and then re-learn all the entities
+ this.deleteDevice(device);
+ deviceUpdates.add(new DeviceUpdate(device, DeviceUpdate.Change.DELETE,
+ null));
+ if (!deviceUpdates.isEmpty())
+ processUpdates(deviceUpdates);
+ for (Entity entity : device.entities) {
+ this.learnDeviceByEntity(entity);
+ }
+ return true;
+ }
+
+ /**
+ * For testing: sets the interval between writes of the same device to the
+ * device store.
+ *
+ * @param intervalMs
+ */
+ // void setSyncStoreWriteInterval(int intervalMs) {
+ // this.syncStoreWriteIntervalMs = intervalMs;
+ // }
+
+ /**
+ * For testing: sets the time between transition to MASTER and consolidate
+ * store
+ *
+ * @param intervalMs
+ */
+ // void setInitialSyncStoreConsolidateMs(int intervalMs) {
+ // this.initialSyncStoreConsolidateMs = intervalMs;
+ // }
+
+ /**
+ * Pack the first 6 bytes of the given array into a long, first byte
+ * most significant (MAC address byte order).
+ *
+ * @param address assumed to hold at least 6 bytes - TODO confirm callers
+ * @return the 48-bit value in the low bits of a long
+ */
+ private long toLong(byte[] address) {
+ long mac = 0;
+ for (int i = 0; i < 6; i++) {
+ // mask with 0xffL first to undo the byte's sign extension
+ long t = (address[i] & 0xffL) << ((5 - i) * 8);
+ mac |= t;
+ }
+ return mac;
+ }
+
+ /**
+ * Accepts an IPv4 address in a byte array and returns the corresponding
+ * 32-bit integer value.
+ *
+ * @param ipAddress assumed to hold at least 4 bytes - TODO confirm
+ * @return the address as a big-endian 32-bit int
+ */
+ private static int toIPv4Address(byte[] ipAddress) {
+ int ip = 0;
+ for (int i = 0; i < 4; i++) {
+ // mask with 0xff first to undo the byte's sign extension
+ int t = (ipAddress[i] & 0xff) << ((3 - i) * 8);
+ ip |= t;
+ }
+ return ip;
+ }
+
+ /**
+ * Register this module's debug counters. Currently a no-op: the entire
+ * body is commented out because the debug counter service is missing
+ * (see the XXX note below).
+ */
+ private void registerDeviceManagerDebugCounters() {
+ /*
+ * XXX Missing functionality if (debugCounters == null) {
+ * logger.error("Debug Counter Service not found."); debugCounters = new
+ * NullDebugCounter(); return; }
+ * debugCounters.registerCounter(CNT_INCOMING,
+ * "All incoming packets seen by this module",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_RECONCILE_REQUEST,
+ * "Number of flows that have been received for reconciliation by " +
+ * "this module", CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_RECONCILE_NO_SOURCE,
+ * "Number of flow reconcile events that failed because no source " +
+ * "device could be identified", CounterType.WARN); // is this really a
+ * warning debugCounters.registerCounter(CNT_RECONCILE_NO_DEST,
+ * "Number of flow reconcile events that failed because no " +
+ * "destination device could be identified", CounterType.WARN); // is
+ * this really a warning
+ * debugCounters.registerCounter(CNT_BROADCAST_SOURCE,
+ * "Number of packetIns that were discarded because the source " +
+ * "MAC was broadcast or multicast", CounterType.WARN);
+ * debugCounters.registerCounter(CNT_NO_SOURCE,
+ * "Number of packetIns that were discarded because the " +
+ * "could not identify a source device. This can happen if a " +
+ * "packet is not allowed, appears on an illegal port, does not " +
+ * "have a valid address space, etc.", CounterType.WARN);
+ * debugCounters.registerCounter(CNT_NO_DEST,
+ * "Number of packetIns that did not have an associated " +
+ * "destination device. E.g., because the destination MAC is " +
+ * "broadcast/multicast or is not yet known to the controller.",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DHCP_CLIENT_NAME_SNOOPED,
+ * "Number of times a DHCP client name was snooped from a " +
+ * "packetIn.", CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DEVICE_ON_INTERAL_PORT_NOT_LEARNED,
+ * "Number of times packetIn was received on an internal port and" +
+ * "no source device is known for the source MAC. The packetIn is " +
+ * "discarded.", CounterType.WARN);
+ * debugCounters.registerCounter(CNT_PACKET_NOT_ALLOWED,
+ * "Number of times a packetIn was not allowed due to spoofing " +
+ * "protection configuration.", CounterType.WARN); // is this really a
+ * warning? debugCounters.registerCounter(CNT_NEW_DEVICE,
+ * "Number of times a new device was learned",
+ * CounterType.ALWAYS_COUNT); debugCounters.registerCounter(
+ * CNT_PACKET_ON_INTERNAL_PORT_FOR_KNOWN_DEVICE,
+ * "Number of times a packetIn was received on an internal port " +
+ * "for a known device.", CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_NEW_ENTITY,
+ * "Number of times a new entity was learned for an existing device",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DEVICE_CHANGED,
+ * "Number of times device properties have changed",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DEVICE_MOVED,
+ * "Number of times devices have moved", CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_CLEANUP_ENTITIES_RUNS,
+ * "Number of times the entity cleanup task has been run",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_ENTITY_REMOVED_TIMEOUT,
+ * "Number of times entities have been removed due to timeout " +
+ * "(entity has been inactive for " + ENTITY_TIMEOUT/1000 + "s)",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DEVICE_DELETED,
+ * "Number of devices that have been removed due to inactivity",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DEVICE_RECLASSIFY_DELETE,
+ * "Number of devices that required reclassification and have been " +
+ * "temporarily delete for reclassification", CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DEVICE_STORED,
+ * "Number of device entries written or updated to the sync store",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DEVICE_STORE_THROTTLED,
+ * "Number of times a device update to the sync store was " +
+ * "requested but not performed because the same device entities " +
+ * "have recently been updated already", CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_DEVICE_REMOVED_FROM_STORE,
+ * "Number of devices that were removed from the sync store " +
+ * "because the local controller removed the device due to " +
+ * "inactivity", CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_SYNC_EXCEPTION,
+ * "Number of times an operation on the sync store resulted in " +
+ * "sync exception", CounterType.WARN); // is this an error?
+ * debugCounters.registerCounter(CNT_DEVICES_FROM_STORE,
+ * "Number of devices that were read from the sync store after " +
+ * "the local controller transitioned from SLAVE to MASTER",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_CONSOLIDATE_STORE_RUNS,
+ * "Number of times the task to consolidate entries in the " +
+ * "store with live known devices has been run",
+ * CounterType.ALWAYS_COUNT);
+ * debugCounters.registerCounter(CNT_CONSOLIDATE_STORE_DEVICES_REMOVED,
+ * "Number of times a device has been removed from the sync " +
+ * "store because no corresponding live device is known. " +
+ * "This indicates a remote controller still writing device " +
+ * "entries despite the local controller being MASTER or an " +
+ * "inconsistent store update from the local controller.",
+ * CounterType.WARN);
+ * debugCounters.registerCounter(CNT_TRANSITION_TO_MASTER,
+ * "Number of times this controller has transitioned from SLAVE " +
+ * "to MASTER role. Will be 0 or 1.", CounterType.ALWAYS_COUNT);
+ */
+ }
+
+ /**
+ * For testing: consolidate the store NOW
+ */
+ // void scheduleConsolidateStoreNow() {
+ // this.storeConsolidateTask.reschedule(0, TimeUnit.MILLISECONDS);
+ // }
+
+ // private class DeviceSyncManager {
+ // // maps (opaque) deviceKey to the time in System.nanoTime() when we
+ // // last wrote the device to the sync store
+ // private ConcurrentMap<Long, Long> lastWriteTimes =
+ // new ConcurrentHashMap<Long, Long>();
+ //
+ // /**
+ // * Write the given device to storage if we are MASTER.
+ // * Use this method if the device has significantly changed (e.g.,
+ // * new AP, new IP, entities removed).
+ // * @param d the device to store
+ // */
+ // public void storeDevice(Device d) {
+ // if (!isMaster)
+ // return;
+ // if (d == null)
+ // return;
+ // long now = System.nanoTime();
+ // writeUpdatedDeviceToStorage(d);
+ // lastWriteTimes.put(d.getDeviceKey(), now);
+ // }
+ //
+ // /**
+ // * Write the given device to storage if we are MASTER and if the
+ // * last write for the device was more than this.syncStoreIntervalNs
+ // * time ago.
+ // * Use this method to update last active times in the store.
+ // * @param d the device to store
+ // */
+ // public void storeDeviceThrottled(Device d) {
+ // long intervalNs = syncStoreWriteIntervalMs*1000L*1000L;
+ // if (!isMaster)
+ // return;
+ // if (d == null)
+ // return;
+ // long now = System.nanoTime();
+ // Long last = lastWriteTimes.get(d.getDeviceKey());
+ // if (last == null ||
+ // now - last > intervalNs) {
+ // writeUpdatedDeviceToStorage(d);
+ // lastWriteTimes.put(d.getDeviceKey(), now);
+ // } else {
+ // debugCounters.updateCounter(CNT_DEVICE_STORE_THROTTLED);
+ // }
+ // }
+ //
+ // /**
+ // * Remove the given device from the store. If only some entities have
+ // * been removed the updated device should be written using
+ // * {@link #storeDevice(Device)}
+ // * @param d
+ // */
+ // public void removeDevice(Device d) {
+ // if (!isMaster)
+ // return;
+ // // FIXME: could we have a problem with concurrent put to the
+ // // hashMap? I.e., we write a stale entry to the map after the
+ // // delete and now are left with an entry we'll never clean up
+ // lastWriteTimes.remove(d.getDeviceKey());
+ // try {
+ // // TODO: should probably do versioned delete. OTOH, even
+ // // if we accidentally delete, we'll write it again after
+ // // the next entity ....
+ // debugCounters.updateCounter(CNT_DEVICE_REMOVED_FROM_STORE);
+ // storeClient.delete(DeviceSyncRepresentation.computeKey(d));
+ // } catch(ObsoleteVersionException e) {
+ // // FIXME
+ // } catch (SyncException e) {
+ // debugCounters.updateCounter(CNT_SYNC_EXCEPTION);
+ // logger.error("Could not remove device " + d + " from store", e);
+ // }
+ // }
+ //
+ // /**
+ // * Remove the given Versioned device from the store. If the device
+ // * was locally modified ignore the delete request.
+ // * @param syncedDeviceKey
+ // */
+ // private void removeDevice(Versioned<DeviceSyncRepresentation> dev) {
+ // try {
+ // debugCounters.updateCounter(CNT_DEVICE_REMOVED_FROM_STORE);
+ // storeClient.delete(dev.getValue().getKey(),
+ // dev.getVersion());
+ // } catch(ObsoleteVersionException e) {
+ // // Key was locally modified by another thread.
+ // // Do not delete and ignore.
+ // } catch(SyncException e) {
+ // debugCounters.updateCounter(CNT_SYNC_EXCEPTION);
+ // logger.error("Failed to remove device entry for " +
+ // dev.toString() + " from store.", e);
+ // }
+ // }
+ //
+ // /**
+ // * Synchronously transition from SLAVE to MASTER. By iterating through
+ // * the store and learning all devices from the store
+ // */
+ // private void goToMaster() {
+ // if (logger.isDebugEnabled()) {
+ // logger.debug("Transitioning to MASTER role");
+ // }
+ // debugCounters.updateCounter(CNT_TRANSITION_TO_MASTER);
+ // IClosableIterator<Map.Entry<String,Versioned<DeviceSyncRepresentation>>>
+ // iter = null;
+ // try {
+ // iter = storeClient.entries();
+ // } catch (SyncException e) {
+ // debugCounters.updateCounter(CNT_SYNC_EXCEPTION);
+ // logger.error("Failed to read devices from sync store", e);
+ // return;
+ // }
+ // try {
+ // while(iter.hasNext()) {
+ // Versioned<DeviceSyncRepresentation> versionedDevice =
+ // iter.next().getValue();
+ // DeviceSyncRepresentation storedDevice =
+ // versionedDevice.getValue();
+ // if (storedDevice == null)
+ // continue;
+ // debugCounters.updateCounter(CNT_DEVICES_FROM_STORE);
+ // for(SyncEntity se: storedDevice.getEntities()) {
+ // learnDeviceByEntity(se.asEntity());
+ // }
+ // }
+ // } finally {
+ // if (iter != null)
+ // iter.close();
+ // }
+ // storeConsolidateTask.reschedule(initialSyncStoreConsolidateMs,
+ // TimeUnit.MILLISECONDS);
+ // }
+ //
+ // /**
+ // * Actually perform the write of the device to the store
+ // * FIXME: concurrent modification behavior
+ // * @param device The device to write
+ // */
+ // private void writeUpdatedDeviceToStorage(Device device) {
+ // try {
+ // debugCounters.updateCounter(CNT_DEVICE_STORED);
+ // // FIXME: use a versioned put
+ // DeviceSyncRepresentation storeDevice =
+ // new DeviceSyncRepresentation(device);
+ // storeClient.put(storeDevice.getKey(), storeDevice);
+ // } catch (ObsoleteVersionException e) {
+ // // FIXME: what's the right behavior here. Can the store client
+ // // even throw this error?
+ // } catch (SyncException e) {
+ // debugCounters.updateCounter(CNT_SYNC_EXCEPTION);
+ // logger.error("Could not write device " + device +
+ // " to sync store:", e);
+ // }
+ // }
+ //
+ // /**
+ // * Iterate through all entries in the sync store. For each device
+ // * in the store check if any stored entity matches a live device. If
+ // * no entities match a live device we remove the entry from the store.
+ // *
+ // * Note: we do not check if all devices known to device manager are
+ // * in the store. We rely on regular packetIns for that.
+ // * Note: it's possible that multiple entries in the store map to the
+ // * same device. We don't check or handle this case.
+ // *
+ // * We need to perform this check after a SLAVE->MASTER transition to
+ // * get rid of all entries the old master might have written to the
+ // * store after we took over. We also run it regularly in MASTER
+ // * state to ensure we don't have stale entries in the store
+ // */
+ // private void consolidateStore() {
+ // if (!isMaster)
+ // return;
+ // debugCounters.updateCounter(CNT_CONSOLIDATE_STORE_RUNS);
+ // if (logger.isDebugEnabled()) {
+ // logger.debug("Running consolidateStore.");
+ // }
+ // IClosableIterator<Map.Entry<String,Versioned<DeviceSyncRepresentation>>>
+ // iter = null;
+ // try {
+ // iter = storeClient.entries();
+ // } catch (SyncException e) {
+ // debugCounters.updateCounter(CNT_SYNC_EXCEPTION);
+ // logger.error("Failed to read devices from sync store", e);
+ // return;
+ // }
+ // try {
+ // while(iter.hasNext()) {
+ // boolean found = false;
+ // Versioned<DeviceSyncRepresentation> versionedDevice =
+ // iter.next().getValue();
+ // DeviceSyncRepresentation storedDevice =
+ // versionedDevice.getValue();
+ // if (storedDevice == null)
+ // continue;
+ // for(SyncEntity se: storedDevice.getEntities()) {
+ // try {
+ // // Do we have a device for this entity??
+ // IDevice d = findDevice(se.macAddress, se.vlan,
+ // se.ipv4Address,
+ // se.switchDPID,
+ // se.switchPort);
+ // if (d != null) {
+ // found = true;
+ // break;
+ // }
+ // } catch (IllegalArgumentException e) {
+ // // not all key fields provided. Skip entity
+ // }
+ // }
+ // if (!found) {
+ // // We currently DO NOT have a live device that
+ // // matches the current device from the store.
+ // // Delete device from store.
+ // if (logger.isDebugEnabled()) {
+ // logger.debug("Removing device {} from store. No "
+ // + "corresponding live device",
+ // storedDevice.getKey());
+ // }
+ // debugCounters.updateCounter(CNT_CONSOLIDATE_STORE_DEVICES_REMOVED);
+ // removeDevice(versionedDevice);
+ // }
+ // }
+ // } finally {
+ // if (iter != null)
+ // iter.close();
+ // }
+ // }
+ // }
+ //
+ //
+ // /**
+ // * For testing. Sets the syncService. Only call after init but before
+ // * startUp. Used by MockDeviceManager
+ // * @param syncService
+ // */
+ // protected void setSyncServiceIfNotSet(ISyncService syncService) {
+ // if (this.syncService == null)
+ // this.syncService = syncService;
+ // }
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.opendaylight.controller.sal.utils.IterableIterator;
+
+/**
+ * An index that maps key fields of an entity to device keys, with multiple
+ * device keys allowed per entity
+ */
+public class DeviceMultiIndex extends DeviceIndex {
+ /**
+ * The index: key-field projection of an entity -> set of device keys
+ */
+ private ConcurrentHashMap<IndexedEntity, Collection<Long>> index;
+
+ /**
+ * Construct a multi-index over the given key fields
+ *
+ * @param keyFields
+ * the entity fields this index is keyed on
+ */
+ public DeviceMultiIndex(EnumSet<DeviceField> keyFields) {
+ super(keyFields);
+ index = new ConcurrentHashMap<IndexedEntity, Collection<Long>>();
+ }
+
+ // ***********
+ // DeviceIndex
+ // ***********
+
+ // Returns all device keys stored under the entity's key fields, or an
+ // empty iterator when nothing matches.
+ @Override
+ public Iterator<Long> queryByEntity(Entity entity) {
+ IndexedEntity ie = new IndexedEntity(keyFields, entity);
+ Collection<Long> devices = index.get(ie);
+ if (devices != null)
+ return devices.iterator();
+
+ return Collections.<Long> emptySet().iterator();
+ }
+
+ // Flattens the per-entity device-key collections into one iterator.
+ @Override
+ public Iterator<Long> getAll() {
+ Iterator<Collection<Long>> iter = index.values().iterator();
+ return new IterableIterator<Long>(iter);
+ }
+
+ // A multi-index can never conflict, so this always succeeds.
+ @Override
+ public boolean updateIndex(Device device, Long deviceKey) {
+ for (Entity e : device.entities) {
+ updateIndex(e, deviceKey);
+ }
+ return true;
+ }
+
+ @Override
+ public void updateIndex(Entity entity, Long deviceKey) {
+ Collection<Long> devices = null;
+
+ IndexedEntity ie = new IndexedEntity(keyFields, entity);
+ // skip entities that don't populate all the key fields
+ if (!ie.hasNonNullKeys())
+ return;
+
+ devices = index.get(ie);
+ if (devices == null) {
+ // concurrent set backed by a ConcurrentHashMap; putIfAbsent
+ // resolves the race with other writers creating the same set
+ Map<Long, Boolean> chm = new ConcurrentHashMap<Long, Boolean>();
+ devices = Collections.newSetFromMap(chm);
+ Collection<Long> r = index.putIfAbsent(ie, devices);
+ if (r != null)
+ devices = r;
+ }
+
+ devices.add(deviceKey);
+ }
+
+ // Drops the whole mapping, i.e. ALL device keys stored under this
+ // entity's key fields.
+ @Override
+ public void removeEntity(Entity entity) {
+ IndexedEntity ie = new IndexedEntity(keyFields, entity);
+ index.remove(ie);
+ }
+
+ // Removes only the given device key from the entity's mapping; the
+ // (possibly now empty) set itself is left in place.
+ @Override
+ public void removeEntity(Entity entity, Long deviceKey) {
+ IndexedEntity ie = new IndexedEntity(keyFields, entity);
+ Collection<Long> devices = index.get(ie);
+ if (devices != null)
+ devices.remove(deviceKey);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDeviceService;
+
+/**
+ * An index that maps key fields of an entity uniquely to a device key
+ */
+public class DeviceUniqueIndex extends DeviceIndex {
+ /**
+ * The index: key-field projection of an entity -> single device key
+ */
+ private final ConcurrentHashMap<IndexedEntity, Long> index;
+
+ /**
+ * Construct a new device index using the provided key fields
+ *
+ * @param keyFields
+ * the key fields to use
+ */
+ public DeviceUniqueIndex(EnumSet<IDeviceService.DeviceField> keyFields) {
+ super(keyFields);
+ index = new ConcurrentHashMap<IndexedEntity, Long>();
+ }
+
+ // ***********
+ // DeviceIndex
+ // ***********
+
+ // Singleton or empty iterator, since a unique index stores at most one
+ // device key per entity.
+ @Override
+ public Iterator<Long> queryByEntity(Entity entity) {
+ final Long deviceKey = findByEntity(entity);
+ if (deviceKey != null)
+ return Collections.<Long> singleton(deviceKey).iterator();
+
+ return Collections.<Long> emptySet().iterator();
+ }
+
+ @Override
+ public Iterator<Long> getAll() {
+ return index.values().iterator();
+ }
+
+ // Fails (returns false) when any entity of the device is already
+ // mapped to a DIFFERENT device key; re-mapping to the same key is ok.
+ @Override
+ public boolean updateIndex(Device device, Long deviceKey) {
+ for (Entity e : device.entities) {
+ IndexedEntity ie = new IndexedEntity(keyFields, e);
+ // skip entities that don't populate all the key fields
+ if (!ie.hasNonNullKeys())
+ continue;
+
+ Long ret = index.putIfAbsent(ie, deviceKey);
+ if (ret != null && !ret.equals(deviceKey)) {
+ // If the return value is non-null, then fail the insert
+ // (this implies that a device using this entity has
+ // already been created in another thread).
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Unconditional overwrite, unlike the Device variant above.
+ @Override
+ public void updateIndex(Entity entity, Long deviceKey) {
+ IndexedEntity ie = new IndexedEntity(keyFields, entity);
+ if (!ie.hasNonNullKeys())
+ return;
+ index.put(ie, deviceKey);
+ }
+
+ @Override
+ public void removeEntity(Entity entity) {
+ IndexedEntity ie = new IndexedEntity(keyFields, entity);
+ index.remove(ie);
+ }
+
+ // Conditional removal: only removes when the entity currently maps to
+ // the given device key.
+ @Override
+ public void removeEntity(Entity entity, Long deviceKey) {
+ IndexedEntity ie = new IndexedEntity(keyFields, entity);
+ index.remove(ie, deviceKey);
+ }
+
+ // **************
+ // Public Methods
+ // **************
+
+ /**
+ * Look up a {@link Device} based on the provided {@link Entity}.
+ *
+ * @param entity
+ * the entity to search for
+ * @return The key for the {@link Device} object if found, else null
+ */
+ public Long findByEntity(Entity entity) {
+ IndexedEntity ie = new IndexedEntity(keyFields, entity);
+ Long deviceKey = index.get(ie);
+ // NOTE(review): this null check is redundant (get already returns
+ // null on a miss); kept byte-for-byte as-is
+ if (deviceKey == null)
+ return null;
+ return deviceKey;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.EnumSet;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDeviceService;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This is a thin wrapper around {@link Entity} that allows overriding the
+ * behavior of {@link Object#hashCode()} and {@link Object#equals(Object)} so
+ * that the keying behavior in a hash map can be changed dynamically
+ *
+ * @author readams
+ */
+public class IndexedEntity {
+ protected EnumSet<DeviceField> keyFields;
+ protected Entity entity;
+ private int hashCode = 0;
+ protected static Logger logger = LoggerFactory
+ .getLogger(IndexedEntity.class);
+
+ /**
+ * Create a new {@link IndexedEntity} for the given {@link Entity} using the
+ * provided key fields.
+ *
+ * @param keyFields
+ * The key fields that will be used for computing
+ * {@link IndexedEntity#hashCode()} and
+ * {@link IndexedEntity#equals(Object)}
+ * @param entity
+ * the entity to wrap
+ */
+ public IndexedEntity(EnumSet<DeviceField> keyFields, Entity entity) {
+ super();
+ this.keyFields = keyFields;
+ this.entity = entity;
+ }
+
+ /**
+ * Check whether this entity has non-null values in any of its key fields
+ *
+ * @return true if any key fields have a non-null value
+ */
+ public boolean hasNonNullKeys() {
+ for (DeviceField f : keyFields) {
+ switch (f) {
+ case MAC:
+ return true;
+ case IPV4:
+ if (entity.getIpv4Address() != null)
+ return true;
+ break;
+ case SWITCHPORT:
+ if (entity.getPort() != null)
+ return true;
+ break;
+ case VLAN:
+ if (entity.getVlan() != null)
+ return true;
+ break;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+
+ if (hashCode != 0) {
+ return hashCode;
+ }
+
+ final int prime = 31;
+ hashCode = 1;
+ for (DeviceField f : keyFields) {
+ switch (f) {
+ case MAC:
+ hashCode = prime
+ * hashCode
+ + (int) (entity.getMacAddress() ^ (entity
+ .getMacAddress() >>> 32));
+ break;
+ case IPV4:
+ hashCode = prime
+ * hashCode
+ + ((entity.getIpv4Address() == null) ? 0 : entity
+ .getIpv4Address().hashCode());
+ break;
+ case SWITCHPORT:
+ hashCode = prime
+ * hashCode
+ + ((entity.getPort() == null) ? 0 : entity.getPort()
+ .hashCode());
+ break;
+ case VLAN:
+ hashCode = prime
+ * hashCode
+ + ((entity.getVlan() == null) ? 0 : entity.getVlan()
+ .hashCode());
+ break;
+ }
+ }
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ IndexedEntity other = (IndexedEntity) obj;
+
+ if (!keyFields.equals(other.keyFields))
+ return false;
+
+ for (IDeviceService.DeviceField f : keyFields) {
+ switch (f) {
+ case MAC:
+ if (entity.getMacAddress() != other.entity.getMacAddress())
+ return false;
+ break;
+ case IPV4:
+ if (entity.getIpv4Address() == null) {
+ if (other.entity.getIpv4Address() != null)
+ return false;
+ } else if (!entity.getIpv4Address().equals(
+ other.entity.getIpv4Address()))
+ return false;
+ break;
+ case SWITCHPORT:
+ if (entity.getPort() == null) {
+ if (other.entity.getPort() != null)
+ return false;
+ } else if (!entity.getPort().equals(other.entity.getPort()))
+ return false;
+ break;
+ case VLAN:
+ if (entity.getVlan() == null) {
+ if (other.entity.getVlan() != null)
+ return false;
+ } else if (!entity.getVlan().equals(other.entity.getVlan()))
+ return false;
+ break;
+ }
+ }
+
+ return true;
+ }
+
+}
--- /dev/null
+package org.opendaylight.controller.hosttracker.internal;
+
+///*
+// * Copyright (c) 2011,2013 Big Switch Networks, Inc.
+// *
+// * Licensed under the Eclipse Public License, Version 1.0 (the
+// * "License"); you may not use this file except in compliance with the
+// * License. You may obtain a copy of the License at
+// *
+// * http://www.eclipse.org/legal/epl-v10.html
+// *
+// * Unless required by applicable law or agreed to in writing, software
+// * distributed under the License is distributed on an "AS IS" BASIS,
+// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// * implied. See the License for the specific language governing
+// * permissions and limitations under the License.
+// *
+// * This file incorporates work covered by the following copyright and
+// * permission notice:
+// *
+// * Originally created by David Erickson, Stanford University
+// *
+// * Licensed under the Apache License, Version 2.0 (the "License");
+// * you may not use this file except in compliance with the
+// * License. You may obtain a copy of the License at
+// *
+// * http://www.apache.org/licenses/LICENSE-2.0
+// *
+// * Unless required by applicable law or agreed to in writing,
+// * software distributed under the License is distributed on an "AS
+// * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// * express or implied. See the License for the specific language
+// * governing permissions and limitations under the License.
+// */
+//
+//package org.opendaylight.controller.hosttracker.internal;
+//
+//import static org.easymock.EasyMock.anyLong;
+//import static org.easymock.EasyMock.anyObject;
+//import static org.easymock.EasyMock.anyShort;
+//import static org.easymock.EasyMock.createMock;
+//import static org.easymock.EasyMock.createNiceMock;
+//import static org.easymock.EasyMock.eq;
+//import static org.easymock.EasyMock.expect;
+//import static org.easymock.EasyMock.expectLastCall;
+//import static org.easymock.EasyMock.isA;
+//import static org.easymock.EasyMock.or;
+//import static org.easymock.EasyMock.replay;
+//import static org.easymock.EasyMock.reset;
+//import static org.easymock.EasyMock.verify;
+//import static org.junit.Assert.*;
+//
+//import java.util.ArrayList;
+//import java.util.Arrays;
+//import java.util.Calendar;
+//import java.util.Collection;
+//import java.util.Collections;
+//import java.util.Date;
+//import java.util.EnumSet;
+//import java.util.HashMap;
+//import java.util.HashSet;
+//import java.util.Iterator;
+//import java.util.List;
+//import java.util.Map;
+//import java.util.Map.Entry;
+//import java.util.Set;
+//import java.util.concurrent.ConcurrentHashMap;
+//
+//import org.junit.Before;
+//import org.junit.Test;
+//
+//import org.slf4j.Logger;
+//import org.slf4j.LoggerFactory;
+//
+//public class DeviceManagerImplTest {
+//
+// protected static Logger logger =
+// LoggerFactory.getLogger(DeviceManagerImplTest.class);
+//
+// protected OFPacketIn packetIn_1, packetIn_2, packetIn_3;
+// protected IPacket testARPReplyPacket_1, testARPReplyPacket_2,
+// testARPReplyPacket_3;
+// protected IPacket testARPReqPacket_1, testARPReqPacket_2;
+// protected byte[] testARPReplyPacket_1_Srld, testARPReplyPacket_2_Srld;
+// private MockSyncService syncService;
+// private IStoreClient<String, DeviceSyncRepresentation> storeClient;
+//
+// DeviceManagerImpl deviceManager;
+// MemoryStorageSource storageSource;
+// FlowReconcileManager flowReconcileMgr;
+//
+// private IOFSwitch makeSwitchMock(long id) {
+// IOFSwitch mockSwitch = createMock(IOFSwitch.class);
+// ImmutablePort port = ImmutablePort.create("p1", (short)1);
+// expect(mockSwitch.getId()).andReturn(id).anyTimes();
+// expect(mockSwitch.getStringId())
+// .andReturn(HexString.toHexString(id, 6)).anyTimes();
+// expect(mockSwitch.getPort(anyShort()))
+// .andReturn(port).anyTimes();
+// return mockSwitch;
+// }
+//
+// /*
+// * return an EasyMock ITopologyService that's setup so that it will
+// * answer all questions a device or device manager will ask
+// * (isAttachmentPointPort, etc.) in a way so that every port is a
+// * non-BD, attachment point port.
+// * The returned mock is still in record mode
+// */
+// private ITopologyService makeMockTopologyAllPortsAp() {
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// mockTopology.isAttachmentPointPort(anyLong(), anyShort());
+// expectLastCall().andReturn(true).anyTimes();
+// mockTopology.getL2DomainId(anyLong());
+// expectLastCall().andReturn(1L).anyTimes();
+// mockTopology.isBroadcastDomainPort(anyLong(), anyShort());
+// expectLastCall().andReturn(false).anyTimes();
+// mockTopology.isConsistent(anyLong(), anyShort(), anyLong(), anyShort());
+// expectLastCall().andReturn(false).anyTimes();
+// mockTopology.isInSameBroadcastDomain(anyLong(), anyShort(),
+// anyLong(), anyShort());
+// expectLastCall().andReturn(false).anyTimes();
+// return mockTopology;
+// }
+//
+// @Override
+// @Before
+// public void setUp() throws Exception {
+// doSetUp(Role.MASTER);
+// }
+//
+// public void doSetUp(Role initialRole) throws Exception {
+// super.setUp();
+//
+// this.syncService = new MockSyncService();
+//
+// FloodlightModuleContext fmc = new FloodlightModuleContext();
+// RestApiServer restApi = new RestApiServer();
+// MockThreadPoolService tp = new MockThreadPoolService();
+// ITopologyService topology = createMock(ITopologyService.class);
+// fmc.addService(IThreadPoolService.class, tp);
+// mockFloodlightProvider = getMockFloodlightProvider();
+// mockFloodlightProvider.setRole(initialRole, "");
+//
+// deviceManager = new DeviceManagerImpl();
+// flowReconcileMgr = new FlowReconcileManager();
+// DefaultEntityClassifier entityClassifier = new DefaultEntityClassifier();
+// fmc.addService(IDeviceService.class, deviceManager);
+// storageSource = new MemoryStorageSource();
+// fmc.addService(IStorageSourceService.class, storageSource);
+// fmc.addService(IFloodlightProviderService.class, mockFloodlightProvider);
+// fmc.addService(IRestApiService.class, restApi);
+// fmc.addService(IFlowReconcileService.class, flowReconcileMgr);
+// fmc.addService(IEntityClassifierService.class, entityClassifier);
+// fmc.addService(ITopologyService.class, topology);
+// fmc.addService(ISyncService.class, syncService);
+// tp.init(fmc);
+// restApi.init(fmc);
+// storageSource.init(fmc);
+// deviceManager.init(fmc);
+// flowReconcileMgr.init(fmc);
+// entityClassifier.init(fmc);
+// syncService.init(fmc);
+// storageSource.startUp(fmc);
+// deviceManager.startUp(fmc);
+// flowReconcileMgr.startUp(fmc);
+// tp.startUp(fmc);
+// entityClassifier.startUp(fmc);
+// syncService.startUp(fmc);
+//
+// this.storeClient =
+// this.syncService.getStoreClient(DeviceManagerImpl.DEVICE_SYNC_STORE_NAME,
+// String.class, DeviceSyncRepresentation.class);
+//
+// reset(topology);
+// topology.addListener(deviceManager);
+// expectLastCall().anyTimes();
+// replay(topology);
+//
+// IOFSwitch mockSwitch1 = makeSwitchMock(1L);
+// IOFSwitch mockSwitch10 = makeSwitchMock(10L);
+// IOFSwitch mockSwitch5 = makeSwitchMock(5L);
+// IOFSwitch mockSwitch50 = makeSwitchMock(50L);
+// Map<Long, IOFSwitch> switches = new HashMap<Long,IOFSwitch>();
+// switches.put(1L, mockSwitch1);
+// switches.put(10L, mockSwitch10);
+// switches.put(5L, mockSwitch5);
+// switches.put(50L, mockSwitch50);
+// mockFloodlightProvider.setSwitches(switches);
+//
+// replay(mockSwitch1, mockSwitch5, mockSwitch10, mockSwitch50);
+//
+// // Build our test packet
+// this.testARPReplyPacket_1 = new Ethernet()
+// .setSourceMACAddress("00:44:33:22:11:01")
+// .setDestinationMACAddress("00:11:22:33:44:55")
+// .setEtherType(Ethernet.TYPE_ARP)
+// .setVlanID((short)5)
+// .setPayload(
+// new ARP()
+// .setHardwareType(ARP.HW_TYPE_ETHERNET)
+// .setProtocolType(ARP.PROTO_TYPE_IP)
+// .setHardwareAddressLength((byte) 6)
+// .setProtocolAddressLength((byte) 4)
+// .setOpCode(ARP.OP_REPLY)
+// .setSenderHardwareAddress(Ethernet.toMACAddress("00:44:33:22:11:01"))
+// .setSenderProtocolAddress(IPv4.toIPv4AddressBytes("192.168.1.1"))
+// .setTargetHardwareAddress(Ethernet.toMACAddress("00:11:22:33:44:55"))
+// .setTargetProtocolAddress(IPv4.toIPv4AddressBytes("192.168.1.2")));
+// this.testARPReplyPacket_1_Srld = testARPReplyPacket_1.serialize();
+//
+// // Another test packet with a different source IP
+// this.testARPReplyPacket_2 = new Ethernet()
+// .setSourceMACAddress("00:99:88:77:66:55")
+// .setDestinationMACAddress("00:11:22:33:44:55")
+// .setEtherType(Ethernet.TYPE_ARP)
+// .setVlanID((short)5)
+// .setPayload(
+// new ARP()
+// .setHardwareType(ARP.HW_TYPE_ETHERNET)
+// .setProtocolType(ARP.PROTO_TYPE_IP)
+// .setHardwareAddressLength((byte) 6)
+// .setProtocolAddressLength((byte) 4)
+// .setOpCode(ARP.OP_REPLY)
+// .setSenderHardwareAddress(Ethernet.toMACAddress("00:44:33:22:11:01"))
+// .setSenderProtocolAddress(IPv4.toIPv4AddressBytes("192.168.1.1"))
+// .setTargetHardwareAddress(Ethernet.toMACAddress("00:11:22:33:44:55"))
+// .setTargetProtocolAddress(IPv4.toIPv4AddressBytes("192.168.1.2")));
+// this.testARPReplyPacket_2_Srld = testARPReplyPacket_2.serialize();
+//
+// // Build the PacketIn
+// this.packetIn_1 = ((OFPacketIn) mockFloodlightProvider.
+// getOFMessageFactory().getMessage(OFType.PACKET_IN))
+// .setBufferId(-1)
+// .setInPort((short) 1)
+// .setPacketData(this.testARPReplyPacket_1_Srld)
+// .setReason(OFPacketInReason.NO_MATCH)
+// .setTotalLength((short) this.testARPReplyPacket_1_Srld.length);
+//
+// // Build the PacketIn
+// this.packetIn_2 = ((OFPacketIn) mockFloodlightProvider.
+// getOFMessageFactory().getMessage(OFType.PACKET_IN))
+// .setBufferId(-1)
+// .setInPort((short) 1)
+// .setPacketData(this.testARPReplyPacket_2_Srld)
+// .setReason(OFPacketInReason.NO_MATCH)
+// .setTotalLength((short) this.testARPReplyPacket_2_Srld.length);
+// }
+//
+//
+//
+//
+//
+// @Test
+// public void testLastSeen() throws Exception {
+// Calendar c = Calendar.getInstance();
+// Date d1 = c.getTime();
+// Entity entity1 = new Entity(1L, null, null, null, null, d1);
+// c.add(Calendar.SECOND, 1);
+// Entity entity2 = new Entity(1L, null, 1, null, null, c.getTime());
+//
+// IDevice d = deviceManager.learnDeviceByEntity(entity2);
+// assertEquals(c.getTime(), d.getLastSeen());
+// d = deviceManager.learnDeviceByEntity(entity1);
+// assertEquals(c.getTime(), d.getLastSeen());
+//
+// deviceManager.startUp(null);
+// d = deviceManager.learnDeviceByEntity(entity1);
+// assertEquals(d1, d.getLastSeen());
+// d = deviceManager.learnDeviceByEntity(entity2);
+// assertEquals(c.getTime(), d.getLastSeen());
+// }
+//
+// @Test
+// public void testEntityLearning() throws Exception {
+// IDeviceListener mockListener =
+// createMock(IDeviceListener.class);
+// expect(mockListener.getName()).andReturn("mockListener").atLeastOnce();
+// expect(mockListener.isCallbackOrderingPostreq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+// expect(mockListener.isCallbackOrderingPrereq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+//
+// replay(mockListener);
+// deviceManager.addListener(mockListener);
+// verify(mockListener);
+// reset(mockListener);
+// deviceManager.entityClassifier= new MockEntityClassifier();
+// deviceManager.startUp(null);
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.getL2DomainId(anyLong())).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(anyLong(), anyShort())).
+// andReturn(false).anyTimes();
+//
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(10L, (short)1, 10L, (short)1)).
+// andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 1L, (short)1)).
+// andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(50L, (short)3, 50L, (short)3)).
+// andReturn(true).anyTimes();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+// deviceManager.topology = mockTopology;
+//
+// Entity entity1 = new Entity(1L, null, null, 1L, 1, new Date());
+// Entity entity2 = new Entity(1L, null, null, 10L, 1, new Date());
+// Entity entity3 = new Entity(1L, null, 1, 10L, 1, new Date());
+// Entity entity4 = new Entity(1L, null, 1, 1L, 1, new Date());
+// Entity entity5 = new Entity(2L, (short)4, 1, 5L, 2, new Date());
+// Entity entity6 = new Entity(2L, (short)4, 1, 50L, 3, new Date());
+// Entity entity7 = new Entity(2L, (short)4, 2, 50L, 3, new Date());
+//
+// mockListener.deviceAdded(isA(IDevice.class));
+// replay(mockListener, mockTopology);
+//
+// Device d1 = deviceManager.learnDeviceByEntity(entity1);
+// assertSame(d1, deviceManager.learnDeviceByEntity(entity1));
+// assertSame(d1, deviceManager.findDeviceByEntity(entity1));
+// assertEquals(DefaultEntityClassifier.entityClass ,
+// d1.getEntityClass());
+// assertArrayEquals(new Short[] { -1 }, d1.getVlanId());
+// assertArrayEquals(new Integer[] { }, d1.getIPv4Addresses());
+//
+// assertEquals(1, deviceManager.getAllDevices().size());
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceAdded(isA(IDevice.class));
+// replay(mockListener);
+//
+// Device d2 = deviceManager.learnDeviceByEntity(entity2);
+// assertFalse(d1.equals(d2));
+// assertNotSame(d1, d2);
+// assertNotSame(d1.getDeviceKey(), d2.getDeviceKey());
+// assertEquals(MockEntityClassifier.testEC, d2.getEntityClass());
+// assertArrayEquals(new Short[] { -1 }, d2.getVlanId());
+// assertArrayEquals(new Integer[] { }, d2.getIPv4Addresses());
+//
+// assertEquals(2, deviceManager.getAllDevices().size());
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceIPV4AddrChanged(isA(IDevice.class));
+// replay(mockListener);
+//
+// Device d3 = deviceManager.learnDeviceByEntity(entity3);
+// assertNotSame(d2, d3);
+// assertEquals(d2.getDeviceKey(), d3.getDeviceKey());
+// assertEquals(MockEntityClassifier.testEC, d3.getEntityClass());
+// assertArrayEquals(new Integer[] { 1 },
+// d3.getIPv4Addresses());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(10L, 1) },
+// d3.getAttachmentPoints());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(10L, 1) },
+// d3.getAttachmentPoints(true));
+// assertArrayEquals(new Short[] { -1 },
+// d3.getVlanId());
+//
+// assertEquals(2, deviceManager.getAllDevices().size());
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceIPV4AddrChanged(isA(IDevice.class));
+// replay(mockListener);
+//
+// Device d4 = deviceManager.learnDeviceByEntity(entity4);
+// assertNotSame(d1, d4);
+// assertEquals(d1.getDeviceKey(), d4.getDeviceKey());
+// assertEquals(DefaultEntityClassifier.entityClass, d4.getEntityClass());
+// assertArrayEquals(new Integer[] { 1 },
+// d4.getIPv4Addresses());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) },
+// d4.getAttachmentPoints());
+// assertArrayEquals(new Short[] { -1 },
+// d4.getVlanId());
+//
+// assertEquals(2, deviceManager.getAllDevices().size());
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceAdded((isA(IDevice.class)));
+// replay(mockListener);
+//
+// Device d5 = deviceManager.learnDeviceByEntity(entity5);
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(5L, 2) },
+// d5.getAttachmentPoints());
+// assertArrayEquals(new Short[] { (short) 4 },
+// d5.getVlanId());
+// assertEquals(2L, d5.getMACAddress());
+// assertEquals("00:00:00:00:00:02", d5.getMACAddressString());
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceAdded(isA(IDevice.class));
+// replay(mockListener);
+//
+// Device d6 = deviceManager.learnDeviceByEntity(entity6);
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(50L, 3) },
+// d6.getAttachmentPoints());
+// assertArrayEquals(new Short[] { (short) 4 },
+// d6.getVlanId());
+//
+// assertEquals(4, deviceManager.getAllDevices().size());
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceIPV4AddrChanged(isA(IDevice.class));
+// replay(mockListener);
+//
+// Device d7 = deviceManager.learnDeviceByEntity(entity7);
+// assertNotSame(d6, d7);
+// assertEquals(d6.getDeviceKey(), d7.getDeviceKey());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(50L, 3) },
+// d7.getAttachmentPoints());
+// assertArrayEquals(new Short[] { (short) 4 },
+// d7.getVlanId());
+//
+// assertEquals(4, deviceManager.getAllDevices().size());
+// verify(mockListener);
+//
+//
+// reset(mockListener);
+// replay(mockListener);
+//
+// reset(deviceManager.topology);
+// deviceManager.topology.addListener(deviceManager);
+// expectLastCall().times(1);
+// replay(deviceManager.topology);
+//
+// deviceManager.entityClassifier = new MockEntityClassifierMac();
+// deviceManager.startUp(null);
+// Entity entityNoClass = new Entity(5L, (short)1, 5, -1L, 1, new Date());
+// assertEquals(null, deviceManager.learnDeviceByEntity(entityNoClass));
+//
+// verify(mockListener);
+// }
+//
+//
+// private void doTestEntityOrdering(boolean computeInsertionPoint) throws Exception {
+// Entity e = new Entity(10L, null, null, null, null, null);
+// IEntityClass ec = createNiceMock(IEntityClass.class);
+// Device d = new Device(deviceManager, 1L, e, ec);
+//
+// int expectedLength = 1;
+// Long[] macs = new Long[] { 5L, // new first element
+// 15L, // new last element
+// 7L, // insert in middle
+// 12L, // insert in middle
+// 6L, // insert at idx 1
+// 14L, // insert at idx length-2
+// 1L,
+// 20L
+// };
+//
+// for (Long mac: macs) {
+// e = new Entity(mac, null, null, null, null, null);
+// int insertionPoint;
+// if (computeInsertionPoint) {
+// insertionPoint = -(Arrays.binarySearch(d.entities, e)+1);
+// } else {
+// insertionPoint = -1;
+// }
+// d = deviceManager.allocateDevice(d, e, insertionPoint);
+// expectedLength++;
+// assertEquals(expectedLength, d.entities.length);
+// for (int i = 0; i < d.entities.length-1; i++)
+// assertEquals(-1, d.entities[i].compareTo(d.entities[i+1]));
+// }
+// }
+//
+// @Test
+// public void testEntityOrderingExternal() throws Exception {
+// doTestEntityOrdering(true);
+// }
+//
+// @Test
+// public void testEntityOrderingInternal() throws Exception {
+// doTestEntityOrdering(false);
+// }
+//
+// @Test
+// public void testAttachmentPointLearning() throws Exception {
+// IDeviceListener mockListener =
+// createMock(IDeviceListener.class);
+// expect(mockListener.getName()).andReturn("mockListener").atLeastOnce();
+// expect(mockListener.isCallbackOrderingPostreq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+// expect(mockListener.isCallbackOrderingPrereq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+//
+// replay(mockListener);
+// deviceManager.addListener(mockListener);
+// verify(mockListener);
+// reset(mockListener);
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.getL2DomainId(1L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(5L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(10L)).
+// andReturn(10L).anyTimes();
+// expect(mockTopology.getL2DomainId(50L)).
+// andReturn(10L).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(anyLong(), anyShort())).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(anyLong(), anyShort(),
+// anyLong(), anyShort())).andReturn(false).anyTimes();
+//
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 5L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(5L, (short)1, 10L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(10L, (short)1, 50L, (short)1)).
+// andReturn(false).anyTimes();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+// replay(mockTopology);
+//
+// deviceManager.topology = mockTopology;
+//
+// Calendar c = Calendar.getInstance();
+// Entity entity1 = new Entity(1L, null, 1, 1L, 1, c.getTime());
+// Entity entity0 = new Entity(1L, null, null, null, null, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity2 = new Entity(1L, null, null, 5L, 1, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity3 = new Entity(1L, null, null, 10L, 1, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity4 = new Entity(1L, null, null, 50L, 1, c.getTime());
+//
+// IDevice d;
+// SwitchPort[] aps;
+// Integer[] ips;
+//
+// mockListener.deviceAdded(isA(IDevice.class));
+// replay(mockListener);
+//
+// deviceManager.learnDeviceByEntity(entity1);
+// d = deviceManager.learnDeviceByEntity(entity0);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) }, aps);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceMoved((isA(IDevice.class)));
+// replay(mockListener);
+//
+// d = deviceManager.learnDeviceByEntity(entity2);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+//
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(5L, 1) }, aps);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceMoved((isA(IDevice.class)));
+// replay(mockListener);
+//
+// d = deviceManager.learnDeviceByEntity(entity3);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] {new SwitchPort(5L, 1), new SwitchPort(10L, 1)}, aps);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceMoved((isA(IDevice.class)));
+// replay(mockListener);
+//
+// d = deviceManager.learnDeviceByEntity(entity4);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(5L, 1),
+// new SwitchPort(50L, 1) }, aps);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+// }
+//
+// private void verifyEntityArray(Entity[] expected, Device d) {
+// Arrays.sort(expected);
+// assertArrayEquals(expected, d.entities);
+// }
+//
+// @Test
+// public void testNoLearningOnInternalPorts() throws Exception {
+// IDeviceListener mockListener =
+// createMock(IDeviceListener.class);
+//
+// expect(mockListener.getName()).andReturn("mockListener").anyTimes();
+// expect(mockListener.isCallbackOrderingPostreq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+// expect(mockListener.isCallbackOrderingPrereq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+//
+// replay(mockListener);
+// deviceManager.addListener(mockListener);
+// verify(mockListener);
+// reset(mockListener);
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.getL2DomainId(1L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(2L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(3L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(4L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(anyLong(), anyShort()))
+// .andReturn(false).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(anyLong(), anyShort(),
+// anyLong(), anyShort()))
+// .andReturn(false).anyTimes();
+//
+// expect(mockTopology.isAttachmentPointPort(or(eq(1L), eq(3L)), anyShort()))
+// .andReturn(true).anyTimes();
+// // Switches 2 and 4 have only internal ports
+// expect(mockTopology.isAttachmentPointPort(or(eq(2L), eq(4L)), anyShort()))
+// .andReturn(false).anyTimes();
+//
+// expect(mockTopology.isConsistent(1L, (short)1, 3L, (short)1))
+// .andReturn(false).once();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+// replay(mockTopology);
+//
+// deviceManager.topology = mockTopology;
+//
+// Calendar c = Calendar.getInstance();
+// Entity entity1 = new Entity(1L, null, 1, 1L, 1, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity2 = new Entity(1L, null, 2, 2L, 1, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity3 = new Entity(1L, null, 3, 3L, 1, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity4 = new Entity(1L, null, 4, 4L, 1, c.getTime());
+//
+// IDevice d;
+// SwitchPort[] aps;
+// Integer[] ips;
+//
+// mockListener.deviceAdded(isA(IDevice.class));
+// expectLastCall().once();
+// replay(mockListener);
+//
+// // cannot learn device internal ports
+// d = deviceManager.learnDeviceByEntity(entity2);
+// assertNull(d);
+// d = deviceManager.learnDeviceByEntity(entity4);
+// assertNull(d);
+//
+// d = deviceManager.learnDeviceByEntity(entity1);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) }, aps);
+// verifyEntityArray(new Entity[] { entity1 } , (Device)d);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// replay(mockListener);
+//
+// // don't learn
+// d = deviceManager.learnDeviceByEntity(entity2);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) }, aps);
+// verifyEntityArray(new Entity[] { entity1 } , (Device)d);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceMoved(isA(IDevice.class));
+// mockListener.deviceIPV4AddrChanged(isA(IDevice.class));
+// replay(mockListener);
+//
+// // learn
+// d = deviceManager.learnDeviceByEntity(entity3);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(3L, 1) }, aps);
+// verifyEntityArray(new Entity[] { entity1, entity3 } , (Device)d);
+// ips = d.getIPv4Addresses();
+// Arrays.sort(ips);
+// assertArrayEquals(new Integer[] { 1, 3 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// replay(mockListener);
+//
+// // don't learn
+// d = deviceManager.learnDeviceByEntity(entity4);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(3L, 1) }, aps);
+// verifyEntityArray(new Entity[] { entity1, entity3 } , (Device)d);
+// ips = d.getIPv4Addresses();
+// Arrays.sort(ips);
+// assertArrayEquals(new Integer[] { 1, 3 }, ips);
+// verify(mockListener);
+// }
+//
+// @Test
+// public void testAttachmentPointSuppression() throws Exception {
+// IDeviceListener mockListener =
+// createMock(IDeviceListener.class);
+//
+// expect(mockListener.getName()).andReturn("mockListener").anyTimes();
+// expect(mockListener.isCallbackOrderingPostreq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+// expect(mockListener.isCallbackOrderingPrereq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+//
+// replay(mockListener);
+// deviceManager.addListener(mockListener);
+// verify(mockListener);
+// reset(mockListener);
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.getL2DomainId(1L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(5L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(10L)).
+// andReturn(10L).anyTimes();
+// expect(mockTopology.getL2DomainId(50L)).
+// andReturn(10L).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(anyLong(), anyShort()))
+// .andReturn(false).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(anyLong(), anyShort(),
+// anyLong(), anyShort()))
+// .andReturn(false).anyTimes();
+//
+// expect(mockTopology.isAttachmentPointPort(anyLong(), anyShort()))
+// .andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(5L, (short)1, 50L, (short)1))
+// .andReturn(false).anyTimes();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+// replay(mockTopology);
+//
+// deviceManager.topology = mockTopology;
+// // suppress (1L, 1) and (10L, 1)
+// deviceManager.addSuppressAPs(1L, (short)1);
+// deviceManager.addSuppressAPs(10L, (short)1);
+//
+// Calendar c = Calendar.getInstance();
+// Entity entity0 = new Entity(1L, null, null, null, null, c.getTime());
+// // No attachment point should be learnt on 1L, 1
+// Entity entity1 = new Entity(1L, null, 1, 1L, 1, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity2 = new Entity(1L, null, 1, 5L, 1, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity3 = new Entity(1L, null, null, 10L, 1, c.getTime());
+// c.add(Calendar.SECOND, 1);
+// Entity entity4 = new Entity(1L, null, null, 50L, 1, c.getTime());
+//
+// IDevice d;
+// SwitchPort[] aps;
+// Integer[] ips;
+//
+// mockListener.deviceAdded(isA(IDevice.class));
+// mockListener.deviceIPV4AddrChanged((isA(IDevice.class)));
+// replay(mockListener);
+//
+// // TODO: we currently do learn entities on suppressed APs
+// // // cannot learn device on suppressed AP
+// // d = deviceManager.learnDeviceByEntity(entity1);
+// // assertNull(d);
+//
+// deviceManager.learnDeviceByEntity(entity0);
+// d = deviceManager.learnDeviceByEntity(entity1);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertEquals(aps.length, 0);
+// verifyEntityArray(new Entity[] { entity0, entity1} , (Device)d);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceMoved((isA(IDevice.class)));
+// //mockListener.deviceIPV4AddrChanged((isA(IDevice.class)));
+// replay(mockListener);
+// d = deviceManager.learnDeviceByEntity(entity2);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(5L, 1) }, aps);
+// verifyEntityArray(new Entity[] { entity0, entity1, entity2 } , (Device)d);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// replay(mockListener);
+//
+// d = deviceManager.learnDeviceByEntity(entity3);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(5L, 1) }, aps);
+// verifyEntityArray(new Entity[] { entity0, entity1, entity2, entity3 } , (Device)d);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+//
+// reset(mockListener);
+// mockListener.deviceMoved((isA(IDevice.class)));
+// replay(mockListener);
+//
+// d = deviceManager.learnDeviceByEntity(entity4);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(5L, 1),
+// new SwitchPort(50L, 1) }, aps);
+// verifyEntityArray(new Entity[] { entity0, entity1, entity2, entity3, entity4} , (Device)d);
+// ips = d.getIPv4Addresses();
+// assertArrayEquals(new Integer[] { 1 }, ips);
+// verify(mockListener);
+// }
+//
+// @Test
+// public void testBDAttachmentPointLearning() throws Exception {
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.getL2DomainId(anyLong())).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.isAttachmentPointPort(anyLong(), anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(1L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(1L, (short)2)).
+// andReturn(true).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(1L, (short)1,
+// 1L, (short)2)).andReturn(true).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(1L, (short)2,
+// 1L, (short)1)).andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(anyLong(), anyShort(), anyLong(), anyShort())).andReturn(false).anyTimes();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+// replay(mockTopology);
+//
+// deviceManager.topology = mockTopology;
+//
+// Calendar c = Calendar.getInstance();
+// Entity entity1 = new Entity(1L, null, 1, 1L, 1, c.getTime());
+// c.add(Calendar.MILLISECOND,
+// (int)AttachmentPoint.OPENFLOW_TO_EXTERNAL_TIMEOUT/ 2);
+// Entity entity2 = new Entity(1L, null, null, 1L, 2, c.getTime());
+// c.add(Calendar.MILLISECOND,
+// (int)AttachmentPoint.OPENFLOW_TO_EXTERNAL_TIMEOUT / 2 + 1);
+// Entity entity3 = new Entity(1L, null, null, 1L, 2, c.getTime());
+//
+// IDevice d;
+// SwitchPort[] aps;
+//
+// d = deviceManager.learnDeviceByEntity(entity1);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) }, aps);
+//
+// // this timestamp is too soon; don't switch
+// d = deviceManager.learnDeviceByEntity(entity2);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) }, aps);
+//
+// // it should switch when we learn with a timestamp after the
+// // timeout
+// d = deviceManager.learnDeviceByEntity(entity3);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 2) }, aps);
+// }
+//
+// /**
+// * This test verifies that the learning behavior on OFPP_LOCAL ports.
+// * Once a host is learned on OFPP_LOCAL, it is allowed to move only from
+// * one OFPP_LOCAL to another OFPP_LOCAL port.
+// * @throws Exception
+// */
+// @Test
+// public void testLOCALAttachmentPointLearning() throws Exception {
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.getL2DomainId(anyLong())).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.isAttachmentPointPort(anyLong(), anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(1L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(1L, OFPort.OFPP_LOCAL.getValue())).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(1L, (short)2)).
+// andReturn(true).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(1L, (short)1,
+// 1L, OFPort.OFPP_LOCAL.getValue())).andReturn(true).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(1L, OFPort.OFPP_LOCAL.getValue(),
+// 1L, (short)2)).andReturn(true).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(1L, (short)2,
+// 1L, OFPort.OFPP_LOCAL.getValue())).andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(anyLong(), anyShort(), anyLong(), anyShort())).andReturn(false).anyTimes();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+// replay(mockTopology);
+//
+// deviceManager.topology = mockTopology;
+//
+// Calendar c = Calendar.getInstance();
+// Entity entity1 = new Entity(1L, null, 1, 1L, 1, c.getTime());
+// c.add(Calendar.MILLISECOND,
+// (int)AttachmentPoint.OPENFLOW_TO_EXTERNAL_TIMEOUT/ 2);
+// Entity entity2 = new Entity(1L, null, null, 1L, (int)OFPort.OFPP_LOCAL.getValue(), c.getTime());
+// c.add(Calendar.MILLISECOND,
+// (int)AttachmentPoint.OPENFLOW_TO_EXTERNAL_TIMEOUT + 1);
+// Entity entity3 = new Entity(1L, null, null, 1L, 2, c.getTime());
+//
+// IDevice d;
+// SwitchPort[] aps;
+//
+// d = deviceManager.learnDeviceByEntity(entity1);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) }, aps);
+//
+// // Ensure that the attachment point changes to OFPP_LOCAL
+// d = deviceManager.learnDeviceByEntity(entity2);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, OFPort.OFPP_LOCAL.getValue()) }, aps);
+//
+// // Even though the new attachment point is consistent with old
+// // and the time has elapsed, OFPP_LOCAL attachment point should
+// // be maintained.
+// d = deviceManager.learnDeviceByEntity(entity3);
+// assertEquals(1, deviceManager.getAllDevices().size());
+// aps = d.getAttachmentPoints();
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, OFPort.OFPP_LOCAL.getValue()) }, aps);
+// }
+//
+// @Test
+// public void testPacketInBasic(byte[] deviceMac, OFPacketIn packetIn) {
+// // Mock up our expected behavior
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+// expect(mockTopology.isAttachmentPointPort(EasyMock.anyLong(),
+// EasyMock.anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(EasyMock.anyLong(),
+// EasyMock.anyShort(),
+// EasyMock.anyLong(),
+// EasyMock.anyShort())).andReturn(false).
+// anyTimes();
+// expect(mockTopology.getL2DomainId(EasyMock.anyLong())).andReturn(1L).anyTimes();
+// replay(mockTopology);
+//
+// Date currentDate = new Date();
+//
+// // build our expected Device
+// Integer ipaddr = IPv4.toIPv4Address("192.168.1.1");
+// Device device =
+// new Device(deviceManager,
+// new Long(deviceManager.deviceKeyCounter),
+// new Entity(Ethernet.toLong(deviceMac),
+// (short)5,
+// ipaddr,
+// 1L,
+// 1,
+// currentDate),
+// DefaultEntityClassifier.entityClass);
+//
+// // Get the listener and trigger the packet in
+// IOFSwitch switch1 = mockFloodlightProvider.getSwitch(1L);
+// mockFloodlightProvider.dispatchMessage(switch1, packetIn);
+//
+// // Verify the replay matched our expectations
+// // verify(mockTopology);
+//
+// // Verify the device
+// Device rdevice = (Device)
+// deviceManager.findDevice(Ethernet.toLong(deviceMac),
+// (short)5, null, null, null);
+//
+// assertEquals(device, rdevice);
+// assertEquals(new Short((short)5), rdevice.getVlanId()[0]);
+//
+// Device result = null;
+// Iterator<? extends IDevice> dstiter =
+// deviceManager.queryClassDevices(device.getEntityClass(),
+// null, null, ipaddr,
+// null, null);
+// if (dstiter.hasNext()) {
+// result = (Device)dstiter.next();
+// }
+//
+// assertEquals(device, result);
+//
+// device =
+// new Device(device,
+// new Entity(Ethernet.toLong(deviceMac),
+// (short)5,
+// ipaddr,
+// 5L,
+// 2,
+// currentDate),
+// -1);
+//
+// reset(mockTopology);
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).
+// anyTimes();
+// expect(mockTopology.isConsistent(EasyMock.anyLong(),
+// EasyMock.anyShort(),
+// EasyMock.anyLong(),
+// EasyMock.anyShort())).andReturn(false).
+// anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(EasyMock.anyLong(),
+// EasyMock.anyShort()))
+// .andReturn(false)
+// .anyTimes();
+// expect(mockTopology.getL2DomainId(1L)).andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(5L)).andReturn(1L).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(1L, (short)1, 5L, (short)2)).
+// andReturn(false).anyTimes();
+//
+// // Start recording the replay on the mocks
+// replay(mockTopology);
+// // Get the listener and trigger the packet in
+// IOFSwitch switch5 = mockFloodlightProvider.getSwitch(5L);
+// mockFloodlightProvider.
+// dispatchMessage(switch5, this.packetIn_1.setInPort((short)2));
+//
+// // Verify the replay matched our expectations
+// verify(mockTopology);
+//
+// // Verify the device
+// rdevice = (Device)
+// deviceManager.findDevice(Ethernet.toLong(deviceMac),
+// (short)5, null, null, null);
+// assertEquals(device, rdevice);
+// }
+//
+// @Test
+// public void testPacketIn() throws Exception {
+// byte[] deviceMac1 =
+// ((Ethernet)this.testARPReplyPacket_1).getSourceMACAddress();
+// testPacketInBasic(deviceMac1, packetIn_1);
+// }
+//
+// /**
+// * This test ensures the device manager learns the source device
+// * corresponding to the senderHardwareAddress and senderProtocolAddress
+// * in an ARP response whenever the senderHardwareAddress is different
+// * from the source MAC address of the Ethernet frame.
+// *
+// * This test is the same as testPacketIn method, except for the
+// * packet-in that's used.
+// * @throws Exception
+// */
+// @Test
+// public void testDeviceLearningFromArpResponseData() throws Exception {
+// ARP arp = (ARP)((Ethernet)this.testARPReplyPacket_2).getPayload();
+// byte[] deviceMac2 = arp.getSenderHardwareAddress();
+//
+// testPacketInBasic(deviceMac2, packetIn_2);
+// }
+//
+// /**
+// * Note: Entity expiration does not result in device moved notification.
+// * @throws Exception
+// */
+// public void doTestEntityExpiration() throws Exception {
+// IDeviceListener mockListener =
+// createMock(IDeviceListener.class);
+// expect(mockListener.getName()).andReturn("mockListener").anyTimes();
+// expect(mockListener.isCallbackOrderingPostreq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+// expect(mockListener.isCallbackOrderingPrereq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).anyTimes();
+//
+// expect(mockTopology.isBroadcastDomainPort(1L, (short)1)).andReturn(false).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(5L, (short)1)).andReturn(false).anyTimes();
+// expect(mockTopology.getL2DomainId(1L)).andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(5L)).andReturn(5L).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 5L, (short)1)).
+// andReturn(false).anyTimes();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+// replay(mockTopology);
+// deviceManager.topology = mockTopology;
+//
+// Calendar c = Calendar.getInstance();
+// Entity entity1 = new Entity(1L, null, 2, 1L, 1, c.getTime());
+// c.add(Calendar.MILLISECOND, -DeviceManagerImpl.ENTITY_TIMEOUT-1);
+// Entity entity2 = new Entity(1L, null, 1, 5L, 1, c.getTime());
+//
+// deviceManager.learnDeviceByEntity(entity1);
+// IDevice d = deviceManager.learnDeviceByEntity(entity2);
+// assertArrayEquals(new Integer[] { 1, 2 }, d.getIPv4Addresses());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 1)},
+// d.getAttachmentPoints());
+// Iterator<? extends IDevice> diter =
+// deviceManager.queryClassDevices(d.getEntityClass(),
+// null, null, 1, null, null);
+// assertTrue(diter.hasNext());
+// assertEquals(d.getDeviceKey(), diter.next().getDeviceKey());
+// diter = deviceManager.queryClassDevices(d.getEntityClass(),
+// null, null, 2, null, null);
+// assertTrue(diter.hasNext());
+// assertEquals(d.getDeviceKey(), diter.next().getDeviceKey());
+//
+// replay(mockListener);
+// deviceManager.addListener(mockListener);
+// verify(mockListener);
+// reset(mockListener);
+//
+// mockListener.deviceIPV4AddrChanged(isA(IDevice.class));
+// replay(mockListener);
+// deviceManager.entityCleanupTask.reschedule(0, null);
+//
+// d = deviceManager.getDevice(d.getDeviceKey());
+// assertArrayEquals(new Integer[] { 2 }, d.getIPv4Addresses());
+//
+// // Attachment points are not removed, previous ones are still valid.
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 1) },
+// d.getAttachmentPoints());
+// diter = deviceManager.queryClassDevices(d.getEntityClass(),
+// null, null, 2, null, null);
+// assertTrue(diter.hasNext());
+// assertEquals(d.getDeviceKey(), diter.next().getDeviceKey());
+// diter = deviceManager.queryClassDevices(d.getEntityClass(),
+// null, null, 1, null, null);
+// assertFalse(diter.hasNext());
+//
+// d = deviceManager.findDevice(1L, null, null, null, null);
+// assertArrayEquals(new Integer[] { 2 }, d.getIPv4Addresses());
+//
+// // Attachment points are not removed, previous ones are still valid.
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 1) },
+// d.getAttachmentPoints());
+//
+// verify(mockListener);
+// }
+//
+// public void doTestDeviceExpiration() throws Exception {
+// IDeviceListener mockListener =
+// createMock(IDeviceListener.class);
+// expect(mockListener.getName()).andReturn("mockListener").anyTimes();
+// expect(mockListener.isCallbackOrderingPostreq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+// expect(mockListener.isCallbackOrderingPrereq((String)anyObject(), (String)anyObject()))
+// .andReturn(false).atLeastOnce();
+//
+// Calendar c = Calendar.getInstance();
+// c.add(Calendar.MILLISECOND, -DeviceManagerImpl.ENTITY_TIMEOUT-1);
+// Entity entity1 = new Entity(1L, null, 1, 1L, 1, c.getTime());
+// Entity entity2 = new Entity(1L, null, 2, 5L, 1, c.getTime());
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+//
+// expect(mockTopology.isAttachmentPointPort(EasyMock.anyLong(),
+// EasyMock.anyShort())).
+// andReturn(true).
+// anyTimes();
+// expect(mockTopology.getL2DomainId(1L)).andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(5L)).andReturn(1L).anyTimes();
+// expect(mockTopology.isConsistent(EasyMock.anyLong(),
+// EasyMock.anyShort(),
+// EasyMock.anyLong(),
+// EasyMock.anyShort())).andReturn(false).
+// anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(EasyMock.anyLong(),
+// EasyMock.anyShort())).
+// andReturn(false).anyTimes();
+// replay(mockTopology);
+//
+// IDevice d = deviceManager.learnDeviceByEntity(entity2);
+// d = deviceManager.learnDeviceByEntity(entity1);
+// assertArrayEquals(new Integer[] { 1, 2 }, d.getIPv4Addresses());
+//
+// replay(mockListener);
+// deviceManager.addListener(mockListener);
+// verify(mockListener);
+// reset(mockListener);
+//
+// mockListener.deviceRemoved(isA(IDevice.class));
+// replay(mockListener);
+// deviceManager.entityCleanupTask.reschedule(0, null);
+//
+// IDevice r = deviceManager.getDevice(d.getDeviceKey());
+// assertNull(r);
+// Iterator<? extends IDevice> diter =
+// deviceManager.queryClassDevices(d.getEntityClass(),
+// null, null, 1, null, null);
+// assertFalse(diter.hasNext());
+//
+// r = deviceManager.findDevice(1L, null, null, null, null);
+// assertNull(r);
+//
+// verify(mockListener);
+// }
+//
+// /*
+// * A ConcurrentHashMap for devices (deviceMap) that can be used to test
+// * code that specially handles concurrent modification situations. In
+// * particular, we overwrite values() and will replace / remove all the
+// * elements returned by values.
+// *
+// * The remove flag in the constructor specifies if devices returned by
+// * values() should be removed or replaced.
+// */
+// protected static class ConcurrentlyModifiedDeviceMap
+// extends ConcurrentHashMap<Long, Device> {
+// private static final long serialVersionUID = 7784938535441180562L;
+// protected boolean remove;
+// public ConcurrentlyModifiedDeviceMap(boolean remove) {
+// super();
+// this.remove = remove;
+// }
+//
+// @Override
+// public Collection<Device> values() {
+// // Get the values from the real map and copy them since
+// // the collection returned by values can reflect changed
+// Collection<Device> devs = new ArrayList<Device>(super.values());
+// for (Device d: devs) {
+// if (remove) {
+// // We remove the device from the underlying map
+// super.remove(d.getDeviceKey());
+// } else {
+// super.remove(d.getDeviceKey());
+// // We add a different Device instance with the same
+// // key to the map. We'll do some hackery so the device
+// // is different enough to compare differently in equals
+// // but otherwise looks the same.
+// // It's ugly but it works.
+// // clone entities
+// Device newDevice = d;
+// for (Entity e: d.getEntities()) {
+// Entity newEntity = new Entity (e.macAddress,
+// e.vlan,
+// e.ipv4Address,
+// e.switchDPID,
+// e.switchPort,
+// e.lastSeenTimestamp);
+// if (e.vlan == null)
+// newEntity.vlan = (short)1;
+// else
+// newEntity.vlan = (short)((e.vlan + 1 % 4095)+1);
+// newDevice = new Device(newDevice, newEntity, -1);
+// }
+// assertEquals(false, newDevice.equals(d));
+// super.put(newDevice.getDeviceKey(), newDevice);
+// }
+// }
+// return devs;
+// }
+// }
+//
+// @Test
+// public void testEntityExpiration() throws Exception {
+// doTestEntityExpiration();
+// }
+//
+// @Test
+// public void testDeviceExpiration() throws Exception {
+// doTestDeviceExpiration();
+// }
+//
+// /* Test correct entity cleanup behavior when a concurrent modification
+// * occurs.
+// */
+// @Test
+// public void testEntityExpirationConcurrentModification() throws Exception {
+// deviceManager.deviceMap = new ConcurrentlyModifiedDeviceMap(false);
+// doTestEntityExpiration();
+// }
+//
+// /* Test correct entity cleanup behavior when a concurrent remove
+// * occurs.
+// */
+// @Test
+// public void testDeviceExpirationConcurrentRemove() throws Exception {
+// deviceManager.deviceMap = new ConcurrentlyModifiedDeviceMap(true);
+// doTestDeviceExpiration();
+// }
+//
+// /* Test correct entity cleanup behavior when a concurrent modification
+// * occurs.
+// */
+// @Test
+// public void testDeviceExpirationConcurrentModification() throws Exception {
+// deviceManager.deviceMap = new ConcurrentlyModifiedDeviceMap(false);
+// doTestDeviceExpiration();
+// }
+//
+//
+// @Test
+// public void testAttachmentPointFlapping() throws Exception {
+// Calendar c = Calendar.getInstance();
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).andReturn(true).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(anyLong(),
+// anyShort())).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(anyLong(), anyShort(),
+// anyLong(), anyShort())).andReturn(false).anyTimes();
+// expect(mockTopology.getL2DomainId(anyLong())).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 1L, (short)1)).
+// andReturn(true).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 5L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 10L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(5L, (short)1, 10L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(10L, (short)1, 1L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(5L, (short)1, 1L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(10L, (short)1, 5L, (short)1)).
+// andReturn(false).anyTimes();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+//
+// replay(mockTopology);
+// deviceManager.topology = mockTopology;
+//
+// Entity entity1 = new Entity(1L, null, null, 1L, 1, c.getTime());
+// Entity entity1a = new Entity(1L, null, 1, 1L, 1, c.getTime());
+// Entity entity2 = new Entity(1L, null, null, 5L, 1, c.getTime());
+// Entity entity3 = new Entity(1L, null, null, 10L, 1, c.getTime());
+// entity1.setLastSeenTimestamp(c.getTime());
+// c.add(Calendar.MILLISECOND, Entity.ACTIVITY_TIMEOUT/2);
+// entity1a.setLastSeenTimestamp(c.getTime());
+// c.add(Calendar.MILLISECOND, 1);
+// entity2.setLastSeenTimestamp(c.getTime());
+// c.add(Calendar.MILLISECOND, 1);
+// entity3.setLastSeenTimestamp(c.getTime());
+//
+//
+//
+// IDevice d;
+// d = deviceManager.learnDeviceByEntity(entity1);
+// d = deviceManager.learnDeviceByEntity(entity1a);
+// d = deviceManager.learnDeviceByEntity(entity2);
+// d = deviceManager.learnDeviceByEntity(entity3);
+//
+// // all entities are active, so entity3 should win
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(10L, 1) },
+// d.getAttachmentPoints());
+//
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(10L, 1),},
+// d.getAttachmentPoints(true));
+//
+// c.add(Calendar.MILLISECOND, Entity.ACTIVITY_TIMEOUT/4);
+// entity1.setLastSeenTimestamp(c.getTime());
+// d = deviceManager.learnDeviceByEntity(entity1);
+//
+// // all are still active; entity3 should still win
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) },
+// d.getAttachmentPoints());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 1,
+// ErrorStatus.DUPLICATE_DEVICE),
+// new SwitchPort(10L, 1,
+// ErrorStatus.DUPLICATE_DEVICE) },
+// d.getAttachmentPoints(true));
+//
+// c.add(Calendar.MILLISECOND, Entity.ACTIVITY_TIMEOUT+2000);
+// entity1.setLastSeenTimestamp(c.getTime());
+// d = deviceManager.learnDeviceByEntity(entity1);
+//
+// assertEquals(entity1.getActiveSince(), entity1.getLastSeenTimestamp());
+// // entity1 should now be the only active entity
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) },
+// d.getAttachmentPoints());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1) },
+// d.getAttachmentPoints(true));
+// }
+//
+//
+// @Test
+// public void testAttachmentPointFlappingTwoCluster() throws Exception {
+// Calendar c = Calendar.getInstance();
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).andReturn(true).anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(anyLong(),
+// anyShort())).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(anyLong(), anyShort(),
+// anyLong(), anyShort())).andReturn(false).anyTimes();
+// expect(mockTopology.getL2DomainId(1L)).
+// andReturn(1L).anyTimes();
+// expect(mockTopology.getL2DomainId(5L)).
+// andReturn(5L).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 1L, (short)2)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)2, 5L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(5L, (short)1, 5L, (short)2)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)2, 1L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 5L, (short)1)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(1L, (short)1, 5L, (short)2)).
+// andReturn(false).anyTimes();
+// expect(mockTopology.isConsistent(5L, (short)2, 5L, (short)1)).
+// andReturn(false).anyTimes();
+//
+// Date topologyUpdateTime = new Date();
+// expect(mockTopology.getLastUpdateTime()).andReturn(topologyUpdateTime).
+// anyTimes();
+//
+// replay(mockTopology);
+// deviceManager.topology = mockTopology;
+//
+// Entity entity1 = new Entity(1L, null, null, 1L, 1, c.getTime());
+// Entity entity2 = new Entity(1L, null, null, 1L, 2, c.getTime());
+// Entity entity3 = new Entity(1L, null, null, 5L, 1, c.getTime());
+// Entity entity4 = new Entity(1L, null, null, 5L, 2, c.getTime());
+// entity1.setLastSeenTimestamp(c.getTime());
+// c.add(Calendar.MILLISECOND, Entity.ACTIVITY_TIMEOUT/2);
+// c.add(Calendar.MILLISECOND, 1);
+// entity2.setLastSeenTimestamp(c.getTime());
+// c.add(Calendar.MILLISECOND, 1);
+// entity3.setLastSeenTimestamp(c.getTime());
+// c.add(Calendar.MILLISECOND, 1);
+// entity4.setLastSeenTimestamp(c.getTime());
+//
+// deviceManager.learnDeviceByEntity(entity1);
+// deviceManager.learnDeviceByEntity(entity2);
+// deviceManager.learnDeviceByEntity(entity3);
+// IDevice d = deviceManager.learnDeviceByEntity(entity4);
+//
+// // all entities are active, so entities 2,4 should win
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 2),
+// new SwitchPort(5L, 2) },
+// d.getAttachmentPoints());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 2),
+// new SwitchPort(5L, 2)},
+// d.getAttachmentPoints(true));
+//
+// c.add(Calendar.MILLISECOND, 1);
+// entity1.setLastSeenTimestamp(c.getTime());
+// d = deviceManager.learnDeviceByEntity(entity1);
+//
+// // all entities are active, so entities 2,4 should win
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 2) },
+// d.getAttachmentPoints());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 2),
+// new SwitchPort(1L, 2, ErrorStatus.DUPLICATE_DEVICE)},
+// d.getAttachmentPoints(true));
+//
+// c.add(Calendar.MILLISECOND, Entity.ACTIVITY_TIMEOUT+1);
+// entity1.setLastSeenTimestamp(c.getTime());
+// d = deviceManager.learnDeviceByEntity(entity1);
+//
+// // entities 3,4 are still in conflict, but 1 should be resolved
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 2) },
+// d.getAttachmentPoints());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 2)},
+// d.getAttachmentPoints(true));
+//
+// entity3.setLastSeenTimestamp(c.getTime());
+// d = deviceManager.learnDeviceByEntity(entity3);
+//
+// // no conflicts, 1 and 3 will win
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 1) },
+// d.getAttachmentPoints());
+// assertArrayEquals(new SwitchPort[] { new SwitchPort(1L, 1),
+// new SwitchPort(5L, 1) },
+// d.getAttachmentPoints(true));
+//
+// }
+//
+// protected void doTestDeviceQuery() throws Exception {
+// Entity entity1 = new Entity(1L, (short)1, 1, 1L, 1, new Date());
+// Entity entity2 = new Entity(2L, (short)2, 2, 1L, 2, new Date());
+// Entity entity3 = new Entity(3L, (short)3, 3, 5L, 1, new Date());
+// Entity entity4 = new Entity(4L, (short)4, 3, 5L, 2, new Date());
+// Entity entity5 = new Entity(1L, (short)4, 3, 5L, 2, new Date());
+//
+// Device d1 = deviceManager.learnDeviceByEntity(entity1);
+// deviceManager.learnDeviceByEntity(entity2);
+// Device d3 = deviceManager.learnDeviceByEntity(entity3);
+// Device d4 = deviceManager.learnDeviceByEntity(entity4);
+//
+// IDevice d;
+//
+// Iterator<? extends IDevice> iter =
+// deviceManager.queryDevices(null, (short)1, 1, null, null);
+// int count = 0;
+// while (iter.hasNext()) {
+// count += 1;
+// d = iter.next();
+// assertEquals(d1.getDeviceKey(), d.getDeviceKey());
+// }
+// assertEquals(1, count);
+//
+// iter = deviceManager.queryDevices(null, (short)3, 3, null, null);
+// count = 0;
+// while (iter.hasNext()) {
+// count += 1;
+// d = iter.next();
+// assertEquals(d3.getDeviceKey(), d.getDeviceKey());
+// }
+// assertEquals(1, count);
+//
+// iter = deviceManager.queryDevices(null, (short)1, 3, null, null);
+// count = 0;
+// while (iter.hasNext()) {
+// count += 1;
+// iter.next();
+// }
+// assertEquals(0, count);
+//
+// Device d5 = deviceManager.learnDeviceByEntity(entity5);
+// iter = deviceManager.queryDevices(null, (short)4, 3, null, null);
+// count = 0;
+// Set<Long> deviceKeysFromIterator = new HashSet<Long>();
+// while (iter.hasNext()) {
+// count += 1;
+// d = iter.next();
+// deviceKeysFromIterator.add(d.getDeviceKey());
+// }
+// Set<Long> expectedDeviceKeys = new HashSet<Long>();
+// expectedDeviceKeys.add(d4.getDeviceKey());
+// expectedDeviceKeys.add(d5.getDeviceKey());
+// assertEquals(expectedDeviceKeys, deviceKeysFromIterator);
+// assertEquals(2, count);
+//
+//
+// iter = deviceManager.queryDevices(1L, null, null, null, null);
+// count = 0;
+// deviceKeysFromIterator = new HashSet<Long>();
+// while (iter.hasNext()) {
+// count += 1;
+// d = iter.next();
+// deviceKeysFromIterator.add(d.getDeviceKey());
+// }
+// expectedDeviceKeys = new HashSet<Long>();
+// expectedDeviceKeys.add(d1.getDeviceKey());
+// expectedDeviceKeys.add(d5.getDeviceKey());
+// assertEquals(expectedDeviceKeys, deviceKeysFromIterator);
+// assertEquals(2, count);
+// }
+//
+// @Test
+// public void testDeviceIndex() throws Exception {
+// EnumSet<IDeviceService.DeviceField> indexFields =
+// EnumSet.noneOf(IDeviceService.DeviceField.class);
+// indexFields.add(IDeviceService.DeviceField.IPV4);
+// indexFields.add(IDeviceService.DeviceField.VLAN);
+// deviceManager.addIndex(false, indexFields);
+//
+// indexFields = EnumSet.noneOf(IDeviceService.DeviceField.class);
+// deviceManager.addIndex(false, indexFields);
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.getL2DomainId(EasyMock.anyLong())).andReturn(1L).anyTimes();
+// replay(mockTopology);
+// doTestDeviceQuery();
+// }
+//
+// @Test
+// public void testDeviceQuery() throws Exception {
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.getL2DomainId(EasyMock.anyLong())).andReturn(1L).anyTimes();
+// replay(mockTopology);
+//
+// doTestDeviceQuery();
+// }
+//
+// protected void doTestDeviceClassQuery() throws Exception {
+// Entity entity1 = new Entity(1L, (short)1, 1, 1L, 1, new Date());
+// Entity entity2 = new Entity(2L, (short)2, 2, 1L, 2, new Date());
+// Entity entity3 = new Entity(3L, (short)3, 3, 5L, 1, new Date());
+// Entity entity4 = new Entity(4L, (short)4, 3, 5L, 2, new Date());
+// Entity entity5 = new Entity(1L, (short)4, 3, 5L, 2, new Date());
+//
+// IDevice d1 = deviceManager.learnDeviceByEntity(entity1);
+// IDevice d2 = deviceManager.learnDeviceByEntity(entity2);
+// IDevice d3 = deviceManager.learnDeviceByEntity(entity3);
+// IDevice d4 = deviceManager.learnDeviceByEntity(entity4);
+// assertEquals(d1.getEntityClass(), d2.getEntityClass());
+// assertEquals(d1.getEntityClass(), d3.getEntityClass());
+// assertEquals(d1.getEntityClass(), d4.getEntityClass());
+//
+// IDevice d;
+//
+// Iterator<? extends IDevice> iter =
+// deviceManager.queryClassDevices(d1.getEntityClass(), null,
+// (short)1, 1, null, null);
+// int count = 0;
+// while (iter.hasNext()) {
+// count += 1;
+// d = iter.next();
+// assertEquals(d1.getDeviceKey(), d.getDeviceKey());
+// }
+// assertEquals(1, count);
+//
+// iter = deviceManager.queryClassDevices(d1.getEntityClass(), null,
+// (short)3, 3, null, null);
+// count = 0;
+// while (iter.hasNext()) {
+// count += 1;
+// d = iter.next();
+// assertEquals(d3.getDeviceKey(), d.getDeviceKey());
+//
+// }
+// assertEquals(1, count);
+//
+// iter = deviceManager.queryClassDevices(d1.getEntityClass(), null,
+// (short)1, 3, null, null);
+// count = 0;
+// while (iter.hasNext()) {
+// count += 1;
+// iter.next();
+// }
+// assertEquals(0, count);
+//
+// IDevice d5 = deviceManager.learnDeviceByEntity(entity5);
+// assertEquals(d1.getEntityClass(), d5.getEntityClass());
+// iter = deviceManager.queryClassDevices(d1.getEntityClass(), null,
+// (short)4, 3, null, null);
+// count = 0;
+// Set<Long> deviceKeysFromIterator = new HashSet<Long>();
+// while (iter.hasNext()) {
+// count += 1;
+// d = iter.next();
+// deviceKeysFromIterator.add(d.getDeviceKey());
+// }
+// Set<Long> expectedDeviceKeys = new HashSet<Long>();
+// expectedDeviceKeys.add(d4.getDeviceKey());
+// expectedDeviceKeys.add(d5.getDeviceKey());
+// assertEquals(expectedDeviceKeys, deviceKeysFromIterator);
+// assertEquals(2, count);
+// }
+//
+// @Test
+// public void testDeviceClassIndex() throws Exception {
+// EnumSet<IDeviceService.DeviceField> indexFields =
+// EnumSet.noneOf(IDeviceService.DeviceField.class);
+// indexFields.add(IDeviceService.DeviceField.IPV4);
+// indexFields.add(IDeviceService.DeviceField.VLAN);
+// deviceManager.addIndex(true, indexFields);
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.getL2DomainId(EasyMock.anyLong())).andReturn(1L).anyTimes();
+// replay(mockTopology);
+//
+// doTestDeviceClassQuery();
+// }
+//
+// @Test
+// public void testDeviceClassQuery() throws Exception {
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.getL2DomainId(EasyMock.anyLong())).andReturn(1L).anyTimes();
+// replay(mockTopology);
+//
+// doTestDeviceClassQuery();
+// }
+//
+// @Test
+// public void testFindDevice() throws FloodlightModuleException {
+// boolean exceptionCaught;
+// deviceManager.entityClassifier= new MockEntityClassifierMac();
+// deviceManager.startUp(null);
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.getL2DomainId(EasyMock.anyLong())).andReturn(1L).anyTimes();
+// replay(mockTopology);
+//
+// Entity entity1 = new Entity(1L, (short)1, 1, 1L, 1, new Date());
+// Entity entity2 = new Entity(2L, (short)2, 2, 1L, 2, new Date());
+// Entity entity2b = new Entity(22L, (short)2, 2, 1L, 2, new Date());
+//
+// Entity entity3 = new Entity(3L, (short)1, 3, 2L, 1, new Date());
+// Entity entity4 = new Entity(4L, (short)2, 4, 2L, 2, new Date());
+//
+// Entity entity5 = new Entity(5L, (short)1, 5, 3L, 1, new Date());
+//
+//
+// IDevice d1 = deviceManager.learnDeviceByEntity(entity1);
+// IDevice d2 = deviceManager.learnDeviceByEntity(entity2);
+// IDevice d3 = deviceManager.learnDeviceByEntity(entity3);
+// IDevice d4 = deviceManager.learnDeviceByEntity(entity4);
+// IDevice d5 = deviceManager.learnDeviceByEntity(entity5);
+//
+// // Make sure the entity classifier worked as expected
+// assertEquals(MockEntityClassifierMac.testECMac1, d1.getEntityClass());
+// assertEquals(MockEntityClassifierMac.testECMac1, d2.getEntityClass());
+// assertEquals(MockEntityClassifierMac.testECMac2, d3.getEntityClass());
+// assertEquals(MockEntityClassifierMac.testECMac2, d4.getEntityClass());
+// assertEquals(DefaultEntityClassifier.entityClass,
+// d5.getEntityClass());
+//
+// // Look up the device using findDevice() which uses only the primary
+// // index
+// assertEquals(d1, deviceManager.findDevice(entity1.getMacAddress(),
+// entity1.getVlan(),
+// entity1.getIpv4Address(),
+// entity1.getSwitchDPID(),
+// entity1.getSwitchPort()));
+// // port changed. Device will be found through class index
+// assertEquals(d1, deviceManager.findDevice(entity1.getMacAddress(),
+// entity1.getVlan(),
+// entity1.getIpv4Address(),
+// entity1.getSwitchDPID(),
+// entity1.getSwitchPort()+1));
+// // VLAN changed. No device matches
+// assertEquals(null, deviceManager.findDevice(entity1.getMacAddress(),
+// (short)42,
+// entity1.getIpv4Address(),
+// entity1.getSwitchDPID(),
+// entity1.getSwitchPort()));
+// assertEquals(null, deviceManager.findDevice(entity1.getMacAddress(),
+// null,
+// entity1.getIpv4Address(),
+// entity1.getSwitchDPID(),
+// entity1.getSwitchPort()));
+// assertEquals(d2, deviceManager.findDeviceByEntity(entity2));
+// assertEquals(null, deviceManager.findDeviceByEntity(entity2b));
+// assertEquals(d3, deviceManager.findDevice(entity3.getMacAddress(),
+// entity3.getVlan(),
+// entity3.getIpv4Address(),
+// entity3.getSwitchDPID(),
+// entity3.getSwitchPort()));
+// // switch and port not set. throws exception
+// exceptionCaught = false;
+// try {
+// assertEquals(null, deviceManager.findDevice(entity3.getMacAddress(),
+// entity3.getVlan(),
+// entity3.getIpv4Address(),
+// null,
+// null));
+// }
+// catch (IllegalArgumentException e) {
+// exceptionCaught = true;
+// }
+// if (!exceptionCaught)
+// fail("findDevice() did not throw IllegalArgumentException");
+// assertEquals(d4, deviceManager.findDeviceByEntity(entity4));
+// assertEquals(d5, deviceManager.findDevice(entity5.getMacAddress(),
+// entity5.getVlan(),
+// entity5.getIpv4Address(),
+// entity5.getSwitchDPID(),
+// entity5.getSwitchPort()));
+//        // switch and port not set. throws exception (switch/port are key
+//        // fields of IEntityClassifier but not of d5.entityClass)
+// exceptionCaught = false;
+// try {
+// assertEquals(d5, deviceManager.findDevice(entity5.getMacAddress(),
+// entity5.getVlan(),
+// entity5.getIpv4Address(),
+// null,
+// null));
+// }
+// catch (IllegalArgumentException e) {
+// exceptionCaught = true;
+// }
+// if (!exceptionCaught)
+// fail("findDevice() did not throw IllegalArgumentException");
+//
+//
+// Entity entityNoClass = new Entity(5L, (short)1, 5, -1L, 1, new Date());
+// assertEquals(null, deviceManager.findDeviceByEntity(entityNoClass));
+//
+//
+// // Now look up destination devices
+// assertEquals(d1, deviceManager.findClassDevice(d2.getEntityClass(),
+// entity1.getMacAddress(),
+// entity1.getVlan(),
+// entity1.getIpv4Address()));
+// assertEquals(d1, deviceManager.findClassDevice(d2.getEntityClass(),
+// entity1.getMacAddress(),
+// entity1.getVlan(),
+// null));
+// assertEquals(null, deviceManager.findClassDevice(d2.getEntityClass(),
+// entity1.getMacAddress(),
+// (short) -1,
+// 0));
+// }
+//
+//
+//
+// @Test
+// public void testGetIPv4Addresses() {
+// // Looks like Date is only 1s granularity
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.getL2DomainId(anyLong())).andReturn(1L).anyTimes();
+// expect(mockTopology.isConsistent(EasyMock.anyLong(),
+// EasyMock.anyShort(),
+// EasyMock.anyLong(),
+// EasyMock.anyShort()))
+// .andReturn(false)
+// .anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(EasyMock.anyLong(),
+// EasyMock.anyShort()))
+// .andReturn(false)
+// .anyTimes();
+// expect(mockTopology.isInSameBroadcastDomain(EasyMock.anyLong(),
+// EasyMock.anyShort(),
+// EasyMock.anyLong(),
+// EasyMock.anyShort())).
+// andReturn(false).anyTimes();
+// replay(mockTopology);
+//
+// Entity e1 = new Entity(1L, (short)1, null, null, null, new Date(2000));
+// Device d1 = deviceManager.learnDeviceByEntity(e1);
+// assertArrayEquals(new Integer[0], d1.getIPv4Addresses());
+//
+//
+// Entity e2 = new Entity(2L, (short)2, 2, null, null, new Date(2000));
+// Device d2 = deviceManager.learnDeviceByEntity(e2);
+// d2 = deviceManager.learnDeviceByEntity(e2);
+// assertArrayEquals(new Integer[] { 2 }, d2.getIPv4Addresses());
+// // More than one entity
+// Entity e2b = new Entity(2L, (short)2, null, 2L, 2, new Date(3000));
+// d2 = deviceManager.learnDeviceByEntity(e2b);
+// assertEquals(2, d2.entities.length);
+// assertArrayEquals(new Integer[] { 2 }, d2.getIPv4Addresses());
+// // and now add an entity with an IP
+// Entity e2c = new Entity(2L, (short)2, 2, 2L, 3, new Date(3000));
+// d2 = deviceManager.learnDeviceByEntity(e2c);
+// assertArrayEquals(new Integer[] { 2 }, d2.getIPv4Addresses());
+// assertEquals(3, d2.entities.length);
+//
+// // Other devices with different IPs shouldn't interfere
+// Entity e3 = new Entity(3L, (short)3, 3, null, null, new Date(4000));
+// Entity e3b = new Entity(3L, (short)3, 3, 3L, 3, new Date(4400));
+// Device d3 = deviceManager.learnDeviceByEntity(e3);
+// d3 = deviceManager.learnDeviceByEntity(e3b);
+// assertArrayEquals(new Integer[] { 2 }, d2.getIPv4Addresses());
+// assertArrayEquals(new Integer[] { 3 }, d3.getIPv4Addresses());
+//
+// // Add another IP to d3
+// Entity e3c = new Entity(3L, (short)3, 33, 3L, 3, new Date(4400));
+// d3 = deviceManager.learnDeviceByEntity(e3c);
+// Integer[] ips = d3.getIPv4Addresses();
+// Arrays.sort(ips);
+// assertArrayEquals(new Integer[] { 3, 33 }, ips);
+//
+// // Add another device that also claims IP2 but is older than e2
+// Entity e4 = new Entity(4L, (short)4, 2, null, null, new Date(1000));
+// Entity e4b = new Entity(4L, (short)4, null, 4L, 4, new Date(1000));
+// Device d4 = deviceManager.learnDeviceByEntity(e4);
+// assertArrayEquals(new Integer[] { 2 }, d2.getIPv4Addresses());
+// assertArrayEquals(new Integer[0], d4.getIPv4Addresses());
+// // add another entity to d4
+// d4 = deviceManager.learnDeviceByEntity(e4b);
+// assertArrayEquals(new Integer[0], d4.getIPv4Addresses());
+//
+// // Make e4 and e4a newer
+// Entity e4c = new Entity(4L, (short)4, 2, null, null, new Date(5000));
+// Entity e4d = new Entity(4L, (short)4, null, 4L, 5, new Date(5000));
+// d4 = deviceManager.learnDeviceByEntity(e4c);
+// d4 = deviceManager.learnDeviceByEntity(e4d);
+// assertArrayEquals(new Integer[0], d2.getIPv4Addresses());
+// // FIXME: d4 should not return IP4
+// assertArrayEquals(new Integer[] { 2 }, d4.getIPv4Addresses());
+//
+// // Add another newer entity to d2 but with different IP
+// Entity e2d = new Entity(2L, (short)2, 22, 4L, 6, new Date(6000));
+// d2 = deviceManager.learnDeviceByEntity(e2d);
+// assertArrayEquals(new Integer[] { 22 }, d2.getIPv4Addresses());
+// assertArrayEquals(new Integer[] { 2 }, d4.getIPv4Addresses());
+//
+// // new IP for d2,d4 but with same timestamp. Both devices get the IP
+// Entity e2e = new Entity(2L, (short)2, 42, 2L, 4, new Date(7000));
+// d2 = deviceManager.learnDeviceByEntity(e2e);
+// ips= d2.getIPv4Addresses();
+// Arrays.sort(ips);
+// assertArrayEquals(new Integer[] { 22, 42 }, ips);
+// Entity e4e = new Entity(4L, (short)4, 42, 4L, 7, new Date(7000));
+// d4 = deviceManager.learnDeviceByEntity(e4e);
+// ips= d4.getIPv4Addresses();
+// Arrays.sort(ips);
+// assertArrayEquals(new Integer[] { 2, 42 }, ips);
+//
+// // add a couple more IPs
+// Entity e2f = new Entity(2L, (short)2, 4242, 2L, 5, new Date(8000));
+// d2 = deviceManager.learnDeviceByEntity(e2f);
+// ips= d2.getIPv4Addresses();
+// Arrays.sort(ips);
+// assertArrayEquals(new Integer[] { 22, 42, 4242 }, ips);
+// Entity e4f = new Entity(4L, (short)4, 4242, 4L, 8, new Date(9000));
+// d4 = deviceManager.learnDeviceByEntity(e4f);
+// ips= d4.getIPv4Addresses();
+// Arrays.sort(ips);
+// assertArrayEquals(new Integer[] { 2, 42, 4242 }, ips);
+// }
+//
+// // TODO: this test should really go into a separate class that collects
+// // unit tests for Device
+// @Test
+// public void testGetSwitchPortVlanId() {
+// Entity entity1 = new Entity(1L, (short)1, null, 10L, 1, new Date());
+// Entity entity2 = new Entity(1L, null, null, 10L, 1, new Date());
+// Entity entity3 = new Entity(1L, (short)3, null, 1L, 1, new Date());
+// Entity entity4 = new Entity(1L, (short)42, null, 1L, 1, new Date());
+// Entity[] entities = new Entity[] { entity1, entity2,
+// entity3, entity4
+// };
+// Device d = new Device(null,1L, null, null, null,
+// Arrays.asList(entities), null);
+// SwitchPort swp1x1 = new SwitchPort(1L, 1);
+// SwitchPort swp1x2 = new SwitchPort(1L, 2);
+// SwitchPort swp2x1 = new SwitchPort(2L, 1);
+// SwitchPort swp10x1 = new SwitchPort(10L, 1);
+// assertArrayEquals(new Short[] { -1, 1},
+// d.getSwitchPortVlanIds(swp10x1));
+// assertArrayEquals(new Short[] { 3, 42},
+// d.getSwitchPortVlanIds(swp1x1));
+// assertArrayEquals(new Short[0],
+// d.getSwitchPortVlanIds(swp1x2));
+// assertArrayEquals(new Short[0],
+// d.getSwitchPortVlanIds(swp2x1));
+// }
+//
+// @Test
+// public void testReclassifyDevice() throws FloodlightModuleException {
+// MockFlexEntityClassifier flexClassifier =
+// new MockFlexEntityClassifier();
+// deviceManager.entityClassifier= flexClassifier;
+// deviceManager.startUp(null);
+//
+// ITopologyService mockTopology = createMock(ITopologyService.class);
+// deviceManager.topology = mockTopology;
+// expect(mockTopology.isAttachmentPointPort(anyLong(),
+// anyShort())).
+// andReturn(true).anyTimes();
+// expect(mockTopology.getL2DomainId(anyLong())).andReturn(1L).anyTimes();
+// expect(mockTopology.isConsistent(EasyMock.anyLong(),
+// EasyMock.anyShort(),
+// EasyMock.anyLong(),
+// EasyMock.anyShort()))
+// .andReturn(false)
+// .anyTimes();
+// expect(mockTopology.isBroadcastDomainPort(EasyMock.anyLong(),
+// EasyMock.anyShort()))
+// .andReturn(false)
+// .anyTimes();
+// replay(mockTopology);
+//
+// //flexClassifier.createTestEntityClass("Class1");
+//
+// Entity entity1 = new Entity(1L, (short)1, 1, 1L, 1, new Date());
+// Entity entity1b = new Entity(1L, (short)2, 1, 1L, 1, new Date());
+// Entity entity2 = new Entity(2L, (short)1, 2, 2L, 2, new Date());
+// Entity entity2b = new Entity(2L, (short)2, 2, 2L, 2, new Date());
+//
+//
+// Device d1 = deviceManager.learnDeviceByEntity(entity1);
+// Device d2 = deviceManager.learnDeviceByEntity(entity2);
+// Device d1b = deviceManager.learnDeviceByEntity(entity1b);
+// Device d2b = deviceManager.learnDeviceByEntity(entity2b);
+//
+// d1 = deviceManager.getDeviceIteratorForQuery(entity1.getMacAddress(),
+// entity1.getVlan(), entity1.getIpv4Address(),
+// entity1.getSwitchDPID(), entity1.getSwitchPort())
+// .next();
+// d1b = deviceManager.getDeviceIteratorForQuery(entity1b.getMacAddress(),
+// entity1b.getVlan(), entity1b.getIpv4Address(),
+// entity1b.getSwitchDPID(), entity1b.getSwitchPort()).next();
+//
+// assertEquals(d1, d1b);
+//
+// d2 = deviceManager.getDeviceIteratorForQuery(entity2.getMacAddress(),
+// entity2.getVlan(), entity2.getIpv4Address(),
+// entity2.getSwitchDPID(), entity2.getSwitchPort()).next();
+// d2b = deviceManager.getDeviceIteratorForQuery(entity2b.getMacAddress(),
+// entity2b.getVlan(), entity2b.getIpv4Address(),
+// entity2b.getSwitchDPID(), entity2b.getSwitchPort()).next();
+// assertEquals(d2, d2b);
+//
+// IEntityClass eC1 = flexClassifier.createTestEntityClass("C1");
+// IEntityClass eC2 = flexClassifier.createTestEntityClass("C2");
+//
+// flexClassifier.addVlanEntities((short)1, eC1);
+// flexClassifier.addVlanEntities((short)2, eC1);
+//
+// deviceManager.reclassifyDevice(d1);
+// deviceManager.reclassifyDevice(d2);
+//
+// d1 = deviceManager.deviceMap.get(
+// deviceManager.primaryIndex.findByEntity(entity1));
+// d1b = deviceManager.deviceMap.get(
+// deviceManager.primaryIndex.findByEntity(entity1b));
+//
+// assertEquals(d1, d1b);
+//
+// d2 = deviceManager.deviceMap.get(
+// deviceManager.primaryIndex.findByEntity(entity2));
+// d2b = deviceManager.deviceMap.get(
+// deviceManager.primaryIndex.findByEntity(entity2b));
+//
+// assertEquals(d2, d2b);
+//
+// flexClassifier.addVlanEntities((short)1, eC2);
+//
+// deviceManager.reclassifyDevice(d1);
+// deviceManager.reclassifyDevice(d2);
+// d1 = deviceManager.deviceMap.get(
+// deviceManager.primaryIndex.findByEntity(entity1));
+// d1b = deviceManager.deviceMap.get(
+// deviceManager.primaryIndex.findByEntity(entity1b));
+// d2 = deviceManager.deviceMap.get(
+// deviceManager.primaryIndex.findByEntity(entity2));
+// d2b = deviceManager.deviceMap.get(
+// deviceManager.primaryIndex.findByEntity(entity2b));
+//
+// assertNotSame(d1, d1b);
+//
+// assertNotSame(d2, d2b);
+//
+// flexClassifier.addVlanEntities((short)1, eC1);
+// deviceManager.reclassifyDevice(d1);
+// deviceManager.reclassifyDevice(d2);
+// ClassState classState = deviceManager.classStateMap.get(eC1.getName());
+//
+// Long deviceKey1 = null;
+// Long deviceKey1b = null;
+// Long deviceKey2 = null;
+// Long deviceKey2b = null;
+//
+// deviceKey1 =
+// classState.classIndex.findByEntity(entity1);
+// deviceKey1b =
+// classState.classIndex.findByEntity(entity1b);
+// deviceKey2 =
+// classState.classIndex.findByEntity(entity2);
+// deviceKey2b =
+// classState.classIndex.findByEntity(entity2b);
+//
+// assertEquals(deviceKey1, deviceKey1b);
+//
+// assertEquals(deviceKey2, deviceKey2b);
+// }
+//
+// @Test
+// public void testSyncEntity() {
+// Date d1 = new Date();
+// Date d2 = new Date(0);
+// Entity e1 = new Entity(1L, (short)2, 3, 4L, 5, d1);
+// e1.setActiveSince(d2);
+// SyncEntity se1 = new SyncEntity(e1);
+// assertEntityEquals(e1, se1);
+// assertEquals(1L, se1.macAddress);
+// assertEquals(Short.valueOf((short)2), se1.vlan);
+// assertEquals(Integer.valueOf(3), se1.ipv4Address);
+// assertEquals(Long.valueOf(4L), se1.switchDPID);
+// assertEquals(Integer.valueOf(5), se1.switchPort);
+// assertEquals(d1, se1.lastSeenTimestamp);
+// assertEquals(d2, se1.activeSince);
+// assertNotSame(d1, se1.lastSeenTimestamp);
+// assertNotSame(d2, se1.activeSince);
+//
+// Entity e2 = new Entity(42L, null, null, null, null, null);
+// SyncEntity se2 = new SyncEntity(e2);
+// assertEntityEquals(e2, se2);
+//
+// SyncEntity se3 = new SyncEntity();
+// SyncEntity se4 = new SyncEntity();
+// se3.lastSeenTimestamp = new Date(1000);
+// se4.lastSeenTimestamp = new Date(2000);
+// assertTrue("", se3.compareTo(se4) < 0);
+// assertTrue("", se4.compareTo(se3) > 0);
+// se4.lastSeenTimestamp = new Date(1000);
+// assertTrue("", se3.compareTo(se4) == 0);
+// assertTrue("", se4.compareTo(se3) == 0);
+// se4.lastSeenTimestamp = new Date(500);
+// assertTrue("", se3.compareTo(se4) > 0);
+// assertTrue("", se4.compareTo(se3) < 0);
+// }
+//
+// /* Test basic DeviceSyncRepresentation behavior */
+// @Test
+// public void testDeviceSyncRepresentationBasics() {
+// DeviceSyncRepresentation dsr = new DeviceSyncRepresentation();
+// assertNull(dsr.getKey());
+// assertNull(dsr.getEntities());
+// dsr.setKey("MyKey");
+// assertEquals("MyKey", dsr.getKey());
+// assertEquals("MyKey", dsr.toString());
+//
+// List<SyncEntity> entities = new ArrayList<SyncEntity>();
+// Entity e1a = new Entity(1L, (short)2, 3, 4L, 5, new Date(1000));
+// Entity e1b = new Entity(1L, (short)2, null, 4L, 5, new Date(0));
+// entities.add(new SyncEntity(e1a));
+// entities.add(new SyncEntity(e1b));
+// // e1b comes before e1 (lastSeen) but we add it after it to test
+// // sorting
+// dsr.setEntities(entities);
+//
+// assertEquals(2, dsr.getEntities().size());
+// // e1b has earlier time
+// assertEquals(e1b, dsr.getEntities().get(0).asEntity());
+// assertEquals(e1a, dsr.getEntities().get(1).asEntity());
+//
+// dsr.setKey(null);
+// dsr.setEntities(null);
+// assertNull(dsr.getKey());
+// assertNull(dsr.getEntities());
+// }
+//
+// @Test
+// public void testDeviceSyncRepresentationFromDevice() {
+// ITopologyService mockTopology = makeMockTopologyAllPortsAp();
+// replay(mockTopology);
+// deviceManager.topology = mockTopology;
+//
+// deviceManager.entityClassifier = new MockEntityClassifier();
+//
+// //**************************************
+// // Test 1: a single entity
+// Entity e1 = new Entity(1L, (short)2, 3, 4L, 5, new Date(1000));
+// Device d1 = deviceManager.learnDeviceByEntity(e1);
+// assertEquals("Sanity check failed. Device doesn't have the expected " +
+// "entity class. Something with the test setup is strange",
+// "DefaultEntityClass", d1.getEntityClass().getName());
+// assertEquals("Sanity check failed. Device doesn't have the expected " +
+// "entity class. Something with the test setup is strange",
+// EnumSet.of(DeviceField.MAC, DeviceField.VLAN),
+// d1.getEntityClass().getKeyFields());
+//
+// Long deviceKey = d1.getDeviceKey();
+// DeviceSyncRepresentation dsr1 = new DeviceSyncRepresentation(d1);
+// assertEquals("DefaultEntityClass::00:00:00:00:00:01::[2]::",
+// dsr1.getKey());
+// assertEquals(1, dsr1.getEntities().size());
+// assertEquals(e1, dsr1.getEntities().get(0).asEntity());
+//
+// //**************************************
+// // Test 1b: same device, now with a second entity (no IP).
+// // this second entity has a lastSeen time that is earlier than the
+// // first entity
+// Entity e1b = new Entity(1L, (short)2, null, 4L, 5, new Date(0));
+// d1 = deviceManager.learnDeviceByEntity(e1b);
+// assertEquals("Sanity check failed. Should still be same device but " +
+// "deviceKeys differs", deviceKey, d1.getDeviceKey());
+// dsr1 = new DeviceSyncRepresentation(d1);
+// assertEquals("DefaultEntityClass::00:00:00:00:00:01::[2]::",
+// dsr1.getKey());
+// assertEquals(2, dsr1.getEntities().size());
+// // Entities are ordered by their lastSeen time. e1b should come
+// // before e1.
+// assertEquals(e1, dsr1.getEntities().get(1).asEntity());
+// assertEquals(e1b, dsr1.getEntities().get(0).asEntity());
+//
+// //**************************************
+// // Test 1c: same device with a third entity that does not have a
+// // switch port. It should be added to the DeviceSyncRepresentation
+// Entity e1c = new Entity(1L, (short)2, 33, null, null, new Date(2000));
+// d1 = deviceManager.learnDeviceByEntity(e1c);
+// assertEquals("Sanity check failed. Should still be same device but " +
+// "deviceKeys differs", deviceKey, d1.getDeviceKey());
+// dsr1 = new DeviceSyncRepresentation(d1);
+// assertEquals("DefaultEntityClass::00:00:00:00:00:01::[2]::",
+// dsr1.getKey());
+// assertEquals(3, dsr1.getEntities().size());
+// // Entities are ordered by their lastSeen time
+// assertEquals(e1c, dsr1.getEntities().get(2).asEntity());
+// assertEquals(e1, dsr1.getEntities().get(1).asEntity());
+// assertEquals(e1b, dsr1.getEntities().get(0).asEntity());
+//
+// //**************************************
+// // Test 1d: same device with a fourth entity that has a different
+// // attachment point and that is newer. Device should move and
+// // non-attachment point entities should be removed (e1b). Although
+// // e1 is non-attachment point it will remain because it has an IP
+// Entity e1d = new Entity(1L, (short)2, 33, 4L, 6, new Date(3000));
+// d1 = deviceManager.learnDeviceByEntity(e1d);
+// assertEquals("Sanity check failed. Should still be same device but " +
+// "deviceKeys differs", deviceKey, d1.getDeviceKey());
+// dsr1 = new DeviceSyncRepresentation(d1);
+// assertEquals("DefaultEntityClass::00:00:00:00:00:01::[2]::",
+// dsr1.getKey());
+// assertEquals(3, dsr1.getEntities().size());
+// assertEquals(e1, dsr1.getEntities().get(0).asEntity());
+// assertEquals(e1c, dsr1.getEntities().get(1).asEntity());
+// assertEquals(e1d, dsr1.getEntities().get(2).asEntity());
+//
+// d1 = null;
+//
+//
+// //**************************************
+// // Test 2: a second device with a different entity class. The
+// // mock entity classifier will return an entity class where all
+// // fields are keys if the DPID is > 10L
+// Entity e2 = new Entity(2L, (short)23, 24, 11L, 1, new Date(0));
+// Device d2 = deviceManager.learnDeviceByEntity(e2);
+// DeviceSyncRepresentation dsr2 = new DeviceSyncRepresentation(d2);
+// assertEquals("Sanity check failed. Device doesn't have the expected " +
+// "entity class. Something with the test setup is strange",
+// "TestEntityClass", d2.getEntityClass().getName());
+// assertEquals("Sanity check failed. Device doesn't have the expected " +
+// "entity class. Something with the test setup is strange",
+// EnumSet.of(DeviceField.MAC, DeviceField.VLAN,
+// DeviceField.SWITCH, DeviceField.PORT),
+// d2.getEntityClass().getKeyFields());
+// SwitchPort swp = new SwitchPort(11L, 1, null);
+// assertEquals("TestEntityClass::00:00:00:00:00:02::[23]::[" +
+// swp.toString() + "]::",
+// dsr2.getKey());
+// }
+//
+//    /* iterate through all entries in the sync store and return them as a
+// * list. We don't return the key from the store however, we assert
+// * that the key from the store matches the key in the representation.
+// * If we have a null value (tombstone) we simply add the null value to
+// * the list to return.
+// */
+// private List<DeviceSyncRepresentation> getEntriesFromStore()
+// throws Exception {
+// List<DeviceSyncRepresentation> entries =
+// new ArrayList<DeviceSyncRepresentation>();
+// IClosableIterator<Entry<String, Versioned<DeviceSyncRepresentation>>> iter =
+// storeClient.entries();
+// try {
+// while(iter.hasNext()) {
+// Entry<String, Versioned<DeviceSyncRepresentation>> entry =
+// iter.next();
+// DeviceSyncRepresentation dsr = entry.getValue().getValue();
+// if (dsr != null)
+// assertEquals(entry.getKey(), dsr.getKey());
+// entries.add(dsr);
+// }
+// } finally {
+// if (iter != null)
+// iter.close();
+// }
+// return entries;
+// }
+//
+// /*
+// * assert whether the given Entity expected is equals to the given
+// * SyncEntity actual. This method also compares the times (lastSeen,
+// * activeSince). Entity.equals will not do that!
+// */
+// private static void assertEntityEquals(Entity expected, SyncEntity actual) {
+// assertNotNull(actual);
+// assertNotNull(expected);
+// Entity actualEntity = actual.asEntity();
+// assertEquals("entityFields", expected, actualEntity);
+// assertEquals("lastSeenTimestamp",
+// expected.getLastSeenTimestamp(),
+// actualEntity.getLastSeenTimestamp());
+// assertEquals("activeSince",
+// expected.getActiveSince(), actualEntity.getActiveSince());
+// }
+//
+// /* This test tests the normal operation as master when we write to the sync
+// * store or delete from the store.
+// */
+// @Test
+// public void testWriteToSyncStore() throws Exception {
+// int syncStoreIntervalMs = 50;
+// ITopologyService mockTopology = makeMockTopologyAllPortsAp();
+// replay(mockTopology);
+// deviceManager.topology = mockTopology;
+// deviceManager.setSyncStoreWriteInterval(syncStoreIntervalMs);
+//
+// Entity e1a = new Entity(1L, (short)2, 3, 4L, 5, new Date(1000));
+// e1a.setActiveSince(new Date(0));
+// deviceManager.learnDeviceByEntity(e1a);
+//
+// //storeClient.put("FooBar", new DeviceSyncRepresentation());
+//
+// List<DeviceSyncRepresentation> entries = getEntriesFromStore();
+// assertEquals(1, entries.size());
+// DeviceSyncRepresentation dsr1 = entries.get(0);
+// assertEquals(1, dsr1.getEntities().size());
+// assertEntityEquals(e1a, dsr1.getEntities().get(0));
+//
+// // Same entity but newer timestamp. Since the device hasn't changed,
+// // only the timestamp is updated and the write should be throttled.
+// Entity e1b = new Entity(1L, (short)2, 3, 4L, 5, new Date(2000));
+// e1b.setActiveSince(new Date(0));
+// deviceManager.learnDeviceByEntity(e1a);
+// entries = getEntriesFromStore();
+// assertEquals(1, entries.size());
+// dsr1 = entries.get(0);
+// assertEquals(1, dsr1.getEntities().size());
+// assertEntityEquals(e1a, dsr1.getEntities().get(0)); //e1a not e1b !!!
+//
+// // Wait for the write interval to expire then write again.
+// Thread.sleep(syncStoreIntervalMs+5);
+// Entity e1c = new Entity(1L, (short)2, 3, 4L, 5, new Date(3000));
+// e1c.setActiveSince(new Date(0));
+// deviceManager.learnDeviceByEntity(e1c);
+// entries = getEntriesFromStore();
+// assertEquals(1, entries.size());
+// dsr1 = entries.get(0);
+// assertEquals(1, dsr1.getEntities().size());
+// assertEntityEquals(e1c, dsr1.getEntities().get(0)); // e1c !!
+//
+// // Entity for same device but with different IP. should be added
+// // immediately
+// Entity e1d = new Entity(1L, (short)2, 33, 4L, 5, new Date(4000));
+// e1d.setActiveSince(new Date(0));
+// deviceManager.learnDeviceByEntity(e1d);
+// entries = getEntriesFromStore();
+// assertEquals(1, entries.size());
+// dsr1 = entries.get(0);
+// assertEquals(2, dsr1.getEntities().size());
+// assertEntityEquals(e1c, dsr1.getEntities().get(0)); // e1c !!
+// assertEntityEquals(e1d, dsr1.getEntities().get(1)); // e1d !!
+//
+// // Entity for same device with new switch port ==> moved ==> write
+// // update immediately without throttle.
+// // Note: the previous entities will still be there because they have
+// // IPs (even though they aren't for the current attachment point)
+// Entity e1e = new Entity(1L, (short)2, 33, 4L, 6, new Date(5000));
+// e1e.setActiveSince(new Date(0));
+// deviceManager.learnDeviceByEntity(e1e);
+// entries = getEntriesFromStore();
+// assertEquals(1, entries.size());
+// dsr1 = entries.get(0);
+// assertEquals(3, dsr1.getEntities().size());
+// assertEntityEquals(e1c, dsr1.getEntities().get(0));
+// assertEntityEquals(e1d, dsr1.getEntities().get(1));
+// assertEntityEquals(e1e, dsr1.getEntities().get(2));
+//
+// // Add a second device
+// Entity e2 = new Entity(2L, null, null, 5L, 5, new Date());
+// deviceManager.learnDeviceByEntity(e2);
+// entries = getEntriesFromStore();
+// assertEquals(2, entries.size());
+// for (DeviceSyncRepresentation dsr: entries) {
+// // This is a kinda ugly way to ensure we have the two
+// // devices we need..... but it will work for now
+// if (dsr.getKey().contains("::00:00:00:00:00:01::")) {
+// assertEquals(3, dsr.getEntities().size());
+// assertEntityEquals(e1c, dsr.getEntities().get(0));
+// assertEntityEquals(e1d, dsr.getEntities().get(1));
+// assertEntityEquals(e1e, dsr.getEntities().get(2));
+// } else if (dsr.getKey().contains("::00:00:00:00:00:02::")) {
+// assertEquals(1, dsr.getEntities().size());
+// assertEntityEquals(e2, dsr.getEntities().get(0));
+// } else {
+// fail("Unknown entry in store: " + dsr);
+// }
+// }
+//
+//
+// // Run entity cleanup. Since we've used phony time stamps for
+// // device 1 its entities should be cleared and the device should be
+// // removed from the store. Device 2 should remain in the store.
+// deviceManager.cleanupEntities();
+// entries = getEntriesFromStore();
+// assertEquals(2, entries.size());
+// for (DeviceSyncRepresentation dsr: entries) {
+// if (dsr == null) {
+// // pass
+// } else if (dsr.getKey().contains("::00:00:00:00:00:02::")) {
+// assertEquals(1, dsr.getEntities().size());
+// assertEntityEquals(e2, dsr.getEntities().get(0));
+// } else {
+// fail("Unknown entry in store: " + dsr);
+// }
+// }
+// }
+//
+//
+// private void assertDeviceIps(Integer[] expected, IDevice d) {
+// List<Integer> expectedList = Arrays.asList(expected);
+// Collections.sort(expectedList);
+// List<Integer> actualList = Arrays.asList(d.getIPv4Addresses());
+// Collections.sort(actualList);
+// assertEquals(expectedList, actualList);
+// }
+//
+// private IDevice getSingleDeviceFromDeviceManager(long mac) {
+// Iterator<? extends IDevice> diter =
+// deviceManager.queryDevices(mac, null, null, null, null);
+// assertTrue("Query didn't return a device", diter.hasNext());
+// IDevice d = diter.next();
+// assertFalse("Query returned more than one device", diter.hasNext());
+// return d;
+// }
+//
+// @Test
+// public void testToMaster() throws Exception {
+// int syncStoreWriteIntervalMs = 0;
+// int initialSyncStoreConsolidateIntervalMs = 50;
+// ITopologyService mockTopology = makeMockTopologyAllPortsAp();
+// replay(mockTopology);
+// deviceManager.topology = mockTopology;
+// // We want an EntityClassifier that has switch/port as key fields
+// deviceManager.entityClassifier = new MockEntityClassifier();
+// deviceManager.setSyncStoreWriteInterval(syncStoreWriteIntervalMs);
+// deviceManager.setInitialSyncStoreConsolidateMs(initialSyncStoreConsolidateIntervalMs);
+//
+// // Add Device1 with two entities with two different IPs
+// Entity e1a = new Entity(1L, null, 3, 4L, 5, new Date(1000));
+// Entity e1b = new Entity(1L, null, 33, 4L, 5, new Date(2000));
+// Device d1 = deviceManager.allocateDevice(1L, e1a,
+// DefaultEntityClassifier.entityClass);
+// d1 = deviceManager.allocateDevice(d1, e1b, -1);
+// DeviceSyncRepresentation dsr = new DeviceSyncRepresentation(d1);
+// storeClient.put(dsr.getKey(), dsr);
+//
+// // Add Device2 with different switch-ports. Only the most recent
+// // one should be the attachment point
+// Entity e2a = new Entity(2L, null, null, 4L, 4, new Date(1000));
+// Entity e2b = new Entity(2L, null, null, 4L, 5, new Date(2000));
+// Device d2 = deviceManager.allocateDevice(2L, e2a,
+// DefaultEntityClassifier.entityClass);
+// d2 = deviceManager.allocateDevice(d2, e2b, -1);
+// d2.updateAttachmentPoint(4L, (short)5,
+// e2b.getLastSeenTimestamp().getTime());
+// SwitchPort swp = new SwitchPort(4L, 5);
+// SwitchPort[] aps = d2.getAttachmentPoints();
+// // sanity check
+// assertArrayEquals("Sanity check: should only have AP(4L,5)",
+// new SwitchPort[] {swp}, aps);
+// dsr = new DeviceSyncRepresentation(d2);
+// storeClient.put(dsr.getKey(), dsr);
+//
+// // Add a tombstone entry to the store to make sure we don't trip a
+// // NPE
+// dsr = null;
+// Versioned<DeviceSyncRepresentation> versionedDsr =
+// storeClient.get("FooBar");
+// storeClient.put("FooBar", versionedDsr);
+//
+// deviceManager.getHAListener().transitionToMaster();
+//
+// // Query for the Device1. Make sure we have the two IPs we stored.
+// IDevice d = getSingleDeviceFromDeviceManager(1L);
+// assertDeviceIps(new Integer[] {3, 33}, d);
+// assertArrayEquals(new Short[] { Ethernet.VLAN_UNTAGGED }, d.getVlanId());
+// swp = new SwitchPort(4L, 5);
+// assertArrayEquals(new SwitchPort[] { swp }, d.getAttachmentPoints());
+//
+// // Query for Device2. Make sure we only have the more recent AP
+// // Query for the Device1. Make sure we have the two IPs we stored.
+// d = getSingleDeviceFromDeviceManager(2L);
+// assertArrayEquals(new Integer[0], d.getIPv4Addresses());
+// assertArrayEquals(new Short[] { Ethernet.VLAN_UNTAGGED }, d.getVlanId());
+// swp = new SwitchPort(4L, 5);
+// assertArrayEquals(new SwitchPort[] { swp }, d.getAttachmentPoints());
+//
+// //----------------------------
+// // add another entry device to the store. since device manager is
+// // already master we won't read this device and it should be
+// // removed from the store by the consolidate task
+// Entity e3 = new Entity(3L, null, null, 1L, 1, null);
+// dsr = new DeviceSyncRepresentation();
+// dsr.setKey("Device3");
+// dsr.setEntities(Collections.singletonList(new SyncEntity(e3)));
+// storeClient.put(dsr.getKey(), dsr);
+//
+// // make sure it's in the store
+// List<DeviceSyncRepresentation> entries = getEntriesFromStore();
+// boolean found = false;
+// for (DeviceSyncRepresentation entry: entries) {
+// if (entry!=null && entry.getKey().equals("Device3"))
+// found = true;
+// }
+// assertTrue("Device3 not in store. Entries in store: " + entries, found);
+// // make sure it's not in DevManager
+// Iterator<? extends IDevice> diter =
+// deviceManager.queryDevices(3L, null, null, null, null);
+// assertFalse("Device3 found in DeviceManager. Should be there",
+// diter.hasNext());
+//
+// // Wait for consolidate
+// Thread.sleep(initialSyncStoreConsolidateIntervalMs + 5);
+// // make sure it's NOT in the store
+// entries = getEntriesFromStore();
+// found = false;
+// for (DeviceSyncRepresentation entry: entries) {
+// if (entry!=null && entry.getKey().equals("Device3"))
+// found = true;
+// }
+// assertFalse("Device3 not is still in the store. Entries in store: "
+// + entries, found);
+// // make sure it's not in DevManager
+// diter = deviceManager.queryDevices(3L, null, null, null, null);
+// assertFalse("Device3 found in DeviceManager. Should be there",
+// diter.hasNext());
+// }
+//
+//
+// @Test
+// public void testConsolitateStore() throws Exception {
+// int syncStoreInternalMs = 0;
+// ITopologyService mockTopology = makeMockTopologyAllPortsAp();
+// replay(mockTopology);
+// deviceManager.topology = mockTopology;
+// // We want an EntityClassifier that has switch/port as key fields
+// deviceManager.entityClassifier = new MockEntityClassifier();
+// deviceManager.setSyncStoreWriteInterval(syncStoreInternalMs);
+//
+// // Add Device1 with two entities to store and let device manager
+// // learn
+// Entity e1a = new Entity(1L, null, null, 4L, 5, new Date(1000));
+// Entity e1b = new Entity(1L, null, 3, 4L, 5, new Date(2000));
+// Device d1 = deviceManager.learnDeviceByEntity(e1a);
+// deviceManager.learnDeviceByEntity(e1b);
+// String dev1Key = DeviceSyncRepresentation.computeKey(d1);
+//
+//
+// // Add a second device to the store but do NOT add to device manager
+// Entity e2 = new Entity(2L, null, null, 5L, 5, new Date());
+// Device d2 = deviceManager.allocateDevice(42L, e2,
+// DefaultEntityClassifier.entityClass);
+// DeviceSyncRepresentation dsr = new DeviceSyncRepresentation(d2);
+// storeClient.put(dsr.getKey(), dsr);
+// String dev2Key = DeviceSyncRepresentation.computeKey(d2);
+//
+// // Make sure we have two devices in the store
+// List<DeviceSyncRepresentation> entries = getEntriesFromStore();
+// assertEquals(2, entries.size());
+//
+// deviceManager.scheduleConsolidateStoreNow();
+// Thread.sleep(25); // give the scheduler time to run the task
+//
+// // We should still have two entries, however one of them will be a
+// // tombstone
+// entries = getEntriesFromStore();
+// assertEquals(2, entries.size());
+//
+// // Device 1 should still be in store
+// Versioned<DeviceSyncRepresentation> versioned =
+// storeClient.get(dev1Key);
+// dsr = versioned.getValue();
+// assertNotNull(dsr);
+// assertEquals(2, dsr.getEntities().size());
+// assertEntityEquals(e1a, dsr.getEntities().get(0));
+// assertEntityEquals(e1b, dsr.getEntities().get(1));
+//
+// // Device2 should be gone
+// versioned = storeClient.get(dev2Key);
+// assertNull(versioned.getValue());
+//
+// // Run consolidate again. This time we check that tombstones in
+// // the store are handled correctly
+// deviceManager.scheduleConsolidateStoreNow();
+// Thread.sleep(25); // give the scheduler time to run the task
+//
+// // Now write a device to the store that doesn't have any switch-port
+// // it should be removed
+// Entity e3 = new Entity(3L, null, null, null, null, null);
+// dsr.setKey("Device3");
+// dsr.setEntities(Collections.singletonList(new SyncEntity(e3)));
+// storeClient.put(dsr.getKey(), dsr);
+//
+// // Run consolidate again. This time we check that tombstones in
+// // the store are handled correctly
+// deviceManager.scheduleConsolidateStoreNow();
+// Thread.sleep(25); // give the scheduler time to run the task
+// versioned = storeClient.get("Device3");
+// assertNull(versioned.getValue());
+//
+// }
+//
+// }
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.internal;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+import org.junit.Test;
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.opendaylight.controller.sal.core.Node;
+import org.opendaylight.controller.sal.core.NodeConnector;
+import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
+
+/**
+ *
+ * @author gregor
+ *
+ */
+public class DeviceUniqueIndexTest extends TestCase {
+    protected Entity e1a;
+    protected Entity e1b;
+    protected Device d1;
+    protected Entity e2;
+    protected Entity e2alt;
+    protected Entity e3;
+    protected Entity e3_ip;
+    protected Entity e4;
+
+    @Override
+    protected void setUp() throws Exception {
+        super.setUp();
+        Node n1 = new Node(Node.NodeIDType.OPENFLOW, Long.valueOf(1L));
+        NodeConnector n1_1 = new NodeConnector(NodeConnectorIDType.OPENFLOW,
+                Short.valueOf((short) 1), n1);
+        e1a = new Entity(1L, (short) 1, 1, n1_1, new Date());
+        e1b = new Entity(1L, (short) 2, 1, n1_1, new Date());
+        List<Entity> d1Entities = new ArrayList<Entity>(2);
+        d1Entities.add(e1a);
+        d1Entities.add(e1b);
+        d1 = new Device(null, Long.valueOf(1), null, null, null, d1Entities,
+                null);
+
+        Node n2 = new Node(Node.NodeIDType.OPENFLOW, Long.valueOf(2L));
+        NodeConnector n2_2 = new NodeConnector(NodeConnectorIDType.OPENFLOW,
+                Short.valueOf((short) 2), n2);
+        Node n3 = new Node(Node.NodeIDType.OPENFLOW, Long.valueOf(3L));
+        NodeConnector n3_3 = new NodeConnector(NodeConnectorIDType.OPENFLOW,
+                Short.valueOf((short) 3), n3);
+
+        // e2 and e2 alt match in MAC and VLAN
+        e2 = new Entity(2L, (short) 2, 2, n2_2, new Date());
+        e2alt = new Entity(2, (short) 2, null, null, null);
+
+        // IP is null
+        e3 = new Entity(3L, (short) 3, null, n3_3, new Date());
+        e3_ip = new Entity(3L, (short) 3, 3, n3_3, new Date());
+
+        // IP and switch and port are null
+        e4 = new Entity(4L, (short) 4, null, null, new Date());
+    }
+
+    /*
+     * Checks that the iterator returns exactly the elements in the Set
+     * expected. Doesn't check how often an element is returned, as long as
+     * it's returned at least once.
+     */
+    protected void verifyIterator(Set<Long> expected, Iterator<Long> it) {
+        HashSet<Long> actual = new HashSet<Long>();
+        while (it.hasNext()) {
+            actual.add(it.next());
+        }
+        assertEquals(expected, actual);
+    }
+
+    @Test
+    public void testDeviceUniqueIndex() {
+        DeviceUniqueIndex idx1 = new DeviceUniqueIndex(EnumSet.of(
+                DeviceField.MAC, DeviceField.VLAN));
+
+        idx1.updateIndex(d1, d1.getDeviceKey());
+        idx1.updateIndex(e2, 2L);
+
+        // -------------
+        // Test findByEntity lookups
+        assertEquals(Long.valueOf(1L), idx1.findByEntity(e1a));
+        assertEquals(Long.valueOf(1L), idx1.findByEntity(e1b));
+        assertEquals(Long.valueOf(2L), idx1.findByEntity(e2));
+        // we didn't add e2alt but since the key fields are the same we
+        // should find it
+        assertEquals(Long.valueOf(2L), idx1.findByEntity(e2alt));
+        assertEquals(null, idx1.findByEntity(e3));
+        assertEquals(null, idx1.findByEntity(e4));
+
+        // -------------
+        // Test getAll()
+        HashSet<Long> expectedKeys = new HashSet<Long>();
+        expectedKeys.add(1L);
+        expectedKeys.add(2L);
+        verifyIterator(expectedKeys, idx1.getAll());
+
+        // -------------
+        // Test queryByEntity()
+        verifyIterator(Collections.<Long> singleton(1L),
+                idx1.queryByEntity(e1a));
+        verifyIterator(Collections.<Long> singleton(1L),
+                idx1.queryByEntity(e1b));
+        verifyIterator(Collections.<Long> singleton(2L), idx1.queryByEntity(e2));
+        verifyIterator(Collections.<Long> singleton(2L),
+                idx1.queryByEntity(e2alt));
+        assertEquals(false, idx1.queryByEntity(e3).hasNext());
+        // e4 was never added to idx1 either, so its query must also come back
+        // empty (the original line repeated the e3 check here).
+        assertEquals(false, idx1.queryByEntity(e4).hasNext());
+
+        // -------------
+        // Test removal
+        idx1.removeEntity(e1a, 42L); // No-op. e1a isn't mapped to this key
+        assertEquals(Long.valueOf(1L), idx1.findByEntity(e1a));
+        idx1.removeEntity(e1a, 1L);
+        assertEquals(null, idx1.findByEntity(e1a));
+        assertEquals(Long.valueOf(1L), idx1.findByEntity(e1b));
+        assertEquals(Long.valueOf(2L), idx1.findByEntity(e2));
+        idx1.removeEntity(e2);
+        assertEquals(null, idx1.findByEntity(e2));
+        assertEquals(Long.valueOf(1L), idx1.findByEntity(e1b));
+
+        // -------------
+        // Test null keys
+        DeviceUniqueIndex idx2 = new DeviceUniqueIndex(EnumSet.of(
+                DeviceField.IPV4, DeviceField.SWITCHPORT));
+        // only one key field is null
+        idx2.updateIndex(e3, 3L);
+        assertEquals(Long.valueOf(3L), idx2.findByEntity(e3));
+        assertEquals(null, idx2.findByEntity(e3_ip));
+        // all key fields are null
+        idx2.updateIndex(e4, 4L);
+        assertEquals(null, idx2.findByEntity(e4));
+        Device d4 = new Device(null, 4L, null, null, null,
+                Collections.<Entity> singleton(e4), null);
+        idx2.updateIndex(d4, 4L);
+        assertEquals(null, idx2.findByEntity(e4));
+
+        // -------------
+        // entity already exists with different deviceKey
+        DeviceUniqueIndex idx3 = new DeviceUniqueIndex(EnumSet.of(
+                DeviceField.MAC, DeviceField.VLAN));
+        idx3.updateIndex(e1a, 42L);
+        assertEquals(false, idx3.updateIndex(d1, 1L));
+        // TODO: shouldn't this fail as well so that the behavior
+        // is consistent?
+        idx3.updateIndex(e1a, 1L);
+        // anyways. We can now add d1 ;-)
+        assertEquals(true, idx3.updateIndex(d1, 1L));
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2011,2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.SwitchPort;
+import org.opendaylight.controller.hosttracker.internal.AttachmentPoint;
+import org.opendaylight.controller.hosttracker.internal.Device;
+import org.opendaylight.controller.hosttracker.internal.DeviceManagerImpl;
+
+/**
+ * This mock device removes the dependency on topology and a parent device
+ * manager and simply assumes all its entities are current and correct
+ */
+public class MockDevice extends Device {
+
+    public MockDevice(DeviceManagerImpl deviceManager, Long deviceKey,
+            Entity entity, IEntityClass entityClass) {
+        super(deviceManager, deviceKey, entity, entityClass);
+    }
+
+    // Copy-construct with newEntity spliced in at insertionpoint.
+    public MockDevice(Device device, Entity newEntity, int insertionpoint) {
+        super(device, newEntity, insertionpoint);
+    }
+
+    public MockDevice(DeviceManagerImpl deviceManager, Long deviceKey,
+            List<AttachmentPoint> aps, List<AttachmentPoint> trueAPs,
+            Collection<Entity> entities, IEntityClass entityClass) {
+        super(deviceManager, deviceKey, null, aps, trueAPs, entities,
+                entityClass);
+    }
+
+    // Returns the distinct IPv4 addresses over ALL entities in ascending
+    // order (TreeSet dedupes and sorts); entities without an address are
+    // skipped. Per the class comment, entities are assumed current, so
+    // there is no staleness filtering here.
+    @Override
+    public Integer[] getIPv4Addresses() {
+        TreeSet<Integer> vals = new TreeSet<Integer>();
+        for (Entity e : entities) {
+            if (e.getIpv4Address() == null)
+                continue;
+            vals.add(e.getIpv4Address());
+        }
+
+        return vals.toArray(new Integer[vals.size()]);
+    }
+
+    // Treats every entity's port as a current attachment point: emits one
+    // SwitchPort per entity whose port is non-null and accepted by
+    // deviceManager.isValidAttachmentPoint(). Duplicate ports are NOT
+    // deduplicated.
+    @Override
+    public SwitchPort[] getAttachmentPoints() {
+        ArrayList<SwitchPort> vals = new ArrayList<SwitchPort>(entities.length);
+        for (Entity e : entities) {
+            if (e.getPort() != null
+                    && deviceManager.isValidAttachmentPoint(e.getPort())) {
+                SwitchPort sp = new SwitchPort(e.getPort());
+                vals.add(sp);
+            }
+        }
+        return vals.toArray(new SwitchPort[vals.size()]);
+    }
+
+    @Override
+    public String toString() {
+        return "MockDevice [getEntityClass()=" + getEntityClass()
+                + ", getEntities()=" + Arrays.toString(getEntities()) + "]";
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.test;
+
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDevice;
+import org.opendaylight.controller.hosttracker.IDeviceListener;
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.IEntityClassifierService;
+import org.opendaylight.controller.hosttracker.internal.AttachmentPoint;
+import org.opendaylight.controller.hosttracker.internal.Device;
+import org.opendaylight.controller.hosttracker.internal.DeviceManagerImpl;
+import org.opendaylight.controller.sal.core.NodeConnector;
+
+/**
+ * Mock device manager useful for unit tests
+ *
+ * @author readams
+ */
+public class MockDeviceManager extends DeviceManagerImpl {
+    /**
+     * Set a new IEntityClassifier. Use this as a quick way to use a particular
+     * entity classifier in a single test without having to setup the full
+     * FloodlightModuleContext again.
+     *
+     * @param ecs the entity classifier to install
+     */
+    public void setEntityClassifier(IEntityClassifierService ecs) {
+        this.entityClassifier = ecs;
+        // setSyncServiceIfNotSet(new MockSyncService());
+        this.start();
+    }
+
+    /**
+     * Learn a device using the given characteristics.
+     *
+     * @param macAddress
+     *            the MAC
+     * @param vlan
+     *            the VLAN (can be null)
+     * @param ipv4Address
+     *            the IP (can be null)
+     * @param port
+     *            the attachment point node connector (can be null)
+     * @param processUpdates
+     *            if false, will not send updates. Note that this method is not
+     *            thread safe if this is false
+     * @return the device, either new or not
+     */
+    public IDevice learnEntity(long macAddress, Short vlan,
+            Integer ipv4Address, NodeConnector port, boolean processUpdates) {
+        List<IDeviceListener> listeners = deviceListeners.getOrderedListeners();
+        if (!processUpdates) {
+            deviceListeners.clearListeners();
+        }
+
+        // Normalize "unset" markers (non-positive VLAN, zero IP) to null so
+        // they don't become key fields of the learned entity.
+        if (vlan != null && vlan.shortValue() <= 0)
+            vlan = null;
+        if (ipv4Address != null && ipv4Address == 0)
+            ipv4Address = null;
+        IDevice res = learnDeviceByEntity(new Entity(macAddress, vlan,
+                ipv4Address, port, new Date()));
+        // Restore listeners, but only if we actually cleared them above;
+        // re-adding listeners that were never removed would register each of
+        // them a second time.
+        if (!processUpdates && listeners != null) {
+            for (IDeviceListener listener : listeners) {
+                deviceListeners.addListener("device", listener);
+            }
+        }
+        return res;
+    }
+
+    // NOTE(review): this override only delegates to super; presumably it
+    // exists to widen access for tests -- confirm against DeviceManagerImpl.
+    @Override
+    public void deleteDevice(Device device) {
+        super.deleteDevice(device);
+    }
+
+    /**
+     * Learn a device using the given characteristics, always sending updates.
+     *
+     * @param macAddress
+     *            the MAC
+     * @param vlan
+     *            the VLAN (can be null)
+     * @param ipv4Address
+     *            the IP (can be null)
+     * @param port
+     *            the attachment point node connector (can be null)
+     * @return the device, either new or not
+     */
+    public IDevice learnEntity(long macAddress, Short vlan,
+            Integer ipv4Address, NodeConnector port) {
+        return learnEntity(macAddress, vlan, ipv4Address, port, true);
+    }
+
+    // The three overrides below make the device manager allocate MockDevice
+    // instances instead of real Device objects.
+    @Override
+    protected Device allocateDevice(Long deviceKey, Entity entity,
+            IEntityClass entityClass) {
+        return new MockDevice(this, deviceKey, entity, entityClass);
+    }
+
+    @Override
+    protected Device allocateDevice(Long deviceKey, String dhcpClientName,
+            List<AttachmentPoint> aps, List<AttachmentPoint> trueAPs,
+            Collection<Entity> entities, IEntityClass entityClass) {
+        return new MockDevice(this, deviceKey, aps, trueAPs, entities,
+                entityClass);
+    }
+
+    @Override
+    protected Device allocateDevice(Device device, Entity entity,
+            int insertionpoint) {
+        return new MockDevice(device, entity, insertionpoint);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.test;
+
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.MAC;
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.SWITCHPORT;
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.VLAN;
+
+import java.util.EnumSet;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDeviceService;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.internal.DefaultEntityClassifier;
+
+/**
+ * A simple IEntityClassifier. Useful for tests that need IEntityClassifiers and
+ * IEntityClass'es with switch and/or port key fields
+ */
+public class MockEntityClassifier extends DefaultEntityClassifier {
+    // Entity class keyed on MAC, VLAN and switch-port (wider than the
+    // default classifier's key set).
+    public static class TestEntityClass implements IEntityClass {
+        @Override
+        public EnumSet<DeviceField> getKeyFields() {
+            return EnumSet.of(MAC, VLAN, SWITCHPORT);
+        }
+
+        @Override
+        public String getName() {
+            return "TestEntityClass";
+        }
+    }
+
+    // Shared singleton instance of the test entity class.
+    public static IEntityClass testEC = new MockEntityClassifier.TestEntityClass();
+
+    // Entities attached to a node with ID >= 10 get the test class; all
+    // others fall back to the default entity class.
+    // NOTE(review): assumes entity.getPort() and its Node are non-null and
+    // the node ID is a Long -- an entity without a port would NPE here.
+    // Confirm callers always supply a port.
+    @Override
+    public IEntityClass classifyEntity(Entity entity) {
+        if (((Long) entity.getPort().getNode().getID()) >= 10L) {
+            return testEC;
+        }
+        return DefaultEntityClassifier.entityClass;
+    }
+
+    @Override
+    public EnumSet<IDeviceService.DeviceField> getKeyFields() {
+        return EnumSet.of(MAC, VLAN, SWITCHPORT);
+    }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.test;
+
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.MAC;
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.SWITCHPORT;
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.VLAN;
+
+import java.util.EnumSet;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDeviceService;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.internal.DefaultEntityClassifier;
+
+/**
+ * A simple IEntityClassifier. Useful for tests that need an IEntityClassifier
+ * with switch/port as key fields.
+ */
+public class MockEntityClassifierMac extends DefaultEntityClassifier {
+    // Entity class keyed on MAC and VLAN only; the name is configurable so
+    // tests can distinguish instances.
+    public static class TestEntityClassMac implements IEntityClass {
+        protected String name;
+
+        public TestEntityClassMac(String name) {
+            this.name = name;
+        }
+
+        @Override
+        public EnumSet<DeviceField> getKeyFields() {
+            return EnumSet.of(MAC, VLAN);
+        }
+
+        @Override
+        public String getName() {
+            return name;
+        }
+    }
+
+    // Two distinct entity classes so tests can exercise devices moving
+    // between classes that share the same key fields.
+    public static IEntityClass testECMac1 = new MockEntityClassifierMac.TestEntityClassMac(
+            "testECMac1");
+    public static IEntityClass testECMac2 = new MockEntityClassifierMac.TestEntityClassMac(
+            "testECMac2");
+
+    // Classify by the node ID of the entity's attachment point:
+    // 1L -> testECMac1, 2L -> testECMac2, -1L -> null (no class),
+    // anything else -> the default entity class.
+    // NOTE(review): the null check below runs AFTER dereferencing
+    // entity.getPort().getNode(), so an entity without a port would throw
+    // NullPointerException instead of the intended IllegalArgumentException.
+    // Confirm callers always supply a port.
+    @Override
+    public IEntityClass classifyEntity(Entity entity) {
+        if (((Long) entity.getPort().getNode().getID()) == null) {
+            throw new IllegalArgumentException("Not all key fields specified."
+                    + " Required fields: " + getKeyFields());
+        } else if (((Long) entity.getPort().getNode().getID()) == 1L) {
+            return testECMac1;
+        } else if (((Long) entity.getPort().getNode().getID()) == 2L) {
+            return testECMac2;
+        } else if (((Long) entity.getPort().getNode().getID()) == -1L) {
+            return null;
+        }
+        return DefaultEntityClassifier.entityClass;
+    }
+
+    // NOTE(review): the classifier reports MAC/VLAN/SWITCHPORT as its key
+    // fields while its entity classes use only MAC/VLAN -- this looks
+    // deliberate for the tests, but verify.
+    @Override
+    public EnumSet<IDeviceService.DeviceField> getKeyFields() {
+        return EnumSet.of(MAC, VLAN, SWITCHPORT);
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.hosttracker.test;
+
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.MAC;
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.SWITCHPORT;
+import static org.opendaylight.controller.hosttracker.IDeviceService.DeviceField.VLAN;
+
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.opendaylight.controller.hosttracker.Entity;
+import org.opendaylight.controller.hosttracker.IDeviceService;
+import org.opendaylight.controller.hosttracker.IDeviceService.DeviceField;
+import org.opendaylight.controller.hosttracker.IEntityClass;
+import org.opendaylight.controller.hosttracker.internal.DefaultEntityClassifier;
+
+/**
+ * Extension to simple entity classifier to help in unit tests to provide table
+ * based multiple entity classification mock for reclassification tests
+ *
+ */
+public class MockFlexEntityClassifier extends DefaultEntityClassifier {
+    // Lookup tables driving classification: node ID -> class, VLAN -> class.
+    Map<Long, IEntityClass> switchEntities;
+    Map<Short, IEntityClass> vlanEntities;
+
+    // Simple named entity class keyed on MAC only.
+    public static class TestEntityClass implements IEntityClass {
+        String name;
+
+        public TestEntityClass(String name) {
+            this.name = name;
+        }
+
+        @Override
+        public EnumSet<DeviceField> getKeyFields() {
+            return EnumSet.of(MAC);
+        }
+
+        @Override
+        public String getName() {
+            return name;
+        }
+    }
+
+    // Fallback class returned when neither lookup table matches.
+    public static IEntityClass defaultClass = new TestEntityClass("default");
+
+    public MockFlexEntityClassifier() {
+        switchEntities = new HashMap<Long, IEntityClass>();
+        vlanEntities = new HashMap<Short, IEntityClass>();
+    }
+
+    // Creates a fresh entity class; note it is NOT registered in either table.
+    public IEntityClass createTestEntityClass(String name) {
+        return new TestEntityClass(name);
+    }
+
+    // Maps entities attached to the given switch (node ID) to entityClass.
+    public void addSwitchEntity(Long dpid, IEntityClass entityClass) {
+        switchEntities.put(dpid, entityClass);
+    }
+
+    public void removeSwitchEntity(Long dpid) {
+        switchEntities.remove(dpid);
+    }
+
+    // Maps entities carrying the given VLAN to entityClass.
+    public void addVlanEntities(Short vlan, IEntityClass entityClass) {
+        vlanEntities.put(vlan, entityClass);
+    }
+
+    public void removeVlanEntities(Short vlan) {
+        vlanEntities.remove(vlan);
+    }
+
+    // Precedence: switch mapping first, then VLAN mapping, then defaultClass.
+    // NOTE(review): assumes entity.getPort() is non-null -- an entity without
+    // a port would NPE here. Confirm callers always supply a port.
+    @Override
+    public IEntityClass classifyEntity(Entity entity) {
+        if (switchEntities.containsKey((Long) entity.getPort().getNode()
+                .getID()))
+            return switchEntities
+                    .get((Long) entity.getPort().getNode().getID());
+        if (vlanEntities.containsKey(entity.getVlan()))
+            return vlanEntities.get(entity.getVlan());
+        return defaultClass;
+    }
+
+    @Override
+    public EnumSet<IDeviceService.DeviceField> getKeyFields() {
+        return EnumSet.of(MAC, VLAN, SWITCHPORT);
+    }
+}
this.listener.logged(entry);
}
}
-
+
/*
* Install the default exception handler so that the uncaught
* exceptions are handled by our customized handler. This new
*/
Thread.setDefaultUncaughtExceptionHandler(new org.opendaylight.
controller.logging.bridge.internal.UncaughtExceptionHandler());
-
+
/*
* Install the Shutdown handler. This will intercept SIGTERM signal and
* close the system bundle. This allows for a graceful closing of OSGI
* framework.
*/
-
+
Runtime.getRuntime().addShutdownHook(new shutdownHandler(context));
} else {
this.log.error("Cannot register the LogListener because "
this.listener = null;
this.log = null;
}
-
+
private class shutdownHandler extends Thread {
BundleContext bundlecontext;
public shutdownHandler(BundleContext ctxt) {
this.bundlecontext = ctxt;
}
-
+
public void run () {
try {
this.bundlecontext.getBundle(0).stop();
} catch (BundleException e) {
log.debug("Bundle couldn't be stopped");
}
- }
+ }
}
}
*
*/
package org.opendaylight.controller.northbound.commons.exception;
-
public class NorthboundUtils {
-
+
/**
* Returns whether the current user has the required privilege on the
* specified container
- *
+ *
* @param userName
* The user name
* @param containerName
* @param required
* Operation to be performed - READ/WRITE
* @param bundle
- * Class from where the function is invoked
+ * Class from where the function is invoked
* @return The Status of the request, either Success or Unauthorized
*/
public static boolean isAuthorized(String userName, String containerName,
Privilege required,Object bundle) {
-
+
if (containerName.equals(GlobalConstants.DEFAULT.toString())) {
IUserManager auth = (IUserManager) ServiceHelper.getGlobalInstance(
IUserManager.class, bundle);
-
+
switch (required) {
case WRITE:
return (auth.getUserLevel(userName).ordinal() <= UserLevel.NETWORKADMIN.ordinal());
case READ:
- return (auth.getUserLevel(userName).ordinal() <= UserLevel.NETWORKOPERATOR.ordinal());
+ return (auth.getUserLevel(userName).ordinal() <= UserLevel.NETWORKOPERATOR.ordinal());
default:
return false;
}
}
return true;
}
-
+
}
@XmlAccessorType(XmlAccessType.NONE)
public class FlowConfigs {
- @XmlElement
- List<FlowConfig> flowConfig;
- //To satisfy JAXB
- private FlowConfigs() {
-
- }
-
- public FlowConfigs(List<FlowConfig> flowConfig) {
- this.flowConfig = flowConfig;
- }
-
- public List<FlowConfig> getFlowConfig() {
- return flowConfig;
- }
-
- public void setFlowConfig(List<FlowConfig> flowConfig) {
- this.flowConfig = flowConfig;
- }
+ @XmlElement
+ List<FlowConfig> flowConfig;
+ //To satisfy JAXB
+ private FlowConfigs() {
+
+ }
+
+ public FlowConfigs(List<FlowConfig> flowConfig) {
+ this.flowConfig = flowConfig;
+ }
+
+ public List<FlowConfig> getFlowConfig() {
+ return flowConfig;
+ }
+
+ public void setFlowConfig(List<FlowConfig> flowConfig) {
+ this.flowConfig = flowConfig;
+ }
}
/**
* Flow Configuration Northbound API
- *
+ *
* <br>
* <br>
* Authentication scheme : <b>HTTP Basic</b><br>
* trusted authority.<br>
* More info :
* http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
- *
+ *
*/
@Path("/")
public class FlowProgrammerNorthbound {
/**
* Returns a list of Flows configured on the given container
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Returns a list of Flows configured on a Node in a given container
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Returns the flow configuration matching a human-readable name and nodeId
* on a given Container.
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Add a flow configuration
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Delete a Flow configuration
- *
+ *
* DELETE /flows/{containerName}/{nodeType}/{nodeId}/{name}
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Toggle a Flow configuration
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
* Location is represented by Host node connector which is essentially a logical
* entity that represents a Switch/Port. A host is represented by it's
* IP-address and mac-address.
- *
+ *
* <br>
* <br>
* Authentication scheme : <b>HTTP Basic</b><br>
* trusted authority.<br>
* More info :
* http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
- *
+ *
*/
@Path("/")
/**
* Returns a list of all Hosts : both configured via PUT API and dynamically
* learnt on the network.
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
@ResponseCode(code = 404, condition = "The containerName is not found"),
@ResponseCode(code = 503, condition = "One or more of Controller Services are unavailable") })
public Hosts getActiveHosts(@PathParam("containerName") String containerName) {
-
+
if (!NorthboundUtils.isAuthorized(
getUserName(), containerName, Privilege.READ, this)) {
throw new UnauthorizedException(
/**
* Returns a list of Hosts that are statically configured and are connected
* to a NodeConnector that is down.
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Returns a host that matches the IP Address value passed as parameter.
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Add a Static Host configuration
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Delete a Static Host configuration
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
public Response deleteFlow(
@PathParam(value = "containerName") String containerName,
@PathParam(value = "networkAddress") String networkAddress) {
-
+
if (!NorthboundUtils.isAuthorized(
getUserName(), containerName, Privilege.WRITE, this)) {
throw new UnauthorizedException(
@XmlAccessorType(XmlAccessType.NONE)
public class Hosts {
- @XmlElement (name="host")
- Set<HostNodeConnector> hostNodeConnector;
-
- public Hosts() {
- }
- public Hosts (Set<HostNodeConnector> hostNodeConnector) {
- this.hostNodeConnector = hostNodeConnector;
- }
- public Set<HostNodeConnector> getHostNodeConnector() {
- return hostNodeConnector;
- }
- public void setHostNodeConnector(Set<HostNodeConnector> hostNodeConnector) {
- this.hostNodeConnector = hostNodeConnector;
- }
+ @XmlElement (name="host")
+ Set<HostNodeConnector> hostNodeConnector;
+
+ public Hosts() {
+ }
+ public Hosts (Set<HostNodeConnector> hostNodeConnector) {
+ this.hostNodeConnector = hostNodeConnector;
+ }
+ public Set<HostNodeConnector> getHostNodeConnector() {
+ return hostNodeConnector;
+ }
+ public void setHostNodeConnector(Set<HostNodeConnector> hostNodeConnector) {
+ this.hostNodeConnector = hostNodeConnector;
+ }
}
}
}
+ private void testNodeProperties(JSONObject node, Integer nodeId,
+ String nodeType, Integer timestamp, String timestampName,
+ Integer actionsValue, Integer capabilitiesValue,
+ Integer tablesValue, Integer buffersValue) throws JSONException {
+
+ JSONObject nodeInfo = node.getJSONObject("node");
+ Assert.assertEquals(nodeId, (Integer) nodeInfo.getInt("@id"));
+ Assert.assertEquals(nodeType, nodeInfo.getString("@type"));
+
+ JSONObject properties = node.getJSONObject("properties");
+
+ if (timestamp == null || timestampName == null) {
+ Assert.assertFalse(properties.has("timeStamp"));
+ } else {
+ Assert.assertEquals(
+ timestamp,
+ (Integer) properties.getJSONObject("timeStamp").getInt(
+ "timestamp"));
+ Assert.assertEquals(
+ timestampName,
+ properties.getJSONObject("timeStamp").getString(
+ "timestampName"));
+ }
+ if (actionsValue == null) {
+ Assert.assertFalse(properties.has("actions"));
+ } else {
+ Assert.assertEquals(actionsValue, (Integer) properties
+ .getJSONObject("actions").getInt("actionsValue"));
+ }
+ if (capabilitiesValue == null) {
+ Assert.assertFalse(properties.has("capabilities"));
+ } else {
+ Assert.assertEquals(capabilitiesValue, (Integer) properties
+ .getJSONObject("capabilities").getInt("capabilitiesValue"));
+ }
+ if (tablesValue == null) {
+ Assert.assertFalse(properties.has("tables"));
+ } else {
+ Assert.assertEquals(tablesValue, (Integer) properties
+ .getJSONObject("tables").getInt("tablesValue"));
+ }
+ if (buffersValue == null) {
+ Assert.assertFalse(properties.has("buffers"));
+ } else {
+ Assert.assertEquals(buffersValue, (Integer) properties
+ .getJSONObject("buffers").getInt("buffersValue"));
+ }
+ }
+
+ private void testNodeConnectorProperties(
+ JSONObject nodeConnectorProperties, Integer ncId, String ncType,
+ Integer nodeId, String nodeType, Integer state,
+ Integer capabilities, Integer bandwidth) throws JSONException {
+
+ JSONObject nodeConnector = nodeConnectorProperties
+ .getJSONObject("nodeconnector");
+ JSONObject node = nodeConnector.getJSONObject("node");
+ JSONObject properties = nodeConnectorProperties
+ .getJSONObject("properties");
+
+ Assert.assertEquals(ncId, (Integer) nodeConnector.getInt("@id"));
+ Assert.assertEquals(ncType, nodeConnector.getString("@type"));
+ Assert.assertEquals(nodeId, (Integer) node.getInt("@id"));
+ Assert.assertEquals(nodeType, node.getString("@type"));
+ if (state == null) {
+ Assert.assertFalse(properties.has("state"));
+ } else {
+ Assert.assertEquals(
+ state,
+ (Integer) properties.getJSONObject("state").getInt(
+ "stateValue"));
+ }
+ if (capabilities == null) {
+ Assert.assertFalse(properties.has("capabilities"));
+ } else {
+ Assert.assertEquals(capabilities, (Integer) properties
+ .getJSONObject("capabilities").getInt("capabilitiesValue"));
+ }
+ if (bandwidth == null) {
+ Assert.assertFalse(properties.has("bandwidth"));
+ } else {
+ Assert.assertEquals(
+ bandwidth,
+ (Integer) properties.getJSONObject("bandwidth").getInt(
+ "bandwidthValue"));
+ }
+
+ }
+
+ @Test
+ public void testSwitchManager() {
+ String baseURL = "http://127.0.0.1:8080/controller/nb/v2/switch/default/";
+
+ // define Node/NodeConnector attributes for test
+ int nodeId_1 = 51966;
+ int nodeId_2 = 3366;
+ int nodeId_3 = 4477;
+ int nodeConnectorId_1 = 51966;
+ int nodeConnectorId_2 = 12;
+ int nodeConnectorId_3 = 34;
+ String nodeType = "STUB";
+ String ncType = "STUB";
+ int timestamp_1 = 100000;
+ String timestampName_1 = "connectedSince";
+ int actionsValue_1 = 2;
+ int capabilitiesValue_1 = 3;
+ int tablesValue_1 = 1;
+ int buffersValue_1 = 1;
+ int ncState = 1;
+ int ncCapabilities = 1;
+ int ncBandwidth = 1000000000;
+
+ // Test GET all nodes
+ try {
+ String result = getJsonResult(baseURL + "nodes");
+ JSONTokener jt = new JSONTokener(result);
+ JSONObject json = new JSONObject(jt);
+
+ // Test for first node
+ JSONObject node = getJsonInstance(json, "nodeProperties", nodeId_1);
+ Assert.assertNotNull(node);
+ testNodeProperties(node, nodeId_1, nodeType, timestamp_1,
+ timestampName_1, actionsValue_1, capabilitiesValue_1,
+ tablesValue_1, buffersValue_1);
+
+ // Test 2nd node, properties of 2nd node same as first node
+ node = getJsonInstance(json, "nodeProperties", nodeId_2);
+ Assert.assertNotNull(node);
+ testNodeProperties(node, nodeId_2, nodeType, timestamp_1,
+ timestampName_1, actionsValue_1, capabilitiesValue_1,
+ tablesValue_1, buffersValue_1);
+
+ // Test 3rd node, properties of 3rd node same as first node
+ node = getJsonInstance(json, "nodeProperties", nodeId_3);
+ Assert.assertNotNull(node);
+ testNodeProperties(node, nodeId_3, nodeType, timestamp_1,
+ timestampName_1, actionsValue_1, capabilitiesValue_1,
+ tablesValue_1, buffersValue_1);
+
+ } catch (Exception e) {
+ Assert.assertTrue(false);
+ }
+
+ // Test GET nodeConnectors of a node
+ try {
+ //Test first node
+ String result = getJsonResult(baseURL + "node/STUB/" + nodeId_1);
+ JSONTokener jt = new JSONTokener(result);
+ JSONObject json = new JSONObject(jt);
+ JSONObject nodeConnectorProperties = json
+ .getJSONObject("nodeConnectorProperties");
+
+ testNodeConnectorProperties(nodeConnectorProperties,
+ nodeConnectorId_1, ncType, nodeId_1, nodeType, ncState,
+ ncCapabilities, ncBandwidth);
+
+ //Test second node
+ result = getJsonResult(baseURL + "node/STUB/" + nodeId_2);
+ jt = new JSONTokener(result);
+ json = new JSONObject(jt);
+ nodeConnectorProperties = json
+ .getJSONObject("nodeConnectorProperties");
+
+ testNodeConnectorProperties(nodeConnectorProperties,
+ nodeConnectorId_2, ncType, nodeId_2, nodeType, ncState,
+ ncCapabilities, ncBandwidth);
+
+ //Test third node
+ result = getJsonResult(baseURL + "node/STUB/" + nodeId_3);
+ jt = new JSONTokener(result);
+ json = new JSONObject(jt);
+
+ nodeConnectorProperties = json
+ .getJSONObject("nodeConnectorProperties");
+ testNodeConnectorProperties(nodeConnectorProperties,
+ nodeConnectorId_3, ncType, nodeId_3, nodeType, ncState,
+ ncCapabilities, ncBandwidth);
+
+ } catch (Exception e) {
+ Assert.assertTrue(false);
+ }
+
+ // Test delete node property
+ try {
+ // Delete timestamp property from node1
+ String result = getJsonResult(baseURL + "node/STUB/" + nodeId_1
+ + "/property/timeStamp", "DELETE");
+ Assert.assertEquals(200, httpResponseCode.intValue());
+
+ // Check node1
+ result = getJsonResult(baseURL + "nodes");
+ JSONTokener jt = new JSONTokener(result);
+ JSONObject json = new JSONObject(jt);
+ JSONObject node = getJsonInstance(json, "nodeProperties", nodeId_1);
+ Assert.assertNotNull(node);
+ testNodeProperties(node, nodeId_1, nodeType, null, null,
+ actionsValue_1, capabilitiesValue_1, tablesValue_1,
+ buffersValue_1);
+
+ // Delete actions property from node2
+ result = getJsonResult(baseURL + "node/STUB/" + nodeId_2
+ + "/property/actions", "DELETE");
+ Assert.assertEquals(200, httpResponseCode.intValue());
+
+ // Check node2
+ result = getJsonResult(baseURL + "nodes");
+ jt = new JSONTokener(result);
+ json = new JSONObject(jt);
+ node = getJsonInstance(json, "nodeProperties", nodeId_2);
+ Assert.assertNotNull(node);
+ testNodeProperties(node, nodeId_2, nodeType, timestamp_1,
+ timestampName_1, null, capabilitiesValue_1, tablesValue_1,
+ buffersValue_1);
+
+ } catch (Exception e) {
+ Assert.assertTrue(false);
+ }
+
+ // Test add property to node
+ try {
+ // Add Tier and Bandwidth property to node1
+ String result = getJsonResult(baseURL + "node/STUB/" + nodeId_1
+ + "/property/tier/1001", "PUT");
+ Assert.assertEquals(201, httpResponseCode.intValue());
+ result = getJsonResult(baseURL + "node/STUB/" + nodeId_1
+ + "/property/bandwidth/1002", "PUT");
+ Assert.assertEquals(201, httpResponseCode.intValue());
+
+ // Test for first node
+ result = getJsonResult(baseURL + "nodes");
+ JSONTokener jt = new JSONTokener(result);
+ JSONObject json = new JSONObject(jt);
+ JSONObject node = getJsonInstance(json, "nodeProperties", nodeId_1);
+ Assert.assertNotNull(node);
+ Assert.assertEquals(1001, node.getJSONObject("properties")
+ .getJSONObject("tier").getInt("tierValue"));
+ Assert.assertEquals(1002, node.getJSONObject("properties")
+ .getJSONObject("bandwidth").getInt("bandwidthValue"));
+
+ } catch (Exception e) {
+ Assert.assertTrue(false);
+ }
+
+ // Test delete nodeConnector property
+ try {
+ // Delete state property of nodeconnector1
+ String result = getJsonResult(baseURL + "nodeconnector/STUB/"
+ + nodeId_1 + "/STUB/" + nodeConnectorId_1
+ + "/property/state", "DELETE");
+ Assert.assertEquals(200, httpResponseCode.intValue());
+
+ result = getJsonResult(baseURL + "node/STUB/" + nodeId_1);
+ JSONTokener jt = new JSONTokener(result);
+ JSONObject json = new JSONObject(jt);
+ JSONObject nodeConnectorProperties = json
+ .getJSONObject("nodeConnectorProperties");
+
+ testNodeConnectorProperties(nodeConnectorProperties,
+ nodeConnectorId_1, ncType, nodeId_1, nodeType, null,
+ ncCapabilities, ncBandwidth);
+
+ // Delete capabilities property of nodeconnector2
+ result = getJsonResult(baseURL + "nodeconnector/STUB/" + nodeId_2
+ + "/STUB/" + nodeConnectorId_2 + "/property/capabilities",
+ "DELETE");
+ Assert.assertEquals(200, httpResponseCode.intValue());
+
+ result = getJsonResult(baseURL + "node/STUB/" + nodeId_2);
+ jt = new JSONTokener(result);
+ json = new JSONObject(jt);
+ nodeConnectorProperties = json
+ .getJSONObject("nodeConnectorProperties");
+
+ testNodeConnectorProperties(nodeConnectorProperties,
+ nodeConnectorId_2, ncType, nodeId_2, nodeType, ncState,
+ null, ncBandwidth);
+
+ } catch (Exception e) {
+ Assert.assertTrue(false);
+ }
+
+ // Test PUT nodeConnector property
+ try {
+ int newBandwidth = 1001;
+
+            // Add Bandwidth property to nodeConnector1
+ String result = getJsonResult(baseURL + "nodeconnector/STUB/"
+ + nodeId_1 + "/STUB/" + nodeConnectorId_1
+ + "/property/bandwidth/" + newBandwidth, "PUT");
+ Assert.assertEquals(201, httpResponseCode.intValue());
+
+ result = getJsonResult(baseURL + "node/STUB/" + nodeId_1);
+ JSONTokener jt = new JSONTokener(result);
+ JSONObject json = new JSONObject(jt);
+ JSONObject nodeConnectorProperties = json
+ .getJSONObject("nodeConnectorProperties");
+
+ // Check for new bandwidth value, state value removed from previous
+ // test
+ testNodeConnectorProperties(nodeConnectorProperties,
+ nodeConnectorId_1, ncType, nodeId_1, nodeType, null,
+ ncCapabilities, newBandwidth);
+
+ } catch (Exception e) {
+ Assert.assertTrue(false);
+ }
+ }
+
@Test
public void testStatistics() {
String actionTypes[] = { "drop", "loopback", "flood", "floodAll",
fc = "{\"dynamic\":\"false\", \"name\":\"test2\", \"nwSrc\":\"1.1.1.1\", \"node\":{\"@id\":\"51966\",\"@type\":\"STUB\"}, \"actions\":[\"DROP\"]}";
result = getJsonResult(baseURL + "STUB/51966/test2", "POST", fc);
Assert.assertTrue(httpResponseCode == 201);
-
+
// check that request returns both flows given node.
result = getJsonResult(baseURL + "STUB/51966/", "GET");
jt = new JSONTokener(result);
@XmlAccessorType(XmlAccessType.NONE)
public class StaticRoutes {
- @XmlElement
- List<StaticRoute> staticRoute;
- //To satisfy JAXB
- private StaticRoutes() {
-
- }
-
- public StaticRoutes(List<StaticRoute> staticRoute) {
- this.staticRoute = staticRoute;
- }
-
- public List<StaticRoute> getFlowConfig() {
- return staticRoute;
- }
-
- public void setFlowConfig(List<StaticRoute> staticRoute) {
- this.staticRoute = staticRoute;
- }
+ @XmlElement
+ List<StaticRoute> staticRoute;
+ //To satisfy JAXB
+ private StaticRoutes() {
+
+ }
+
+ public StaticRoutes(List<StaticRoute> staticRoute) {
+ this.staticRoute = staticRoute;
+ }
+
+ public List<StaticRoute> getFlowConfig() {
+ return staticRoute;
+ }
+
+ public void setFlowConfig(List<StaticRoute> staticRoute) {
+ this.staticRoute = staticRoute;
+ }
}
* Authentication realm : <b>opendaylight</b><br>
* Transport : <b>HTTP and HTTPS</b><br>
* <br>
- * HTTPS Authentication is disabled by default. Administrator can enable it in tomcat-server.xml after adding
+ * HTTPS Authentication is disabled by default. Administrator can enable it in tomcat-server.xml after adding
* a proper keystore / SSL certificate from a trusted authority.<br>
* More info : http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
*/
public class StaticRoutingNorthbound {
- private String username;
-
+ private String username;
+
@Context
public void setSecurityContext(SecurityContext context) {
- username = context.getUserPrincipal().getName();
+ username = context.getUserPrincipal().getName();
}
protected String getUserName() {
return username;
}
-
-
+
+
private List<StaticRoute> getStaticRoutesInternal(String containerName) {
IForwardingStaticRouting staticRouting = (IForwardingStaticRouting) ServiceHelper
public StaticRoutes getStaticRoutes(
@PathParam("containerName") String containerName) {
- if(!NorthboundUtils.isAuthorized(getUserName(), containerName,
+ if(!NorthboundUtils.isAuthorized(getUserName(), containerName,
Privilege.WRITE, this)){
- throw new
+ throw new
UnauthorizedException("User is not authorized to perform this operation on container "
+ containerName);
}
@PathParam("containerName") String containerName,
@PathParam("name") String name) {
- if(!NorthboundUtils.isAuthorized(getUserName(), containerName,
+ if(!NorthboundUtils.isAuthorized(getUserName(), containerName,
Privilege.WRITE, this)){
- throw new
+ throw new
UnauthorizedException("User is not authorized to perform this operation on container "
+ containerName);
}
@PathParam(value = "name") String name,
@TypeHint(StaticRoute.class) JAXBElement<StaticRoute> staticRouteData) {
-
- if(!NorthboundUtils.isAuthorized(getUserName(), containerName,
+
+ if(!NorthboundUtils.isAuthorized(getUserName(), containerName,
Privilege.WRITE, this)){
- throw new
+ throw new
UnauthorizedException("User is not authorized to perform this operation on container "
+ containerName);
}
public Response removeStaticRoute(
@PathParam(value = "containerName") String containerName,
@PathParam(value = "name") String name) {
-
- if(!NorthboundUtils.isAuthorized(getUserName(), containerName,
+
+ if(!NorthboundUtils.isAuthorized(getUserName(), containerName,
Privilege.WRITE, this)){
- throw new
+ throw new
UnauthorizedException("User is not authorized to perform this operation on container "
+ containerName);
}
@XmlAccessorType(XmlAccessType.NONE)
public class AllFlowStatistics {
- @XmlElement
- List<FlowStatistics> flowStatistics;
- //To satisfy JAXB
- private AllFlowStatistics() {
- }
-
- public AllFlowStatistics(List<FlowStatistics> flowStatistics) {
- this.flowStatistics = flowStatistics;
- }
-
- public List<FlowStatistics> getFlowStatistics() {
- return flowStatistics;
- }
-
- public void setFlowStatistics(List<FlowStatistics> flowStatistics) {
- this.flowStatistics = flowStatistics;
- }
+ @XmlElement
+ List<FlowStatistics> flowStatistics;
+ //To satisfy JAXB
+ private AllFlowStatistics() {
+ }
+
+ public AllFlowStatistics(List<FlowStatistics> flowStatistics) {
+ this.flowStatistics = flowStatistics;
+ }
+
+ public List<FlowStatistics> getFlowStatistics() {
+ return flowStatistics;
+ }
+
+ public void setFlowStatistics(List<FlowStatistics> flowStatistics) {
+ this.flowStatistics = flowStatistics;
+ }
}
@XmlAccessorType(XmlAccessType.NONE)
public class AllPortStatistics {
- @XmlElement
- List<PortStatistics> portStatistics;
- //To satisfy JAXB
- private AllPortStatistics() {
- }
-
- public AllPortStatistics(List<PortStatistics> portStatistics) {
- this.portStatistics = portStatistics;
- }
-
- public List<PortStatistics> getPortStatistics() {
- return portStatistics;
- }
-
- public void setPortStatistics(List<PortStatistics> portStatistics) {
- this.portStatistics = portStatistics;
- }
+ @XmlElement
+ List<PortStatistics> portStatistics;
+ //To satisfy JAXB
+ private AllPortStatistics() {
+ }
+
+ public AllPortStatistics(List<PortStatistics> portStatistics) {
+ this.portStatistics = portStatistics;
+ }
+
+ public List<PortStatistics> getPortStatistics() {
+ return portStatistics;
+ }
+
+ public void setPortStatistics(List<PortStatistics> portStatistics) {
+ this.portStatistics = portStatistics;
+ }
}
// To satisfy JAXB
@SuppressWarnings("unused")
- private FlowStatistics() {
+ private FlowStatistics() {
}
public FlowStatistics(Node node, List<FlowOnNode> flowStat) {
// To satisfy JAXB
@SuppressWarnings("unused")
- private PortStatistics() {
+ private PortStatistics() {
}
public PortStatistics(Node node, List<NodeConnectorStatistics> portStats) {
/**
* Northbound APIs that returns various Statistics exposed by the Southbound
* plugins such as Openflow.
- *
+ *
* <br>
* <br>
* Authentication scheme : <b>HTTP Basic</b><br>
* trusted authority.<br>
* More info :
* http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
- *
+ *
*/
@Path("/")
public class StatisticsNorthbound {
/**
* Returns a list of all Flow Statistics from all the Nodes.
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Returns a list of Flow Statistics for a given Node.
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Returns a list of all the Port Statistics across all the NodeConnectors
* on all the Nodes.
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
/**
* Returns a list of all the Port Statistics across all the NodeConnectors
* in a given Node.
- *
+ *
* @param containerName
* Name of the Container. The Container name for the base
* controller is "default".
@XmlAccessorType(XmlAccessType.NONE)
public class SubnetConfigs {
- @XmlElement
- List<SubnetConfig> subnetConfig;
- //To satisfy JAXB
- private SubnetConfigs() {
-
- }
-
- public SubnetConfigs(List<SubnetConfig> subnetConfig) {
- this.subnetConfig = subnetConfig;
- }
-
- public List<SubnetConfig> getSubnetConfig() {
- return subnetConfig;
- }
-
- public void setSubnetConfig(List<SubnetConfig> subnetConfig) {
- this.subnetConfig = subnetConfig;
- }
+ @XmlElement
+ List<SubnetConfig> subnetConfig;
+ //To satisfy JAXB
+ private SubnetConfigs() {
+
+ }
+
+ public SubnetConfigs(List<SubnetConfig> subnetConfig) {
+ this.subnetConfig = subnetConfig;
+ }
+
+ public List<SubnetConfig> getSubnetConfig() {
+ return subnetConfig;
+ }
+
+ public void setSubnetConfig(List<SubnetConfig> subnetConfig) {
+ this.subnetConfig = subnetConfig;
+ }
}
/**
* List all the subnets in a given container
- *
+ *
* @param containerName
* container in which we want to query the subnets
- *
+ *
* @return a List of SubnetConfig
*/
@Path("/{containerName}")
/**
* List the configuration of a subnet in a given container
- *
+ *
* @param containerName
* container in which we want to query the subnet
* @param subnetName
* of the subnet being queried
- *
+ *
* @return a SubnetConfig
*/
@Path("/{containerName}/{subnetName}")
/**
* Add/Update a subnet to a container
- *
+ *
* @param containerName
* container in which we want to add/update the subnet
* @param subnetName
* @param subnet
* pair default gateway IP/mask that identify the subnet being
* added modified
- *
+ *
*/
@Path("/{containerName}/{subnetName}")
@POST
/**
* Delete a subnet from a container
- *
+ *
* @param containerName
* container in which we want to delete the subnet by name
* @param subnetName
* of the subnet to be remove.
- *
+ *
*/
@Path("/{containerName}/{subnetName}")
@DELETE
}
/*
- *
+ *
* Add or remove switch ports to a subnet POST subnets/green/sw
- *
+ *
* @param model
- *
+ *
* @param containerName
- *
+ *
* @param name
- *
+ *
* @param subnet: the subnet name name
- *
+ *
* @param switchports: datapath ID/port list =>
* xx:xx:xx:xx:xx:xx:xx:xx/a,b,c-m,r-t,y
- *
+ *
* @return
- *
+ *
* @RequestMapping(value = "/{containerName}/{name}", method =
* RequestMethod.POST)
- *
+ *
* public View addSwitchports(Map<String, Object> model,
- *
+ *
* @PathVariable(value = "containerName") String containerName,
- *
+ *
* @PathVariable(value = "name") String name,
- *
+ *
* @RequestParam(value = "nodeports") String nodePorts,
- *
+ *
* @RequestParam(value = "action") String action) {
- *
+ *
* checkDefaultDisabled(containerName); ISwitchManager switchManager = null;
* try { BundleContext bCtx = FrameworkUtil.getBundle(this.getClass())
* .getBundleContext();
- *
+ *
* ServiceReference[] services = bCtx.getServiceReferences(
* ISwitchManager.class.getName(), "(containerName=" + containerName + ")");
- *
+ *
* if (services != null) { switchManager = (ISwitchManager)
* bCtx.getService(services[0]); logger.debug("Switch manager reference is:"
* + switchManager); } } catch (Exception e) {
* logger.error("Switch Manager reference is NULL"); }
- *
+ *
* checkContainerExists(switchManager);
- *
+ *
* String ret; if (action.equals("add")) { ret =
* switchManager.addPortsToSubnet(name, nodePorts); } else if
* (action.equals("remove")) { ret =
* switchManager.removePortsFromSubnet(name, nodePorts); } else { throw new
* UnsupportedMediaTypeException(RestMessages.UNKNOWNACTION .toString() +
* ": " + action); }
- *
+ *
* return returnViewOrThrowConflicEx(model, ret); }
*/
}
@XmlAccessorType(XmlAccessType.NONE)
public class NodeConnectors {
- @XmlElement
- List<NodeConnectorProperties> nodeConnectorProperties;
- //To satisfy JAXB
- private NodeConnectors() {
-
- }
-
- public NodeConnectors(List<NodeConnectorProperties> nodeConnectorProperties) {
- this.nodeConnectorProperties = nodeConnectorProperties;
- }
-
- public List<NodeConnectorProperties> getNodeConnectorProperties() {
- return nodeConnectorProperties;
- }
-
- public void setNodeConnectorProperties(List<NodeConnectorProperties> nodeConnectorProperties) {
- this.nodeConnectorProperties = nodeConnectorProperties;
- }
+ @XmlElement
+ List<NodeConnectorProperties> nodeConnectorProperties;
+ //To satisfy JAXB
+ private NodeConnectors() {
+
+ }
+
+ public NodeConnectors(List<NodeConnectorProperties> nodeConnectorProperties) {
+ this.nodeConnectorProperties = nodeConnectorProperties;
+ }
+
+ public List<NodeConnectorProperties> getNodeConnectorProperties() {
+ return nodeConnectorProperties;
+ }
+
+ public void setNodeConnectorProperties(List<NodeConnectorProperties> nodeConnectorProperties) {
+ this.nodeConnectorProperties = nodeConnectorProperties;
+ }
}
@XmlAccessorType(XmlAccessType.NONE)
public class Nodes {
- @XmlElement
- List<NodeProperties> nodeProperties;
- //To satisfy JAXB
- private Nodes() {
-
- }
-
- public Nodes(List<NodeProperties> nodeProperties) {
- this.nodeProperties = nodeProperties;
- }
-
- public List<NodeProperties> getNodeProperties() {
- return nodeProperties;
- }
-
- public void setNodeProperties(List<NodeProperties> nodeProperties) {
- this.nodeProperties = nodeProperties;
- }
+ @XmlElement
+ List<NodeProperties> nodeProperties;
+ //To satisfy JAXB
+ private Nodes() {
+
+ }
+
+ public Nodes(List<NodeProperties> nodeProperties) {
+ this.nodeProperties = nodeProperties;
+ }
+
+ public List<NodeProperties> getNodeProperties() {
+ return nodeProperties;
+ }
+
+ public void setNodeProperties(List<NodeProperties> nodeProperties) {
+ this.nodeProperties = nodeProperties;
+ }
}
}
handleNodeAvailability(containerName, nodeType, nodeId);
- Node node = Node.fromString(nodeId);
+ Node node = Node.fromString(nodeType, nodeId);
Property prop = switchManager.createProperty(propName, propValue);
if (prop == null) {
}
handleNodeAvailability(containerName, nodeType, nodeId);
- Node node = Node.fromString(nodeId);
-
+ Node node = Node.fromString(nodeType, nodeId);
Status ret = switchManager.removeNodeProp(node, propertyName);
if (ret.isSuccess()) {
return Response.ok().build();
}
handleNodeAvailability(containerName, nodeType, nodeId);
- Node node = Node.fromString(nodeId);
-
+ Node node = Node.fromString(nodeType, nodeId);
List<NodeConnectorProperties> res = new ArrayList<NodeConnectorProperties>();
Set<NodeConnector> ncs = switchManager.getNodeConnectors(node);
if (ncs == null) {
}
handleNodeAvailability(containerName, nodeType, nodeId);
- Node node = Node.fromString(nodeId);
+ Node node = Node.fromString(nodeType, nodeId);
handleNodeConnectorAvailability(containerName, node, nodeConnectorType,
nodeConnectorId);
NodeConnector nc = NodeConnector
- .fromStringNoNode(nodeConnectorId, node);
+ .fromStringNoNode(nodeConnectorType, nodeConnectorId, node);
Property prop = switchManager.createProperty(propName, propValue);
if (prop == null) {
}
handleNodeAvailability(containerName, nodeType, nodeId);
- Node node = Node.fromString(nodeId);
+ Node node = Node.fromString(nodeType, nodeId);
handleNodeConnectorAvailability(containerName, node, nodeConnectorType,
nodeConnectorId);
NodeConnector nc = NodeConnector
- .fromStringNoNode(nodeConnectorId, node);
-
+ .fromStringNoNode(nodeConnectorType, nodeConnectorId, node);
Status ret = switchManager.removeNodeConnectorProp(nc, propertyName);
if (ret.isSuccess()) {
return Response.ok().build();
public class SwitchManagerNorthboundTest extends TestCase {
- @Test
- public void testNodes() {
- List<NodeProperties> nodeProperties = new ArrayList<NodeProperties>();
- Nodes nodes = new Nodes(nodeProperties);
- Assert.assertTrue(nodes.getNodeProperties().equals(nodeProperties));
- nodes.setNodeProperties(null);
- Assert.assertTrue(nodes.getNodeProperties() == null);
- }
-
- @Test
- public void testNodeProperties() {
- Node node = NodeCreator.createOFNode(1L);
- NodeProperties np= new NodeProperties(node, null);
- Assert.assertTrue(np.getNode().equals(node));
- Assert.assertTrue(np.getProperties() == null);
-
- Node node2 = NodeCreator.createOFNode(2L);
- np.setNode(node2);
- Assert.assertTrue(np.getNode().equals(node2));
+ @Test
+ public void testNodes() {
+ List<NodeProperties> nodeProperties = new ArrayList<NodeProperties>();
+ Nodes nodes = new Nodes(nodeProperties);
+ Assert.assertTrue(nodes.getNodeProperties().equals(nodeProperties));
+ nodes.setNodeProperties(null);
+ Assert.assertTrue(nodes.getNodeProperties() == null);
+ }
- Set<Property> props = new HashSet<Property>();
- np.setProperties(props);
- Assert.assertTrue(np.getProperties().equals(props));
- }
+ @Test
+ public void testNodeProperties() {
+ Node node = NodeCreator.createOFNode(1L);
+ NodeProperties np= new NodeProperties(node, null);
+ Assert.assertTrue(np.getNode().equals(node));
+ Assert.assertTrue(np.getProperties() == null);
- @Test
- public void testNodeConnectors() {
- List<NodeConnectorProperties> nodeConnectorProperties = new ArrayList<NodeConnectorProperties>();
- NodeConnectors ncs = new NodeConnectors(nodeConnectorProperties);
- Assert.assertTrue(ncs.getNodeConnectorProperties().equals(nodeConnectorProperties));
- ncs.setNodeConnectorProperties(null);
- Assert.assertTrue(ncs.getNodeConnectorProperties() == null);
- }
-
- @Test
- public void testNodeConnectorProperties() {
- Node node = NodeCreator.createOFNode(1L);
+ Node node2 = NodeCreator.createOFNode(2L);
+ np.setNode(node2);
+ Assert.assertTrue(np.getNode().equals(node2));
+
+ Set<Property> props = new HashSet<Property>();
+ np.setProperties(props);
+ Assert.assertTrue(np.getProperties().equals(props));
+ }
+
+ @Test
+ public void testNodeConnectors() {
+ List<NodeConnectorProperties> nodeConnectorProperties = new ArrayList<NodeConnectorProperties>();
+ NodeConnectors ncs = new NodeConnectors(nodeConnectorProperties);
+ Assert.assertTrue(ncs.getNodeConnectorProperties().equals(nodeConnectorProperties));
+ ncs.setNodeConnectorProperties(null);
+ Assert.assertTrue(ncs.getNodeConnectorProperties() == null);
+ }
+
+ @Test
+ public void testNodeConnectorProperties() {
+ Node node = NodeCreator.createOFNode(1L);
NodeConnector port = NodeConnectorCreator.createOFNodeConnector(
(short) 24, node);
-
+
NodeConnectorProperties ncp= new NodeConnectorProperties(port, null);
- Assert.assertTrue(ncp.getProperties() == null);
- Assert.assertTrue(ncp.getNodeConnector().equals(port));
-
+ Assert.assertTrue(ncp.getProperties() == null);
+ Assert.assertTrue(ncp.getNodeConnector().equals(port));
+
NodeConnector port2 = NodeConnectorCreator.createOFNodeConnector(
(short) 33, node);
- ncp.setNodeConnector(port2);
- Assert.assertTrue(ncp.getNodeConnector().equals(port2));
+ ncp.setNodeConnector(port2);
+ Assert.assertTrue(ncp.getNodeConnector().equals(port2));
+
+ Set<Property> props = new HashSet<Property>();
+ ncp.setProperties(props);
+ Assert.assertTrue(ncp.getProperties().equals(props));
+ }
- Set<Property> props = new HashSet<Property>();
- ncp.setProperties(props);
- Assert.assertTrue(ncp.getProperties().equals(props));
- }
-
}
@XmlAccessorType(XmlAccessType.NONE)
public class Topology {
- @XmlElement
- List<EdgeProperties> edgeProperties;
- //To satisfy JAXB
- private Topology() {
-
- }
-
- public Topology(List<EdgeProperties> edgeProperties) {
- this.edgeProperties = edgeProperties;
- }
-
- public List<EdgeProperties> getEdgeProperties() {
- return edgeProperties;
- }
-
- public void setEdgeProperties(List<EdgeProperties> edgeProperties) {
- this.edgeProperties = edgeProperties;
- }
+ @XmlElement
+ List<EdgeProperties> edgeProperties;
+ //To satisfy JAXB
+ private Topology() {
+
+ }
+
+ public Topology(List<EdgeProperties> edgeProperties) {
+ this.edgeProperties = edgeProperties;
+ }
+
+ public List<EdgeProperties> getEdgeProperties() {
+ return edgeProperties;
+ }
+
+ public void setEdgeProperties(List<EdgeProperties> edgeProperties) {
+ this.edgeProperties = edgeProperties;
+ }
}
/**
* Topology Northbound REST API
- *
+ *
* <br>
* <br>
* Authentication scheme : <b>HTTP Basic</b><br>
}
/**
- *
+ *
* Retrieve the Topology
- *
+ *
* @param containerName
* The container for which we want to retrieve the topology
- *
+ *
* @return A List of EdgeProps each EdgeProp represent an Edge of the grap
* with the corresponding properties attached to it.
*/
/**
* Retrieve the user configured links
- *
+ *
* @param containerName
* The container for which we want to retrieve the user links
- *
+ *
* @return A List of user configured links
*/
@Path("/{containerName}/userLink")
/**
* Add an User Link
- *
+ *
* @param containerName
* Name of the Container. The base Container is "default".
* @param TopologyUserLinkConfig
/**
* Delete an User Link
- *
+ *
* @param containerName
* Name of the Container. The base Container is "default".
* @param name
@XmlAccessorType(XmlAccessType.NONE)
public class TopologyUserLinks {
- @XmlElement
- List<TopologyUserLinkConfig> userLinks;
-
- //To satisfy JAXB
- private TopologyUserLinks() {
-
- }
-
- public List<TopologyUserLinkConfig> getUserLinks() {
- return userLinks;
- }
-
- public void setUserLinks(List<TopologyUserLinkConfig> userLinks) {
- this.userLinks = userLinks;
- }
-
- public TopologyUserLinks(List<TopologyUserLinkConfig> userLinks) {
- this.userLinks = new ArrayList<TopologyUserLinkConfig>(userLinks);
- }
+ @XmlElement
+ List<TopologyUserLinkConfig> userLinks;
+
+ //To satisfy JAXB
+ private TopologyUserLinks() {
+
+ }
+
+ public List<TopologyUserLinkConfig> getUserLinks() {
+ return userLinks;
+ }
+
+ public void setUserLinks(List<TopologyUserLinkConfig> userLinks) {
+ this.userLinks = userLinks;
+ }
+
+ public TopologyUserLinks(List<TopologyUserLinkConfig> userLinks) {
+ this.userLinks = new ArrayList<TopologyUserLinkConfig>(userLinks);
+ }
}
assertEquals(2, getEdgePropList.size());
}
-
+
}
/**
* Updates node and its properties
*
- * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.Tier} etc.
+ * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.Tier} etc.
*/
public void updateNode(Node node, UpdateType type, Set<Property> props);
/**
* Updates node connector and its properties
*
- * @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.State} etc.
+ * @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.State} etc.
*/
public void updateNodeConnector(NodeConnector nodeConnector,
UpdateType type, Set<Property> props);
* For port statistics, it is the port id. If null the query
* will be performed for all the targets for the specified
* statistics type.
- *
+ *
* @param timeout the timeout in milliseconds the system will wait for a response
* from the switch, before declaring failure
* @return the list of openflow statistics
package org.opendaylight.controller.protocol_plugin.openflow;
/**
- * @file IRefreshInternalProvider.java
+ * @file IRefreshInternalProvider.java
*
- * @brief Topology refresh notifications requested by application
- * to be fetched from the plugin
+ * @brief Topology refresh notifications requested by application
+ * to be fetched from the plugin
*
* For example, an application that has been started late, will want to
* be up to date with the latest topology. Hence, it requests for a
* is updated with new one.
*/
public interface IStatisticsListener {
- public void descriptionRefreshed(Long switchId,
- OFDescriptionStatistics description);
+ public void descriptionRefreshed(Long switchId,
+ OFDescriptionStatistics description);
}
public interface ITopologyServiceShimListener {
/**
* Called to update on Edge in the topology graph
- *
+ *
* @param topoedgeupdateList
* List of topoedgeupdates Each topoedgeupdate includes edge, its
* Properties ( BandWidth and/or Latency etc) and update type.
/**
* Called when an Edge utilization is above the safe threshold configured on
* the controller
- *
+ *
* @param {@link org.opendaylight.controller.sal.core.Edge}
*/
public void edgeOverUtilized(Edge edge);
/**
* Called when the Edge utilization is back to normal, below the safety
* threshold level configured on the controller
- *
+ *
* @param {@link org.opendaylight.controller.sal.core.Edge}
*/
public void edgeUtilBackToNormal(Edge edge);
* @return ISwitch if present, null otherwise
*/
public ISwitch getSwitch(Long switchId);
-
+
}
* encryption and decryption of the network data.
*/
public interface IMessageReadWrite {
- /**
- * Sends the OF message out over the socket channel. For secure
- * communication, the data will be encrypted.
- *
- * @param msg OF message to be sent
- * @throws Exception
- */
- public void asyncSend(OFMessage msg) throws Exception;
-
- /**
- * Resumes sending the remaining messages in the outgoing buffer
- * @throws Exception
- */
- public void resumeSend() throws Exception;
-
- /**
- * Reads the incoming network data from the socket and retrieves the OF
- * messages. For secure communication, the data will be decrypted first.
- *
- * @return list of OF messages
- * @throws Exception
- */
+ /**
+ * Sends the OF message out over the socket channel. For secure
+ * communication, the data will be encrypted.
+ *
+ * @param msg OF message to be sent
+ * @throws Exception
+ */
+ public void asyncSend(OFMessage msg) throws Exception;
+
+ /**
+ * Resumes sending the remaining messages in the outgoing buffer
+ * @throws Exception
+ */
+ public void resumeSend() throws Exception;
+
+ /**
+ * Reads the incoming network data from the socket and retrieves the OF
+ * messages. For secure communication, the data will be decrypted first.
+ *
+ * @return list of OF messages
+ * @throws Exception
+ */
public List<OFMessage> readMessages() throws Exception;
-
- /**
- * Proper clean up when the switch connection is closed
- *
- * @return
- * @throws Exception
- */
+
+ /**
+ * Proper clean up when the switch connection is closed
+ *
+ * @return
+ * @throws Exception
+ */
public void stop() throws Exception;
}
/**
* This interface defines an abstraction of an Open Flow Switch.
- *
+ *
*/
public interface ISwitch {
/**
* Gets a unique XID.
- *
+ *
* @return XID
*/
public int getNextXid();
/**
* Returns the Switch's ID.
- *
+ *
* @return the Switch's ID
*/
public Long getId();
/**
* Returns the Switch's table numbers supported by datapath
- *
+ *
* @return the tables
*/
public Byte getTables();
/**
* Returns the Switch's bitmap of supported ofp_action_type
- *
+ *
* @return the actions
*/
public Integer getActions();
/**
* Returns the Switch's bitmap of supported ofp_capabilities
- *
+ *
* @return the capabilities
*/
public Integer getCapabilities();
/**
* Returns the Switch's buffering capacity in Number of Pkts
- *
+ *
* @return the buffers
*/
public Integer getBuffers();
/**
* Returns the Date when the switch was connected.
- *
+ *
* @return Date The date when the switch was connected
*/
public Date getConnectedDate();
* should be used for non-critical messages such as statistics request,
* discovery packets, etc. An unique XID is generated automatically and
* inserted into the message.
- *
+ *
* @param msg
* The OF message to be sent
* @return The XID used
* priority. It will be served after high priority messages. The method
* should be used for non-critical messages such as statistics request,
* discovery packets, etc. The specified XID is inserted into the message.
- *
+ *
* @param msg
* The OF message to be Sent
* @param xid
* method should be used for critical messages such as hello, echo reply
* etc. An unique XID is generated automatically and inserted into the
* message.
- *
+ *
* @param msg
* The OF message to be sent
* @return The XID used
* priority. It will be served first before normal priority messages. The
* method should be used for critical messages such as hello, echo reply
* etc. The specified XID is inserted into the message.
- *
+ *
* @param msg
* The OF message to be sent
* @return The XID used
/**
* Sends the OF message followed by a Barrier Request with a unique XID
* which is automatically generated, and waits for a result from the switch.
- *
+ *
* @param msg
* The message to be sent
* @return An Object which has one of the followings instances/values:
/**
* Returns a map containing all OFPhysicalPorts of this switch.
- *
+ *
* @return The Map of OFPhysicalPort
*/
public Map<Short, OFPhysicalPort> getPhysicalPorts();
/**
* Returns a Set containing all port IDs of this switch.
- *
+ *
* @return The Set of port ID
*/
public Set<Short> getPorts();
/**
* Returns OFPhysicalPort of the specified portNumber of this switch.
- *
+ *
* @param portNumber
* The port ID
* @return OFPhysicalPort for the specified PortNumber
/**
* Returns the bandwidth of the specified portNumber of this switch.
- *
+ *
* @param portNumber
* the port ID
* @return bandwidth
/**
* Returns True if the port is enabled,
- *
+ *
* @param portNumber
* @return True if the port is enabled
*/
/**
* Returns True if the port is enabled.
- *
+ *
* @param port
* @return True if the port is enabled
*/
/**
* Returns a list containing all enabled ports of this switch.
- *
+ *
* @return: List containing all enabled ports of this switch
*/
public List<OFPhysicalPort> getEnabledPorts();
/**
* Sends OFStatisticsRequest with a unique XID generated automatically and
* waits for a result from the switch.
- *
+ *
* @param req
* the OF Statistic Request to be sent
* @return Object has one of the following instances/values::
/**
* Returns true if the switch has reached the operational state (has sent
* FEATURE_REPLY to the controller).
- *
+ *
* @return true if the switch is operational
*/
public boolean isOperational();
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
public void init() {
logger.debug("Initializing!");
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
public void start() {
logger.debug("Starting!");
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
public void stop() {
for (Iterator<Entry<Long, ISwitch>> it = switches.entrySet().iterator(); it
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
public void destroy() {
}
instanceName);
switchHandler.start();
if (sc.isConnected()) {
- logger.info("Switch:{} is connected to the Controller",
+ logger.info("Switch:{} is connected to the Controller",
sc.socket().getRemoteSocketAddress()
.toString().split("/")[1]);
}
/**
* Sends the OF message out over the socket channel.
- *
+ *
* @param msg
* OF message to be sent
* @throws Exception
/**
* Resumes sending the remaining messages in the outgoing buffer
- *
+ *
* @throws Exception
*/
@Override
/**
* Reads the incoming network data from the socket and retrieves the OF
* messages.
- *
+ *
* @return list of OF messages
* @throws Exception
*/
final static AtomicLong seq = new AtomicLong();
final long seqNum;
boolean syncReply; // set to true if we want to be blocked until the response arrives
-
+
public PriorityMessage(OFMessage msg, int priority) {
this.msg = msg;
this.priority = priority;
/**
* Bring up secure channel using SSL Engine
- *
+ *
* @param socket
* TCP socket channel
* @throws Exception
/**
* Sends the OF message out over the socket channel. The message is
* encrypted by SSL Engine.
- *
+ *
* @param msg
* OF message to be sent
* @throws Exception
/**
* Resumes sending the remaining messages in the outgoing buffer
- *
+ *
* @throws Exception
*/
@Override
/**
* Reads the incoming network data from the socket, decryptes them and then
* retrieves the OF messages.
- *
+ *
* @return list of OF messages
* @throws Exception
*/
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init() {
this.controller.addMessageListener(OFType.PACKET_IN, this);
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
this.controller.removeMessageListener(OFType.PACKET_IN, this);
public DescStatisticsConverter(List<OFStatistics> statsList) {
this.hwDesc = null;
- this.ofDesc = (statsList == null || statsList.isEmpty())?
+ this.ofDesc = (statsList == null || statsList.isEmpty())?
null : (OFDescriptionStatistics) statsList.get(0);
}
help.append("\t pcc - Print CC info\n");
help.append("\t psize - Print sizes of all the lists\n");
help.append("\t ptm - Print timeout info\n");
- help.append("\t ecc - Enable CC\n");
- help.append("\t dcc - Disable CC\n");
+ help.append("\t ecc - Enable CC\n");
+ help.append("\t dcc - Disable CC\n");
help.append("\t scc [multiple] - Set/show CC multiple and interval\n");
- help.append("\t sports [ports] - Set/show max ports per batch\n");
+ help.append("\t sports [ports] - Set/show max ports per batch\n");
help.append("\t spause [ticks] - Set/show pause period\n");
- help.append("\t sdi [ticks] - Set/show discovery interval in ticks\n");
+ help.append("\t sdi [ticks] - Set/show discovery interval in ticks\n");
help.append("\t stm [ticks] - Set/show per timeout ticks\n");
- help.append("\t sretry [count] - Set/show num of retries\n");
- help.append("\t addsw <swid> - Add a switch\n");
- help.append("\t remsw <swid> - Remove a switch\n");
+ help.append("\t sretry [count] - Set/show num of retries\n");
+ help.append("\t addsw <swid> - Add a switch\n");
+ help.append("\t remsw <swid> - Remove a switch\n");
help.append("\t page - Print aging info\n");
help.append("\t sage - Set/Show aging time limit\n");
- help.append("\t eage - Enable aging\n");
- help.append("\t dage - Disable aging\n");
+ help.append("\t eage - Enable aging\n");
+ help.append("\t dage - Disable aging\n");
help.append("\t pthrot - Print throttling\n");
help.append("\t ethrot - Enable throttling\n");
help.append("\t dthrot - Disable throttling\n");
}
}
ci.println("\n");
- ci.println(" NodeConnector Edge ");
+ ci.println(" NodeConnector Edge ");
Set<NodeConnector> keySet = prodMap.keySet();
if (keySet == null) {
return;
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
logger.debug("DESTROY called!");
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
logger.debug("START called!");
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
logger.debug("STOP called!");
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init() {
this.controller.addMessageListener(OFType.FLOW_REMOVED, this);
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
}
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
}
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
}
* will be inserted automatically to synchronize the
* progression.
*/
- result = asyncMsgSend(node, sw, msg, rid);
+ result = asyncMsgSend(node, sw, msg, rid);
}
return getStatusInternal(result, action, rid);
} else {
if ((rid == null) || (rid == 0)) {
return;
}
-
+
/*
* Notifies the caller that error has been reported for a previous flow
* programming request
return new Status(StatusCode.INTERNALERROR,
"Failed to send Barrier message.");
}
-
+
@Override
public Status asyncSendBarrierMessage(Node node) {
if (!node.getType().equals(NodeIDType.OPENFLOW)) {
return new Status(StatusCode.INTERNALERROR,
"Failed to send Barrier message.");
}
-
+
/**
* This method sends the message asynchronously until the number of messages
* sent reaches a threshold. Then a Barrier message is sent automatically
* for sync purpose. An unique Request ID associated with the message is
* passed down by the caller. The Request ID will be returned to the caller
* when an error message is received from the switch.
- *
+ *
* @param node
* The node
* @param msg
xid = sw.asyncSend(msg);
addXid2Rid(swid, xid, rid);
-
+
Map<Integer, Long> swxid2rid = this.xid2rid.get(swid);
if (swxid2rid == null) {
return result;
}
-
+
int size = swxid2rid.size();
if (size % barrierMessagePriorCount == 0) {
result = asyncSendBarrierMessage(node);
}
-
+
return result;
}
-
+
/**
* A number of async messages are sent followed by a synchronous Barrier
* message. This method returns the maximum async messages that can be sent
* before the Barrier message.
- *
+ *
* @return The max count of async messages sent prior to Barrier message
*/
private int getBarrierMessagePriorCount() {
return rv;
}
-
+
/**
* This method returns the message Request ID previously assigned by the
* caller for a given OF message xid
- *
+ *
* @param swid
* The switch id
* @param xid
/**
* This method returns a copy of outstanding xid to rid mappings.for a given
* switch
- *
+ *
* @param swid
* The switch id
* @return a copy of xid2rid mappings
*/
public Map<Integer, Long> getSwXid2Rid(long swid) {
Map<Integer, Long> swxid2rid = this.xid2rid.get(swid);
-
+
if (swxid2rid != null) {
return new HashMap<Integer, Long>(swxid2rid);
} else {
/**
* Adds xid to rid mapping to the local DB
- *
+ *
* @param swid
* The switch id
* @param xid
/**
* When an Error message is received, this method will be invoked to remove
* the offending xid from the local DB.
- *
+ *
* @param swid
* The switch id
* @param xid
/**
* Convert various result into Status
- *
+ *
* @param result
* The returned result from previous action
* @param action
"send", action, "Internal Error"));
}
}
-
+
/**
* When a Barrier reply is received, this method will be invoked to clear
* the local DB
- *
+ *
* @param swid
* The switch id
*/
@Override
public void updateNode(Node node, UpdateType type, Set<Property> props) {
long swid = (Long)node.getID();
-
+
switch (type) {
case ADDED:
Map<Integer, Long> swxid2rid = new HashMap<Integer, Long>();
ci.println("Please enter a valid node id");
return;
}
-
+
long sid;
try {
sid = HexEncode.stringToLong(st);
ci.println("Please enter a valid node id");
return;
}
-
+
Map<Integer, Long> swxid2rid = this.xid2rid.get(sid);
if (swxid2rid == null) {
ci.println("The node id entered does not exist");
}
ci.println("xid rid");
-
+
Set<Integer> xidSet = swxid2rid.keySet();
if (xidSet == null) {
return;
/**
* Converts an openflow list of flow statistics in a SAL list of FlowOnNode
* objects
- *
- *
- *
+ *
+ *
+ *
*/
public class FlowStatisticsConverter {
private static final Logger log = LoggerFactory
if (bw != null) {
props.add(bw);
}
-
+
Bandwidth abw = InventoryServiceHelper.OFPortToBandWidth(port.getAdvertisedFeatures());
if (abw != null) {
- AdvertisedBandwidth a = new AdvertisedBandwidth(abw.getValue());
- if (a != null) {
- props.add(a);
- }
+ AdvertisedBandwidth a = new AdvertisedBandwidth(abw.getValue());
+ if (a != null) {
+ props.add(a);
+ }
}
Bandwidth sbw = InventoryServiceHelper.OFPortToBandWidth(port.getSupportedFeatures());
if (sbw != null) {
- SupportedBandwidth s = new SupportedBandwidth(sbw.getValue());
- if (s != null) {
- props.add(s);
- }
+ SupportedBandwidth s = new SupportedBandwidth(sbw.getValue());
+ if (s != null) {
+ props.add(s);
+ }
}
Bandwidth pbw = InventoryServiceHelper.OFPortToBandWidth(port.getPeerFeatures());
if (pbw != null) {
- PeerBandwidth p = new PeerBandwidth(pbw.getValue());
- if (p != null) {
- props.add(p);
- }
+ PeerBandwidth p = new PeerBandwidth(pbw.getValue());
+ if (p != null) {
+ props.add(p);
+ }
}
props.add(new Name(port.getName()));
props.add(InventoryServiceHelper.OFPortToConfig(port.getConfig()));
/**
* Returns the average transmit rate in bps
- *
+ *
* @return the average transmit rate [bps]
*/
public long getAverageTxRate() {
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init() {
flowStatistics = new ConcurrentHashMap<Long, List<OFStatistics>>();
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
}
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
// Start managed timers
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
// Stop managed timers
/**
* Update the cached port rates for this switch with the latest retrieved
* port transmit byte count
- *
+ *
* @param switchId
*/
private synchronized void updatePortsTxRate(long switchId) {
/**
* Abstract class which provides the utilities for converting the Openflow port
* number to the equivalent NodeConnector and vice versa
- *
- *
- *
+ *
+ *
+ *
*/
public abstract class PortConverter {
private static final Logger log = LoggerFactory
/**
* Converts an openflow list of port statistics in a SAL list of
* NodeConnectorStatistics objects
- *
- *
- *
+ *
+ *
+ *
*/
public class PortStatisticsConverter {
private static final Logger log = LoggerFactory
/**
* Returns whether the passed NodeConnector belongs to the container
*
- * @param container container name
- * @param p node connector to test
- * @return true if belongs false otherwise
+ * @param container container name
+ * @param p node connector to test
+ * @return true if belongs false otherwise
*/
public boolean containerOwnsNodeConnector(String container, NodeConnector p) {
// All node connectors belong to the default container
/**
* Returns whether the passed NodeConnector belongs to the container
*
- * @param container container name
- * @param table node table to test
- * @return true if belongs false otherwise
+ * @param container container name
+ * @param table node table to test
+ * @return true if belongs false otherwise
*/
public boolean containerOwnsNodeTable(String container, NodeTable table) {
// All node table belong to the default container
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
@SuppressWarnings("unchecked")
void init(Component c) {
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
logger.trace("DESTROY called!");
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
logger.trace("START called!");
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
logger.trace("STOP called!");
/**
* Retrieve SAL service IPluginOutTopologyService
- *
+ *
* @param s
* Called by Dependency Manager as soon as the SAL service is
* available
/**
* called when SAL service IPluginOutTopologyService is no longer available
- *
+ *
* @param s
* Called by Dependency Manager as soon as the SAL service is
* unavailable
/**
* Retrieve OF protocol_plugin service IRefreshInternalProvider
- *
+ *
* @param s
* Called by Dependency Manager as soon as the SAL service is
* available
/**
* called when OF protocol_plugin service IRefreshInternalProvider is no
* longer available
- *
+ *
* @param s
* Called by Dependency Manager as soon as the SAL service is
* unavailable
er.readFrom(bb);
return er.toString();
}
-
+
// Handle OF1.0 errors here
OFErrorType et = OFErrorType.values()[0xffff & error.getErrorType()];
String errorStr = "Error : " + et.toString();
import org.openflow.protocol.OFError;
public class V6Error extends OFError {
- private static final long serialVersionUID = 1L;
- public static int MINIMUM_LENGTH = 20;//OfHdr(8) + NXET_VENDOR(2) + NXEC_VENDOR_ERROR(2) + struct nx_vendor_error(8)
- public static final short NICIRA_VENDOR_ERRORTYPE = (short)0xb0c2;
- protected int V6VendorId;
+ private static final long serialVersionUID = 1L;
+ public static int MINIMUM_LENGTH = 20;//OfHdr(8) + NXET_VENDOR(2) + NXEC_VENDOR_ERROR(2) + struct nx_vendor_error(8)
+ public static final short NICIRA_VENDOR_ERRORTYPE = (short)0xb0c2;
+ protected int V6VendorId;
protected short V6VendorErrorType;
protected short V6VendorErrorCode;
protected byte[] V6ErrorData;
-
+
public V6Error(OFError e) {
this.length = (short)e.getLengthU();
this.errorType = e.getErrorType();
this.errorCode = e.getErrorCode();
this.xid = e.getXid();
}
-
+
@Override
public void readFrom(ByteBuffer data) {
this.V6VendorId = data.getInt();
if (dataLength > 0) {
this.V6ErrorData = new byte[dataLength];
data.get(this.V6ErrorData);
- }
+ }
}
-
+
/**
* @return the V6VendorId
*/
public int getVendorId() {
return V6VendorId;
}
-
+
/**
* @return the V6VendorErrorType
*/
public short getVendorErrorType() {
return V6VendorErrorType;
}
-
+
/**
* @return the VendorErrorType
*/
public short getVendorErrorCode() {
return V6VendorErrorCode;
}
-
+
/**
* @return the Error Bytes
*/
public byte[] getError() {
return V6ErrorData;
}
-
+
@Override
public int hashCode() {
final int prime = 31;
/**
* This class is used to create IPv6 Vendor Extension messages. Specfically, It
* defines the methods used in creation of Vendor specific IPv6 Flow Mod message.
- *
+ *
*
*/
public class V6FlowMod extends OFVendor implements Cloneable {
private static int IPV6_EXT_MIN_HDR_LEN = 36;
/**
- * Constructor for the V6FlowMod class. Initializes OFVendor (parent class)
+ * Constructor for the V6FlowMod class. Initializes OFVendor (parent class)
* fields by calling the parent class' constructor.
*/
public V6FlowMod() {
/**
* This method sets the match fields of V6FlowMod object
- * @param match V6Match object for this V6FlowMod message
+ * @param match V6Match object for this V6FlowMod message
*/
public void setMatch(V6Match match) {
this.match = match;
/**
* Sets the list of actions V6FlowMod message
- * @param actions a list of ordered OFAction objects
+ * @param actions a list of ordered OFAction objects
*/
public void setActions(List<OFAction> actions) {
this.actions = actions;
/**
* Sets the priority field of V6FlowMod message
- * @param priority Priority of the message
+ * @param priority Priority of the message
*/
public void setPriority(short priority) {
this.priority = priority;
/**
* Sets the cookie field of V6FlowMod message
- * @param cookie Cookie of the message
+ * @param cookie Cookie of the message
*/
public void setCookie(long cookie) {
this.cookie = cookie;
/**
* Sets the command field of V6FlowMod message
- * @param command Command type of the message (ADD or DELETE)
+ * @param command Command type of the message (ADD or DELETE)
*/
public V6FlowMod setCommand(short command) {
this.command = command;
/**
* Sets the outPort field of V6FlowMod message
- * @param outPort outPort of the message
+ * @param outPort outPort of the message
*/
public V6FlowMod setOutPort(OFPort port) {
this.outPort = port.getValue();
/**
* Sets the idle_timeout of V6FlowMod message
- * @param idleTimeout idle timeout for this message
+ * @param idleTimeout idle timeout for this message
*/
public void setIdleTimeout(short idleTimeout) {
this.idleTimeout = idleTimeout;
/**
* Sets the hardTimeout field of V6FlowMod message
- * @param hardTimeout hard timeout of the message
+ * @param hardTimeout hard timeout of the message
*/
public void setHardTimeout(short hardTimeout) {
this.hardTimeout = hardTimeout;
/**
* Returns the Flow Mod message subtype for V6FlowMod message
- * @return message subtype
+ * @return message subtype
*/
private int getIPv6ExtensionFlowModAddSubType() {
return IPV6EXT_ADD_FLOW_MSG_TYPE;
}
-
+
/**
* Returns the minimum header size for V6Flow Message type
- * @return minimum header size
+ * @return minimum header size
*/
public int getV6FlowModMinHdrSize() {
return IPV6_EXT_MIN_HDR_LEN;
}
-
+
/**
* Sets the Vendor type in OFVendor message
*/
public void setVendor() {
super.setVendor(V6StatsRequest.NICIRA_VENDOR_ID);
}
-
+
/**
* Get flags
* @return
public void setFlags(short flags) {
this.flags = flags;
}
-
+
/**
* This method forms the Vendor extension IPv6 Flow Mod message.It uses the
- * fields in V6FlowMod class, and writes the data according to vendor
+ * fields in V6FlowMod class, and writes the data according to vendor
* extension format. The fields include flow properties (cookie, timeout,
- * priority, etc), flow match, and action list. It also takes care of
+ * priority, etc), flow match, and action list. It also takes care of
* required padding.
*/
/**
* Forms the clone of V6FlowMod Object. If Object is returned
- * successfully, then returns the cloned object. Throws an
+ * successfully, then returns the cloned object. Throws an
* exception if cloning is not supported.
*/
@Override
/**
* This Class forms the vendor specific IPv6 Flow Match messages as well as
* processes the vendor specific IPv6 Stats Reply message.
- *
- * For message creation, it parses the user entered IPv6 match fields, creates
- * a sub-message for each field which are later used to form the complete
- * message.
- *
+ *
+ * For message creation, it parses the user entered IPv6 match fields, creates
+ * a sub-message for each field which are later used to form the complete
+ * message.
+ *
* For message processing, it parses the incoming message and reads each field
* of the message and stores in appropriate field of V6Match object.
- *
+ *
*
*/
public class V6Match extends OFMatch implements Cloneable {
/**
* Sets this (V6Match) object's member variables based on a comma-separated key=value pair similar to OFMatch's fromString.
- *
+ *
* @param match a key=value comma separated string.
*/
@Override
/**
* This Class processes the OpenFlow Vendor Extension Reply message of a Stats
* Request. It parses the reply message and initializes fields of V6StatsReply
- * object. Multiple instances of this class objects are created and used by
+ * object. Multiple instances of this class objects are created and used by
* OpenDaylight's Troubleshooting Application.
- *
+ *
*/
public class V6StatsReply extends OFVendorStatistics {
/**
- * This Class creates the OpenFlow Vendor Extension IPv6 Flow Stats Request
+ * This Class creates the OpenFlow Vendor Extension IPv6 Flow Stats Request
* messages and also reads the Reply of a stats request message.
- *
+ *
*/
public class V6StatsRequest extends OFVendorStatistics {
/**
* stub protocol plugin Activator
- *
- *
+ *
+ *
*/
public class Activator extends ComponentActivatorAbstractBase {
protected static final Logger logger = LoggerFactory
/**
* Function called when the activator starts just after some initializations
* are done by the ComponentActivatorAbstractBase.
- *
+ *
*/
public void init() {
Node.NodeIDType.registerIDType("STUB", Integer.class);
/**
* Function called when the activator stops just before the cleanup done by
* ComponentActivatorAbstractBase
- *
+ *
*/
public void destroy() {
Node.NodeIDType.unRegisterIDType("STUB");
/**
* Function that is used to communicate to dependency manager the list of
* known implementations for services inside a container
- *
- *
+ *
+ *
* @return An array containing all the CLASS objects that will be
* instantiated in order to get an fully working implementation
* Object
/**
* Function that is called when configuration of the dependencies is
* required.
- *
+ *
* @param c
* dependency manager Component object, used for configuring the
* dependencies exported and imported
c.setInterface(IPluginInInventoryService.class.getName(), props);
}
}
-
+
public Object[] getGlobalImplementations() {
Object[] res = { FlowProgrammerService.class, StubNodeFactory.class, StubNodeConnectorFactory.class };
return res;
}
-
+
public void configureGlobalInstance(Component c, Object imp){
if (imp.equals(FlowProgrammerService.class)) {
// export the service to be used by SAL
props.put("protocolName", "STUB");
c.setInterface(INodeConnectorFactory.class.getName(), props);
}
-
+
}
}
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
}
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
}
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
}
-
-
+
+
/**
* Synchronously add a flow to the network node
- *
+ *
* @param node
* @param flow
*/
/**
* Synchronously modify existing flow on the switch
- *
+ *
* @param node
* @param flow
*/
}
/**
* Synchronously remove the flow from the network node
- *
+ *
* @param node
* @param flow
*/
/**
* Asynchronously add a flow to the network node
- *
+ *
* @param node
* @param flow
* @param rid
/**
* Asynchronously modify existing flow on the switch
- *
+ *
* @param node
* @param flow
* @param rid
/**
* Asynchronously remove the flow from the network node
- *
+ *
* @param node
* @param flow
* @param rid
/**
* Remove all flows present on the network node
- *
+ *
* @param node
*/
public Status removeAllFlows(Node node){
/**
* Send Barrier message synchronously. The caller will be blocked until the
* Barrier reply arrives.
- *
+ *
* @param node
*/
public Status syncSendBarrierMessage(Node node){
/**
* Send Barrier message asynchronously. The caller is not blocked.
- *
+ *
* @param node
*/
public Status asyncSendBarrierMessage(Node node){
private static final Logger logger = LoggerFactory
.getLogger(InventoryService.class);
- private ConcurrentMap<Node, Map<String, Property>> nodeProps; // properties are maintained in global container only
- private ConcurrentMap<NodeConnector, Map<String, Property>> nodeConnectorProps; // properties are maintained in global container only
-
+ private ConcurrentMap<Node, Map<String, Property>> nodeProps; // properties
+ // are
+ // maintained
+ // in global
+ // container
+ // only
+ private ConcurrentMap<NodeConnector, Map<String, Property>> nodeConnectorProps; // properties
+ // are
+ // maintained
+ // in
+ // global
+ // container
+ // only
/**
* Function called by the dependency manager when all the required
nodeProps = new ConcurrentHashMap<Node, Map<String, Property>>();
nodeConnectorProps = new ConcurrentHashMap<NodeConnector, Map<String, Property>>();
Node.NodeIDType.registerIDType("STUB", Integer.class);
- NodeConnector.NodeConnectorIDType.registerIDType("STUB", Integer.class, "STUB");
+ NodeConnector.NodeConnectorIDType.registerIDType("STUB", Integer.class,
+ "STUB");
+
+ setupNodeProps();
+ setupNodeConnectorProps();
+ }
+
+ private void setupNodeConnectorProps() {
+ Map<String, Property> ncPropMap = new HashMap<String, Property>();
+ Capabilities cap = new Capabilities(
+ CapabilitiesType.FLOW_STATS_CAPABILITY.getValue());
+ ncPropMap.put(Capabilities.CapabilitiesPropName, cap);
+ Bandwidth bw = new Bandwidth(Bandwidth.BW1Gbps);
+ ncPropMap.put(Bandwidth.BandwidthPropName, bw);
+ State st = new State(State.EDGE_UP);
+ ncPropMap.put(State.StatePropName, st);
+
+ // setup property map for all node connectors
+ NodeConnector nc;
+ Node node;
+ try {
+ node = new Node("STUB", new Integer(0xCAFE));
+ nc = new NodeConnector("STUB", 0xCAFE, node);
+ } catch (ConstructionException e) {
+ nc = null;
+ node = null;
+ }
+ nodeConnectorProps.put(nc, ncPropMap);
+
+ try {
+ node = new Node("STUB", 3366);
+ nc = new NodeConnector("STUB", 12, node);
+ } catch (ConstructionException e) {
+ nc = null;
+ node = null;
+ }
+ nodeConnectorProps.put(nc, ncPropMap);
+
+ try {
+ node = new Node("STUB", 4477);
+ nc = new NodeConnector("STUB", 34, node);
+ } catch (ConstructionException e) {
+ nc = null;
+ node = null;
+ }
+ nodeConnectorProps.put(nc, ncPropMap);
+
+ }
+
+ private void setupNodeProps() {
+ Map<String, Property> propMap = new HashMap<String, Property>();
+
+ Tables t = new Tables((byte) 1);
+ propMap.put(Tables.TablesPropName, t);
+ Capabilities c = new Capabilities((int) 3);
+ propMap.put(Capabilities.CapabilitiesPropName, c);
+ Actions a = new Actions((int) 2);
+ propMap.put(Actions.ActionsPropName, a);
+ Buffers b = new Buffers((int) 1);
+ propMap.put(Buffers.BuffersPropName, b);
+ Long connectedSinceTime = 100000L;
+ TimeStamp timeStamp = new TimeStamp(connectedSinceTime,
+ "connectedSince");
+ propMap.put(TimeStamp.TimeStampPropName, timeStamp);
+
+ // setup property map for all nodes
+ Node node;
+ try {
+ node = new Node("STUB", new Integer(0xCAFE));
+ } catch (ConstructionException e) {
+ node = null;
+ }
+
+ nodeProps.put(node, propMap);
+
+ try {
+ node = new Node("STUB", 3366);
+ } catch (ConstructionException e) {
+ node = null;
+ }
+ nodeProps.put(node, propMap);
+
+ try {
+ node = new Node("STUB", 4477);
+ } catch (ConstructionException e) {
+ node = null;
+ }
+ nodeProps.put(node, propMap);
+
}
/**
*/
@Override
public ConcurrentMap<Node, Map<String, Property>> getNodeProps() {
-
- // setup nodeProps
- Map<String, Property> propMap = new HashMap<String, Property>();
-
- Tables t = new Tables((byte)1);
- propMap.put(Tables.TablesPropName, t);
- Capabilities c = new Capabilities((int)3);
- propMap.put(Capabilities.CapabilitiesPropName, c);
- Actions a = new Actions((int)2);
- propMap.put(Actions.ActionsPropName, a);
- Buffers b = new Buffers((int)1);
- propMap.put(Buffers.BuffersPropName, b);
- Long connectedSinceTime = 100000L;
- TimeStamp timeStamp = new TimeStamp(connectedSinceTime,
- "connectedSince");
- propMap.put(TimeStamp.TimeStampPropName, timeStamp);
-
- // setup property map for all nodes
- Node node;
- try{
- node = new Node("STUB", new Integer(0xCAFE));
- }catch(ConstructionException e){
- node = null;
- }
-
- nodeProps.put(node, propMap);
-
- try{
- node = new Node("STUB", 3366);
- }catch(ConstructionException e){
- node = null;
- }
- nodeProps.put(node, propMap);
-
- try{
- node = new Node("STUB", 4477);
- }catch(ConstructionException e){
- node = null;
- }
- nodeProps.put(node, propMap);
-
return nodeProps;
}
@Override
public ConcurrentMap<NodeConnector, Map<String, Property>> getNodeConnectorProps(
Boolean refresh) {
-
- // setup nodeConnectorProps
- Map<String, Property> ncPropMap = new HashMap<String, Property>();
- Capabilities cap = new Capabilities
- (CapabilitiesType.FLOW_STATS_CAPABILITY.getValue());
- ncPropMap.put(Capabilities.CapabilitiesPropName, cap);
- Bandwidth bw = new Bandwidth (Bandwidth.BW1Gbps);
- ncPropMap.put(Bandwidth.BandwidthPropName, bw);
- State st = new State (State.EDGE_UP);
- ncPropMap.put(State.StatePropName, st);
-
- // setup property map for all node connectors
- NodeConnector nc;
- Node node;
- try{
- node = new Node("STUB", new Integer(0xCAFE));
- nc = new NodeConnector("STUB", 0xCAFE, node);
- }catch(ConstructionException e){
- nc = null;
- node = null;
- }
- nodeConnectorProps.put(nc, ncPropMap);
-
- try{
- node = new Node("STUB", 3366);
- nc = new NodeConnector("STUB", 12, node);
- } catch(ConstructionException e){
- nc = null;
- node = null;
- }
- nodeConnectorProps.put(nc, ncPropMap);
-
- try{
- node = new Node("STUB", 4477);
- nc = new NodeConnector("STUB", 34, node);
- }catch(ConstructionException e){
- nc = null;
- node = null;
- }
- nodeConnectorProps.put(nc, ncPropMap);
-
- return nodeConnectorProps;
+ return nodeConnectorProps;
}
-
}
import org.opendaylight.controller.sal.reader.NodeTableStatistics;
/**
* Stub Implementation for IPluginInReadService used by SAL
- *
- *
+ *
+ *
*/
public class ReadService implements IPluginInReadService {
private static final Logger logger = LoggerFactory
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init() {
}
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
}
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
}
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
}
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
}
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
}
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
}
-
+
public NodeConnector fromStringNoNode(String typeStr, String IDStr,
Node n){
if(typeStr.equals("STUB")){
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
}
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
}
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
}
-
+
public Node fromString(String nodeType, String nodeId){
if(nodeType.equals("STUB"))
try{
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public void init() {
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
log.debug("Routing destroy() is called");
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
log.debug("Routing start() is called");
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
public void stop() {
log.debug("Routing stop() is called");
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
@XmlSeeAlso({Controller.class, Drop.class, Flood.class, FloodAll.class, HwPath.class, Loopback.class, Output.class,
- PopVlan.class, PushVlan.class, SetDlDst.class, SetDlSrc.class, SetDlType.class, SetNwDst.class, SetNwSrc.class,
- SetNwTos.class, SetTpDst.class, SetTpSrc.class, SetVlanCfi.class, SetVlanId.class, SetVlanPcp.class, SwPath.class})
+ PopVlan.class, PushVlan.class, SetDlDst.class, SetDlSrc.class, SetDlType.class, SetNwDst.class, SetNwSrc.class,
+ SetNwTos.class, SetTpDst.class, SetTpSrc.class, SetVlanCfi.class, SetVlanId.class, SetVlanPcp.class, SwPath.class})
public abstract class Action {
private static final Logger logger = LoggerFactory.getLogger(Action.class);
private static boolean debug = false; // Enable to find where in the code an invalid assignment is made
/*
public Action (ActionType type, Object value) {
- this.type = type;
- this.value = value;
- this.isValid = true;
+ this.type = type;
+ this.value = value;
+ this.isValid = true;
} */
/**
/**
* Returns whether the Action is valid or not
*
- * @return boolean
+ * @return boolean
*/
public boolean isValid() {
return isValid;
@XmlAccessorType(XmlAccessType.NONE)
public class Output extends Action {
- @XmlElement
+ @XmlElement
private NodeConnector port;
/* Dummy constructor for JAXB */
* Execute it multiple times to achieve QinQ
*
* 802.1q = [TPID(16) + TCI(16)]
- * TCI = [PCP(3) + CFI(1) + VID(12)]
+ * TCI = [PCP(3) + CFI(1) + VID(12)]
*
*
*
public byte[] getDlAddress() {
return address.clone();
}
-
+
@XmlElement(name = "address")
public String getDlAddressString() {
return HexEncode.bytesToHexString(address);
}
-
+
@Override
public boolean equals(Object obj) {
if (this == obj)
public String getDlAddressString() {
return HexEncode.bytesToHexString(address);
}
-
+
@Override
public boolean equals(Object obj) {
if (this == obj)
@XmlAccessorType(XmlAccessType.NONE)
public class SetDlType extends Action {
- @XmlElement
+ @XmlElement
private int dlType;
/* Dummy constructor for JAXB */
/**
* Returns the network address this action will set
*
- * @return InetAddress
+ * @return InetAddress
*/
public InetAddress getAddress() {
return address;
}
-
+
@XmlElement (name="address")
public String getAddressAsString() {
- return address.getHostAddress();
+ return address.getHostAddress();
}
@Override
/**
* Returns the network address this action will set
*
- * @return InetAddress
+ * @return InetAddress
*/
public InetAddress getAddress() {
return address;
@XmlElement (name="address")
public String getAddressAsString() {
- return address.getHostAddress();
+ return address.getHostAddress();
}
@Override
@XmlAccessorType(XmlAccessType.NONE)
public class SetNwTos extends Action {
- @XmlElement
+ @XmlElement
private int tos;
/* Dummy constructor for JAXB */
@XmlAccessorType(XmlAccessType.NONE)
public class SetTpDst extends Action {
- @XmlElement
+ @XmlElement
private int port;
/* Dummy constructor for JAXB */
@XmlAccessorType(XmlAccessType.NONE)
public class SetTpSrc extends Action {
- @XmlElement
+ @XmlElement
private int port;
/* Dummy constructor for JAXB */
@XmlAccessorType(XmlAccessType.NONE)
public class SetVlanCfi extends Action {
- @XmlElement
+ @XmlElement
private int cfi;
/* Dummy constructor for JAXB */
@XmlAccessorType(XmlAccessType.NONE)
public class SetVlanId extends Action {
- @XmlElement
+ @XmlElement
private int vlanId;
- private SetVlanId() {
-
- }
-
+ private SetVlanId() {
+
+ }
+
public SetVlanId(int vlanId) {
type = ActionType.SET_VLAN_ID;
this.vlanId = vlanId;
@XmlAccessorType(XmlAccessType.NONE)
public class SetVlanPcp extends Action {
- @XmlElement
+ @XmlElement
private int pcp;
- private SetVlanPcp() {
-
- }
-
+ private SetVlanPcp() {
+
+ }
+
public SetVlanPcp(int pcp) {
type = ActionType.SET_VLAN_PCP;
this.pcp = pcp;
public String toStringPretty() {
return this.prettyLevel;
}
-
+
public static AppRoleLevel fromString(String levelString) {
- for (AppRoleLevel level : AppRoleLevel.values()) {
- if (level.toString().equals(levelString)) {
- return level;
- }
- }
- return null;
+ for (AppRoleLevel level : AppRoleLevel.values()) {
+ if (level.toString().equals(levelString)) {
+ return level;
+ }
+ }
+ return null;
}
-}
+}
/**
* Create a Role name for the application
*
- * @param role the role name
- * @param userLevel the user level in the application context
- * @return the status of the request
- */
+ * @param role the role name
+ * @param userLevel the user level in the application context
+ * @return the status of the request
+ */
public Status createRole(String role, AppRoleLevel userLevel);
/**
* Remove a Role
- *
+ *
* @param role the role name
* @return the status of the request
*/
public Status removeRole(String role);
-
+
/**
* Return the list of roles configured for the application
*
*
* @param roleName the role name to query
* @return the application level of the given role in the application context as specified by {@link AppRoleLevel}
- * if the role is not part of this application's roles, <code>NOUSER<code> is returned
+ * if the role is not part of this application's roles, <code>NOUSER<code> is returned
*/
public AppRoleLevel getApplicationRoleLevel(String roleName);
* Returns whether the specified role is part of this application's roles
*
* @param roleName the role name to test
- * @return true if the role belongs to this application, false otherwise
+ * @return true if the role belongs to this application, false otherwise
*/
public boolean isApplicationRole(String roleName);
* @return the status of the request
*/
public Status assignResourceGroupToRole(String groupName, String role);
-
+
/**
* Unassign the passed resource group from the specified role
- *
+ *
* @param group
* @param role
* @return the status of the request
*/
public Status unassignResourceGroupFromRole(String group, String role);
-
+
/**
* Returns the list of resource groups the given Role is authorized to use
* The returning object expresses the resource group name and the access
* Describes the user role level in the controller space
*/
public enum UserLevel implements Serializable {
- SYSTEMADMIN(0, "System-Admin", "System Administrator"), // can do everything
- NETWORKADMIN(1, "Network-Admin", "Network Administrator"), // can do everything but setting a system admin user profile
- NETWORKOPERATOR(2, "Network-Operator", "Network Operator"), // can only see what is configured anywhere
- CONTAINERUSER(4, "Container-User", "Container User"), // container context user
- APPUSER(5, "App-User", "Application User"), // application context user
+ SYSTEMADMIN(0, "System-Admin", "System Administrator"), // can do everything
+ NETWORKADMIN(1, "Network-Admin", "Network Administrator"), // can do everything but setting a system admin user profile
+ NETWORKOPERATOR(2, "Network-Operator", "Network Operator"), // can only see what is configured anywhere
+ CONTAINERUSER(4, "Container-User", "Container User"), // container context user
+ APPUSER(5, "App-User", "Application User"), // application context user
NOUSER(255, "Not Authorized", "Not Authorized");
private int userLevel;
@XmlRootElement
public class Actions extends Property {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 1L;
@XmlElement
private int actionsValue;
-
- public enum ActionType {
- OUTPUT_PORT_ACTION(1<<0),
- VLAN_VID_ACTION(1<<1),
- VLAN_PCP_ACTION(1<<2),
- VLAN_STRIP_ACTION(1<<3),
- DLSRC_ACTION(1<<4),
- DLDST_ACTION(1<<5),
- NWSRC_ACTION(1<<6),
- NWDST_ACTION(1<<7),
- NWTOS_ACTION(1<<8),
- TPTSRC_ACTION(1<<9),
- TPTDST_ACTION(1<<10),
- ENQUEUE_ACTION(1<<11),
- VENDOR_ACTION(0xffff);
- private final int at;
- ActionType(int val) {
- this.at = val;
- }
- public int getValue() {
- return at;
- }
+
+ public enum ActionType {
+ OUTPUT_PORT_ACTION(1<<0),
+ VLAN_VID_ACTION(1<<1),
+ VLAN_PCP_ACTION(1<<2),
+ VLAN_STRIP_ACTION(1<<3),
+ DLSRC_ACTION(1<<4),
+ DLDST_ACTION(1<<5),
+ NWSRC_ACTION(1<<6),
+ NWDST_ACTION(1<<7),
+ NWTOS_ACTION(1<<8),
+ TPTSRC_ACTION(1<<9),
+ TPTDST_ACTION(1<<10),
+ ENQUEUE_ACTION(1<<11),
+ VENDOR_ACTION(0xffff);
+ private final int at;
+ ActionType(int val) {
+ this.at = val;
+ }
+ public int getValue() {
+ return at;
+ }
}
-
+
public static final String ActionsPropName = "actions";
/**
* Construct a actions property
public Actions clone() {
return new Actions(this.actionsValue);
}
-
+
public int getValue() {
- return this.actionsValue;
+ return this.actionsValue;
}
-
-
+
+
@Override
public int hashCode() {
final int prime = 31;
@XmlRootElement
@SuppressWarnings("serial")
public class AdvertisedBandwidth extends Bandwidth {
- public static final String AdvertisedBandwidthPropName = "advertisedBandwidth";
-
- public AdvertisedBandwidth(long value) {
- super(AdvertisedBandwidthPropName);
- this.bandwidthValue = value;
- }
-
- /*
+ public static final String AdvertisedBandwidthPropName = "advertisedBandwidth";
+
+ public AdvertisedBandwidth(long value) {
+ super(AdvertisedBandwidthPropName);
+ this.bandwidthValue = value;
+ }
+
+ /*
* Private constructor used for JAXB mapping
*/
private AdvertisedBandwidth() {
- super(AdvertisedBandwidthPropName);
- this.bandwidthValue = 0;
+ super(AdvertisedBandwidthPropName);
+ this.bandwidthValue = 0;
}
-
- public AdvertisedBandwidth clone() {
- return new AdvertisedBandwidth(this.bandwidthValue);
+
+ public AdvertisedBandwidth clone() {
+ return new AdvertisedBandwidth(this.bandwidthValue);
}
-
+
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
super(BandwidthPropName);
this.bandwidthValue = (long) bandwidth;
}
-
+
public Bandwidth(String name) {
super(name);
}
*/
@XmlRootElement
public class Buffers extends Property {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 1L;
@XmlElement
private int buffersValue;
-
+
public static final String BuffersPropName = "buffers";
-
+
/**
* Construct a Buffers property
*
- * @param buffers the Buffers
+ * @param buffers the Buffers
* @return Constructed object
*/
public Buffers(int buffers) {
public int getValue() {
return this.buffersValue;
}
-
+
@Override
public int hashCode() {
final int prime = 31;
*/
@XmlRootElement
public class Capabilities extends Property {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 1L;
@XmlElement
private int capabilitiesValue;
-
- public enum CapabilitiesType {
- FLOW_STATS_CAPABILITY(1<<0),
- TABLE_STATS_CAPABILITY(1<<1),
- PORT_STATS_CAPABILITY(1<<2),
- STP_CAPABILITY(1<<3),
- RSVD_CAPABILITY(1<<4),
- IP_REASSEM_CAPABILITY(1<<5),
- QUEUE_STATS_CAPABILITY(1<<6),
- ARP_MATCH_IP_CAPABILITY(1<<7);
- private final int ct;
- CapabilitiesType(int val) {
- this.ct = val;
- }
- public int getValue() {
- return ct;
- }
+
+ public enum CapabilitiesType {
+ FLOW_STATS_CAPABILITY(1<<0),
+ TABLE_STATS_CAPABILITY(1<<1),
+ PORT_STATS_CAPABILITY(1<<2),
+ STP_CAPABILITY(1<<3),
+ RSVD_CAPABILITY(1<<4),
+ IP_REASSEM_CAPABILITY(1<<5),
+ QUEUE_STATS_CAPABILITY(1<<6),
+ ARP_MATCH_IP_CAPABILITY(1<<7);
+ private final int ct;
+ CapabilitiesType(int val) {
+ this.ct = val;
+ }
+ public int getValue() {
+ return ct;
+ }
}
-
+
public static final String CapabilitiesPropName = "capabilities";
/**
* Construct a Capabilities property
}
public int getValue() {
- return this.capabilitiesValue;
+ return this.capabilitiesValue;
}
-
+
@Override
public int hashCode() {
final int prime = 31;
}
}
}
-
+
// Register with OSGi the provider for the service IContainerAware
context.registerService(
IContainerAware.class.getName(), this, null);
/**
* The class represents Admin Config status
- *
- *
+ *
+ *
*/
@XmlRootElement
@SuppressWarnings("serial")
/**
* Returns whether the specified match is allowed
*
- * @param match the match to test
+ * @param match the match to test
* @return true if the match is allowed, false otherwise
*/
public boolean allowsMatch(Match target) {
*/
@XmlRootElement
public class PeerBandwidth extends Bandwidth {
- private static final long serialVersionUID = 1L;
-
- public static final String PeerBandwidthPropName = "peerBandwidth";
-
- public PeerBandwidth(long value) {
- super(PeerBandwidthPropName);
- this.bandwidthValue = value;
- }
-
- /*
+ private static final long serialVersionUID = 1L;
+
+ public static final String PeerBandwidthPropName = "peerBandwidth";
+
+ public PeerBandwidth(long value) {
+ super(PeerBandwidthPropName);
+ this.bandwidthValue = value;
+ }
+
+ /*
* Private constructor used for JAXB mapping
*/
private PeerBandwidth() {
- super(PeerBandwidthPropName);
- this.bandwidthValue = 0;
+ super(PeerBandwidthPropName);
+ this.bandwidthValue = 0;
}
- public PeerBandwidth clone() {
- return new PeerBandwidth(this.bandwidthValue);
+ public PeerBandwidth clone() {
+ return new PeerBandwidth(this.bandwidthValue);
}
-
+
@Override
public String toString() {
*/
@XmlRootElement
public class SupportedBandwidth extends Bandwidth {
- private static final long serialVersionUID = 1L;
- public static final String SupportedBandwidthPropName = "supportedBandwidth";
-
- public SupportedBandwidth(long value) {
- super(SupportedBandwidthPropName);
- this.bandwidthValue = value;
- }
-
- /*
+ private static final long serialVersionUID = 1L;
+ public static final String SupportedBandwidthPropName = "supportedBandwidth";
+
+ public SupportedBandwidth(long value) {
+ super(SupportedBandwidthPropName);
+ this.bandwidthValue = value;
+ }
+
+ /*
* Private constructor used for JAXB mapping
*/
private SupportedBandwidth() {
- super(SupportedBandwidthPropName);
- this.bandwidthValue = 0;
+ super(SupportedBandwidthPropName);
+ this.bandwidthValue = 0;
}
-
- public SupportedBandwidth clone() {
- return new SupportedBandwidth(this.bandwidthValue);
+
+ public SupportedBandwidth clone() {
+ return new SupportedBandwidth(this.bandwidthValue);
}
@Override
*/
@XmlRootElement
public class Tables extends Property {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 1L;
@XmlElement
private byte tablesValue;
-
+
public static final String TablesPropName = "tables";
/**
* Construct a Tables property
*
- * @param tables the Tables
+ * @param tables the Tables
* @return Constructed object
*/
public Tables(byte tables) {
public byte getValue() {
return this.tablesValue;
}
-
+
@Override
public int hashCode() {
final int prime = 31;
/**
* The methods is called when an edge is added/deleted/changed
*
- * @param edge {@link org.opendaylight.controller.sal.core.Edge} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param props set of {@link org.opendaylight.controller.sal.core.Property} like
- * {@link org.opendaylight.controller.sal.core.Bandwidth} and/or
- * {@link org.opendaylight.controller.sal.core.Latency} etc.
+ * @param edge {@link org.opendaylight.controller.sal.core.Edge} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param props set of {@link org.opendaylight.controller.sal.core.Property} like
+ * {@link org.opendaylight.controller.sal.core.Bandwidth} and/or
+ * {@link org.opendaylight.controller.sal.core.Latency} etc.
*/
public void notifyEdge(Edge edge, UpdateType type, Set<Property> props);
}
public class Flow implements Cloneable, Serializable {
protected static final Logger logger = LoggerFactory
.getLogger(Flow.class);
- private static final long serialVersionUID = 1L;
- @XmlElement
+ private static final long serialVersionUID = 1L;
+ @XmlElement
private Match match;
@XmlElement
private List<Action> actions;
public interface IFlowProgrammerService {
/**
* Synchronously add a flow to the network node
- *
+ *
* @param node
* The target network node
* @param flow
/**
* Synchronously modify existing flow on the switch
- *
+ *
* @param node
* The target network node
* @param oldFlow
/**
* Synchronously remove the flow from the network node
- *
+ *
* @param node
* The target network node
* @param flow
/**
* Asynchronously add a flow to the network node
- *
+ *
* @param node
* The target network node
* @param flow
/**
* Asynchronously modify existing flow on the switch
- *
+ *
* @param node
* The target network node
* @param oldFlow
/**
* Asynchronously remove the flow from the network node
- *
+ *
* @param node
* The target network node
* @param flow
/**
* Remove all flows present on the network node
- *
+ *
* @param node
* The target network node
* @return The status of this request containing the unique request id
/**
* Send Barrier message synchronously. The caller will be blocked until the
* solicitation response arrives.
- *
+ *
* Solicit the network node to report whether all the requests sent so far
* are completed. When this call is done, caller knows that all past flow
* operations requested to the node in asynchronous fashion were satisfied
* by the network node and that in case of any failure, a message was sent
* to the controller.
- *
+ *
* @param node
* The network node to solicit
* @return The status of this request containing the unique request id
/**
* Send Barrier message asynchronously. The caller is not blocked.
- *
+ *
* Solicit the network node to report whether all the requests sent so far
* are completed. When this call is done, caller knows that all past flow
* operations requested to the node in asynchronous fashion were satisfied
* by the network node and that in case of any failure, a message was sent
* to the controller.
- *
+ *
* @param node
* The network node to solicit
* @return The status of this request containing the unique request id
/**
* @file IPluginOutFlowProgrammer.java
- *
+ *
* @brief Flow programmer interface to be implemented by protocol plugins
*/
public interface IPluginInFlowProgrammerService {
/**
* Synchronously add a flow to the network node
- *
+ *
* @param node
* @param flow
*/
/**
* Synchronously modify existing flow on the switch
- *
+ *
* @param node
* @param flow
*/
/**
* Synchronously remove the flow from the network node
- *
+ *
* @param node
* @param flow
*/
/**
* Asynchronously add a flow to the network node
- *
+ *
* @param node
* @param flow
* @param rid
/**
* Asynchronously modify existing flow on the switch
- *
+ *
* @param node
* @param flow
* @param rid
/**
* Asynchronously remove the flow from the network node
- *
+ *
* @param node
* @param flow
* @param rid
/**
* Remove all flows present on the network node
- *
+ *
* @param node
*/
Status removeAllFlows(Node node);
/**
* Send Barrier message synchronously. The caller will be blocked until the
* Barrier reply arrives.
- *
+ *
* @param node
*/
Status syncSendBarrierMessage(Node node);
/**
* Send Barrier message asynchronously. The caller is not blocked.
- *
+ *
* @param node
*/
Status asyncSendBarrierMessage(Node node);
* Inform SAL that the flow on the specified node has been removed Consumer
* has to expect this notification only for flows which were installed with
* an idle or hard timeout specified.
- *
+ *
* @param node
* the network node on which the flow got removed
* @param flow
* Inform SAL that an error message has been received from a switch
* regarding a flow message previously sent to the switch. A Request ID
* associated with the offending message is also returned.
- *
+ *
* @param node
* the network node on which the error reported
* @param rid
/**
* This method is called when some properties of a node are added/deleted/changed.
*
- * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.Tier} etc.
+ * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.Tier} etc.
*/
public void updateNode(Node node, UpdateType type, Set<Property> props);
/**
* This method is called when some properties of a node connector are added/deleted/changed.
*
- * @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.State} etc.
+ * @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.State} etc.
*/
public void updateNodeConnector(NodeConnector nodeConnector,
UpdateType type, Set<Property> props);
/**
* This method is called when some properties of a node are added/deleted/changed.
*
- * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.Tier} etc.
+ * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.Tier} etc.
*/
public void updateNode(Node node, UpdateType type, Set<Property> props);
/**
* This method is called when some properties of a node connector are added/deleted/changed.
*
- * @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.State} etc.
+ * @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param props set of {@link org.opendaylight.controller.sal.core.Property} such as
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.State} etc.
*/
public void updateNodeConnector(NodeConnector nodeConnector,
UpdateType type, Set<Property> props);
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
public class Match implements Cloneable, Serializable {
- private static final long serialVersionUID = 1L;
- private static final Map<MatchType, MatchType> reversableMatches;
+ private static final long serialVersionUID = 1L;
+ private static final Map<MatchType, MatchType> reversableMatches;
static {
Map<MatchType, MatchType> map = new HashMap<MatchType, MatchType>();
map.put(MatchType.DL_SRC, MatchType.DL_DST);
* Generic setter for frame/packet/message's header fields against which to match
* Note: For MAC addresses, please pass the cloned value to this function
*
- * @param type packet's header field type
- * @param value field's value to assign to the match
- * @param mask field's bitmask to apply to the match (has to be of the same class type of value)
+ * @param type packet's header field type
+ * @param value field's value to assign to the match
+ * @param mask field's bitmask to apply to the match (has to be of the same class type of value)
*/
public void setField(MatchType type, Object value, Object mask) {
MatchField field = new MatchField(type, value, mask);
* Generic setter for frame/packet/message's header fields against which to match
* Note: For MAC addresses, please pass the cloned value to this function
*
- * @param type packet's header field type
- * @param value field's value to assign to the match
+ * @param type packet's header field type
+ * @param value field's value to assign to the match
*/
public void setField(MatchType type, Object value) {
MatchField field = new MatchField(type, value);
/**
* Generic getter for fields against which the match is programmed
*
- * @param type frame/packet/message's header field type
+ * @param type frame/packet/message's header field type
* @return
*/
public MatchField getField(MatchType type) {
/**
* Returns the list of MatchType fields the match is set for
*
- * @return List of individual MatchType fields.
+ * @return List of individual MatchType fields.
*/
public List<MatchType> getMatchesList() {
return new ArrayList<MatchType>(fields.keySet());
/**
* Returns the list of MatchFields the match is set for
*
- * @return List of individual MatchField values.
+ * @return List of individual MatchField values.
*/
@XmlElement(name="matchField")
public List<MatchField> getMatchFields() {
- return new ArrayList<MatchField>(fields.values());
+ return new ArrayList<MatchField>(fields.values());
}
-
+
/**
* Returns whether this match is for an IPv6 flow
*/
/**
* Represents the generic matching field
- *
+ *
*/
@XmlRootElement
/**
* Mask based match constructor
- *
+ *
* @param type
* @param value
* @param mask
/**
* Full match constructor
- *
+ *
* @param type
* @param value
*/
/**
* Returns the value set for this match field
- *
+ *
* @return
*/
public Object getValue() {
/**
* Returns the type field this match field object is for
- *
+ *
* @return
*/
public MatchType getType() {
/**
* Returns the mask value set for this field match A null mask means this is
* a full match
- *
+ *
* @return
*/
public Object getMask() {
/**
* Returns the bitmask set for this field match
- *
+ *
* @return
*/
public long getBitMask() {
/**
* Returns whether the field match configuration is valid or not
- *
+ *
* @return
*/
public boolean isValid() {
* Represents the binding between the id, the value and mask type and the range
* values of the elements type that can be matched on the network
* frame/packet/message
- *
- *
- *
+ *
+ *
+ *
*/
public enum MatchType {
IN_PORT("inPort", 1 << 0, NodeConnector.class, 1, 0),
/**
* Perform the assignment type validation
- *
+ *
* @param value
* @param mask
* @return
/**
* Perform the value and mask range validation
- *
+ *
* @param value
* @param mask
* @return
/**
* Return the mask value in 64 bits bitmask form
- *
+ *
* @param mask
* @return
*/
public static short REQUEST = (short) 0x1;
public static short REPLY = (short) 0x2;
+ public static short PROTO_TYPE_IP = 0x800;
+
private static Map<String, Pair<Integer, Integer>> fieldCoordinates = new LinkedHashMap<String, Pair<Integer, Integer>>() {
private static final long serialVersionUID = 1L;
{
* @param int startOffset - offset to start fetching bits from data from
* @param int numBits - number of bits to be fetched from data
* @return byte [] - LSB aligned bits
- *
+ *
* @throws BufferException
* when the startOffset and numBits parameters are not congruent
* with the data buffer size
* @param byte - input byte to be inserted
* @param startOffset - offset of data[] to start inserting byte from
* @param numBits - number of bits of input to be inserted into data[]
- *
+ *
* @throws BufferException
* when the input, startOffset and numBits are not congruent
* with the data buffer size
/**
* Returns numBits 1's in the MSB position
- *
+ *
* @param numBits
* @return
*/
/**
* Returns numBits 1's in the LSB position
- *
+ *
* @param numBits
* @return
*/
/**
* Returns the numerical value of the byte array passed
- *
+ *
* @param byte[] - array
* @return long - numerical value of byte array passed
*/
/**
* Returns the numerical value of the last numBits (LSB bits) of the byte
* array passed
- *
+ *
* @param byte[] - array
* @param int - numBits
* @return long - numerical value of byte array passed
* Accepts a number as input and returns its value in byte form in LSB
* aligned form example: input = 5000 [1001110001000] bytes = 19, -120
* [00010011] [10001000]
- *
+ *
* @param Number
* @return byte[]
- *
+ *
*/
public static byte[] toByteArray(Number input) {
* Accepts a number as input and returns its value in byte form in MSB
* aligned form example: input = 5000 [1001110001000] bytes = -114, 64
* [10011100] [01000000]
- *
+ *
* @param Number
* input
* @param int numBits - the number of bits to be returned
* @return byte[]
- *
+ *
*/
public static byte[] toByteArray(Number input, int numBits) {
Class<? extends Number> dataType = input.getClass();
/**
* Takes an LSB aligned byte array and returned the LSB numBits in a MSB
* aligned byte array
- *
+ *
* @param inputbytes
* @param numBits
* @return
/**
* It aligns the last numBits bits to the head of the byte array following
* them with numBits % 8 zero bits.
- *
+ *
* Example: For inputbytes = [00000111][01110001] and numBits = 12 it
* returns: shiftedBytes = [01110111][00010000]
- *
+ *
* @param byte[] inputBytes
* @param int numBits - number of bits to be left aligned
* @return byte[]
/**
* It aligns the first numBits bits to the right end of the byte array
* preceding them with numBits % 8 zero bits.
- *
+ *
* Example: For inputbytes = [01110111][00010000] and numBits = 12 it
* returns: shiftedBytes = [00000111][01110001]
- *
+ *
* @param byte[] inputBytes
* @param int numBits - number of bits to be right aligned
* @return byte[]
* Insert in the data buffer at position dictated by the offset the number
* of bits specified from the input data byte array. The input byte array
* has the bits stored starting from the LSB
- *
+ *
* @param byte[] data
* @param byte[] inputdata
* @param int startOffset
int InputMSBbits = 0, InputLSBbits = 0;
int i;
- if (numBits == 0) {
- return;
+ if (numBits == 0) {
+ return;
}
if (extraOffsetBits == 0) {
/**
* Sets the type for the current ICMP message
- *
+ *
* @param type
* The ICMP message type
* @return This ICMP object
/**
* Sets the ICMP code (type subtype) for the current ICMP object instance
- *
+ *
* @param code
* The ICMP message type subtype
* @return This ICMP object
/**
* Computes the ICMP checksum on the serialized ICMP message
- *
+ *
* @param serialized
* The data stream
* @param start
* @param checksum the checksum to set
*/
/*public IPv4 setChecksum() {
- short ipChecksum = computeChecksum();
+ short ipChecksum = computeChecksum();
byte[] checksum = BitBufferHelper.toByteArray(ipChecksum);
- fieldValues.put(CHECKSUM, checksum);
+ fieldValues.put(CHECKSUM, checksum);
return this;
}*/
/**
* Computes the IPv4 header checksum on the passed stream of bytes
* representing the packet
- *
+ *
* @param data
* The byte stream
* @param offset
/**
* Returns the size of LLDP packet in bytes
- *
+ *
* @return int - LLDP Packet size in bytes
*/
private int getLLDPPacketLength() {
* Describes an exception that is raised when the process of serializing or
* deserializing a network packet/stream fails. This generally happens when the
* packet/stream is malformed.
- *
+ *
*/
public class PacketException extends Exception {
private static final long serialVersionUID = 1L;
import org.opendaylight.controller.sal.core.TimeStamp;
/**
- *
+ *
* Describe a raw Data Packet, this is how a packet is received from the network
* and how it will be transmitted. It essentially wraps the raw bytestream
- *
+ *
*/
public class RawPacket {
private byte[] packetData;
/**
* If the packet is being sent this parameter tells where the packet is sent
* toward
- *
- *
+ *
+ *
* @return the NodeConnector toward where the packet is being sent
*/
public NodeConnector getOutgoingNodeConnector() {
/**
* Setter method for OutGoing NodeConnector
- *
+ *
* @param outgoingNodeConnector
* NodeConnector toward where the packet is travelling
*/
/**
* Return the incoming NodeConnector if the packet was received
- *
+ *
* @return NodeConnector where the packet was received from
*/
public NodeConnector getIncomingNodeConnector() {
/**
* Setter for Incoming NodeConnector
- *
+ *
* @param incomingNodeConnector
* NodeConnector to be used and incoming one
*/
/**
* Retrieve a given property attached to the packet, if exits of course
- *
+ *
* @param key
* Key to retrieve the wanted property attached to the packet
- *
+ *
* @return The property attached to the packet
*/
public Object getProps(Object key) {
/**
* Generic data associated to the data packet
- *
+ *
* @param key
* key for the association
* @param value
/**
* Constructor for RawPacket
- *
+ *
* @param data
* content of the packet as bytestream
* @param e
* datalink encapsulation for the packet
- *
+ *
*/
public RawPacket(byte[] data, LinkEncap e) throws ConstructionException {
if (data == null) {
* Copy Constructor for RawPacket, it performs a copy of the packet so each
* packet can be modified independently without worrying that source packet
* content is touched
- *
+ *
* @param src
* packet to copy data from
- *
+ *
*/
public RawPacket(RawPacket src) throws ConstructionException {
if (src == null) {
/**
* Constructor for RawPacket with Ethernet encapsulation
- *
+ *
* @param data
* content of the packet as bytestream
- *
+ *
*/
public RawPacket(byte[] data) throws ConstructionException {
this(data, LinkEncap.ETHERNET);
/**
* Read the time stamp when the packet has entered the system
- *
+ *
* @return The time stamp when the packet has entered the system
*/
public TimeStamp getIncomingTime() {
/**
* Read the packet encapsulation
- *
+ *
* @return The encapsulation for the raw packet, necessary to start parsing
* the packet
*/
/**
* Get bytestream of the packet body
- *
+ *
* @return The raw bytestream composing the packet
*/
public byte[] getPacketData() {
/**
* Returns the time at which the current instance of RawPacket was created
* as a copy of the original one.
- *
+ *
* @return The time stamp at which this RawPacket instance was created. null
* if this is the original instance.
*/
*/
public void setHeaderField(String headerField, byte[] readValue) {
/*if (headerField.equals("Protocol")) {
- payloadClass = decodeMap.get(readValue);
+ payloadClass = decodeMap.get(readValue);
}*/
hdrFieldsMap.put(headerField, readValue);
}
/* Dummy constructor for JAXB */
private FlowOnNode () {
}
-
+
public FlowOnNode(Flow flow) {
this.flow = flow;
}
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
public class NodeConnectorStatistics {
- @XmlElement
+ @XmlElement
private NodeConnector nodeConnector;
- @XmlElement
+ @XmlElement
private long receivePackets;
- @XmlElement
+ @XmlElement
private long transmitPackets;
- @XmlElement
+ @XmlElement
private long receiveBytes;
- @XmlElement
+ @XmlElement
private long transmitBytes;
- @XmlElement
+ @XmlElement
private long receiveDrops;
- @XmlElement
+ @XmlElement
private long transmitDrops;
- @XmlElement
+ @XmlElement
private long receiveErrors;
- @XmlElement
+ @XmlElement
private long transmitErrors;
- @XmlElement
+ @XmlElement
private long receiveFrameError;
- @XmlElement
+ @XmlElement
private long receiveOverRunError;
- @XmlElement
+ @XmlElement
private long receiveCrcError;
- @XmlElement
+ @XmlElement
private long collisionCount;
- //To Satisfy JAXB
- public NodeConnectorStatistics() {
-
- }
+ //To Satisfy JAXB
+ public NodeConnectorStatistics() {
+
+ }
/**
* Set the node connector
* @param port
@Override
public String toString() {
return "HwDescription[manufacturer=" + manufacturer + ", hardware="
- + hardware + ", software=" + software + ", serialNumber="
- + serialNumber + ", description=" + description + "]";
+ + hardware + ", software=" + software + ", serialNumber="
+ + serialNumber + ", description=" + description + "]";
}
}
/**
* @author Aditya Prakash Vaja <aditya.vaja@bigswitch.com>
* Represents the Table statistics for the node
- *
+ *
*/
@XmlRootElement
/**
* Topology notifications provided by SAL toward the application
- *
+ *
*/
public interface IListenTopoUpdates {
/**
* Called to update on Edge in the topology graph
- *
+ *
* @param topoedgeupdateList
* List of topoedgeupdates Each topoedgeupdate includes edge, its
* Properties ( BandWidth and/or Latency etc) and update type.
/**
* Called when an Edge utilization is above the safety threshold configured
* on the controller
- *
+ *
* @param edge
* The edge which bandwidth usage is above the safety level
*/
/**
* Called when the Edge utilization is back to normal, below the safety
* threshold level configured on the controller
- *
+ *
* @param edge
*/
public void edgeUtilBackToNormal(Edge edge);
/**
* Methods that are invoked from Protocol Plugin toward SAL
- *
+ *
*/
public interface IPluginOutTopologyService {
/**
* Called to update on Edge in the topology graph
- *
+ *
* @param topoedgeupdateList
* List of topoedgeupdates Each topoedgeupdate includes edge, its
* Properties ( BandWidth and/or Latency etc) and update type.
/**
* Called when an Edge utilization is above the safety threshold configured
* on the controller
- *
+ *
* @param edge
*/
public void edgeOverUtilized(Edge edge);
/**
* Called when the Edge utilization is back to normal, below the safety
* threshold level configured on the controller
- *
+ *
* @param edge
*/
public void edgeUtilBackToNormal(Edge edge);
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.sal.utils;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * An iterator that will filter values from an iterator and return only those
+ * values that match the predicate.
+ */
+public abstract class FilterIterator<T> implements Iterator<T> {
+ protected Iterator<T> subIterator;
+ protected T next;
+
+ /**
+ * Construct a filter iterator from the given sub iterator
+ *
+ * @param subIterator
+ * the sub iterator over which we'll filter
+ */
+ public FilterIterator(Iterator<T> subIterator) {
+ super();
+ this.subIterator = subIterator;
+ }
+
+ /**
+ * Check whether the given value should be returned by the filter
+ *
+ * @param value
+ * the value to check
+ * @return true if the value should be included
+ */
+ protected abstract boolean matches(T value);
+
+ // ***********
+ // Iterator<T>
+ // ***********
+
+ @Override
+ public boolean hasNext() {
+ if (next != null)
+ return true;
+
+ while (subIterator.hasNext()) {
+ next = subIterator.next();
+ if (matches(next))
+ return true;
+ }
+ next = null;
+ return false;
+ }
+
+ @Override
+ public T next() {
+ if (hasNext()) {
+ T cur = next;
+ next = null;
+ return cur;
+ }
+ throw new NoSuchElementException();
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+}
*
*/
public enum GlobalConstants {
- DEFAULT("default"),
- CONTAINERMANAGER("containermanager"),
- CONTAINERNAME("name"),
- STATICVLAN("staticvlan"),
- CLUSTERINGSERVICES("clusteringservices"),
+ DEFAULT("default"),
+ CONTAINERMANAGER("containermanager"),
+ CONTAINERNAME("name"),
+ STATICVLAN("staticvlan"),
+ CLUSTERINGSERVICES("clusteringservices"),
STARTUPHOME("configuration/startup/");
private GlobalConstants(String name) {
public class HexEncode {
/**
* This method converts byte array into String format without ":" inserted.
- *
+ *
* @param bytes
* The byte array to convert to string
* @return The hexadecimal representation of the byte array. If bytes is
--- /dev/null
+/*
+ * Copyright (c) 2011 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.sal.utils;
+
+public interface IListener<T> {
+ public enum Command {
+ CONTINUE, STOP
+ }
+
+ /**
+ * The name assigned to this listener
+ *
+     * @return the name assigned to this listener
+ */
+ public String getName();
+
+ /**
+     * Check whether the module with the given name is a callback ordering
+     * prerequisite for this module. In other words, if this method returns
+     * true for the given name, this listener will be called after that listener.
+ *
+ * @param type
+ * the object type to which this applies
+ * @param name
+ * the name of the module
+ * @return whether name is a prerequisite.
+ */
+ public boolean isCallbackOrderingPrereq(T type, String name);
+
+ /**
+     * Check whether the module with the given name is a callback ordering
+     * post-requisite for this module. In other words, if this method returns
+     * true for the given name, this listener will be called before that listener.
+ *
+ * @param type
+ * the object type to which this applies
+ * @param name
+ * the name of the module
+ * @return whether name is a post-requisite.
+ */
+ public boolean isCallbackOrderingPostreq(T type, String name);
+}
// Openflow 1.0 supports the IP Proto match only for ICMP, TCP and UDP
public enum IPProtocols {
ANY("any", 0),
- /* HOPOPT("HOPOPT",0),
+ /* HOPOPT("HOPOPT",0),
*/ICMP("ICMP", 1),
- /* IGMP("IGMP",2),
+ /* IGMP("IGMP",2),
GGP("GGP",3),
IPV4("IPv4",4),
ST("ST",5),
*/TCP("TCP", 6),
- /* CBT("CBT",7),
+ /* CBT("CBT",7),
EGP("EGP",8),
IGP("IGP",9),
BBNRCCMON("BBN-RCC-MON",10),
XNET("XNET",15),
CHAOS("CHAOS",16),
*/UDP("UDP", 17),
- /* MUX("MUX",18),
+ /* MUX("MUX",18),
DCNMEAS("DCN-MEAS",19),
HMP("HMP",20),
PRM("PRM",21),
TLSP("TLSP",56),
SKIP("SKIP",57),
*/IPV6ICMP("IPv6-ICMP", 58);
- /* IPV6NoNxt("IPv6-NoNxt",59),
+ /* IPV6NoNxt("IPv6-NoNxt",59),
IPV6Opts("IPv6-Opts",60),
ANYHOST("ANY-HOST",61),
CFTP("CFTP",62),
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.sal.utils;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Iterator that flattens an iterator of Iterables into a single iterator
+ * over all contained values.
+ *
+ * @param <T>
+ *            the type of elements returned by this iterator
+ */
+public class IterableIterator<T> implements Iterator<T> {
+    // Outer iterator supplying the Iterables to flatten.
+    Iterator<? extends Iterable<T>> subIterator;
+    // Iterator over the Iterable currently being consumed; null until first use.
+    Iterator<T> current = null;
+
+    public IterableIterator(Iterator<? extends Iterable<T>> subIterator) {
+        super();
+        this.subIterator = subIterator;
+    }
+
+    @Override
+    public boolean hasNext() {
+        if (current == null) {
+            if (subIterator.hasNext()) {
+                current = subIterator.next().iterator();
+            } else {
+                return false;
+            }
+        }
+        // Advance past exhausted inner iterators until one has a value left.
+        while (!current.hasNext() && subIterator.hasNext()) {
+            current = subIterator.next().iterator();
+        }
+
+        return current.hasNext();
+    }
+
+    @Override
+    public T next() {
+        if (hasNext())
+            return current.next();
+        throw new NoSuchElementException();
+    }
+
+    @Override
+    public void remove() {
+        // Bug fix: the original fell through and threw NoSuchElementException
+        // unconditionally, even after a successful current.remove().
+        // NOTE(review): hasNext() may advance 'current' to the next inner
+        // iterator, in which case remove() targets that iterator — this
+        // mirrors the original delegation; confirm callers rely only on the
+        // immediate next()/remove() pairing.
+        if (!hasNext())
+            throw new NoSuchElementException();
+        current.remove();
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2011 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.sal.utils;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Maintain lists of listeners ordered by dependency.
+ *
+ * @author readams
+ *
+ */
+public class ListenerDispatcher<U, T extends IListener<U>> {
+    protected static Logger logger = LoggerFactory
+            .getLogger(ListenerDispatcher.class);
+    // Copy-on-write: this list is replaced wholesale on every mutation so
+    // readers can iterate the volatile reference without locking.
+    volatile List<T> listeners = new ArrayList<T>();
+
+    // Depth-first visit used by the topological sort in addListener():
+    // recursively emits every unvisited listener that must precede
+    // 'listener' into 'ordering' before emitting 'listener' itself.
+    private void visit(List<T> newlisteners, U type, HashSet<T> visited,
+            List<T> ordering, T listener) {
+        if (!visited.contains(listener)) {
+            visited.add(listener);
+
+            for (T i : newlisteners) {
+                if (ispre(type, i, listener)) {
+                    visit(newlisteners, type, visited, ordering, i);
+                }
+            }
+            ordering.add(listener);
+        }
+    }
+
+    // True when l1 must be dispatched before l2 for the given type: either
+    // l2 names l1 as a prerequisite, or l1 names l2 as a post-requisite.
+    private boolean ispre(U type, T l1, T l2) {
+        return (l2.isCallbackOrderingPrereq(type, l1.getName()) || l1
+                .isCallbackOrderingPostreq(type, l2.getName()));
+    }
+
+    /**
+     * Add a listener to the list of listeners, re-deriving the whole
+     * dispatch order from the declared ordering constraints.
+     *
+     * @param type the object type the ordering constraints apply to
+     * @param listener the listener to add
+     */
+    public void addListener(U type, T listener) {
+        List<T> newlisteners = new ArrayList<T>();
+        if (listeners != null)
+            newlisteners.addAll(listeners);
+
+        newlisteners.add(listener);
+        // Find nodes without outgoing edges
+        List<T> terminals = new ArrayList<T>();
+        for (T i : newlisteners) {
+            boolean isterm = true;
+            for (T j : newlisteners) {
+                if (ispre(type, i, j)) {
+                    isterm = false;
+                    break;
+                }
+            }
+            if (isterm) {
+                terminals.add(i);
+            }
+        }
+
+        // No terminal nodes means the constraint graph has no valid order;
+        // fall back to the unordered list rather than dropping listeners.
+        if (terminals.size() == 0) {
+            logger.error("No listener dependency solution: "
+                    + "No listeners without incoming dependencies");
+            listeners = newlisteners;
+            return;
+        }
+
+        // visit depth-first traversing in the opposite order from
+        // the dependencies. Note we will not generally detect cycles
+        HashSet<T> visited = new HashSet<T>();
+        List<T> ordering = new ArrayList<T>();
+        for (T term : terminals) {
+            visit(newlisteners, type, visited, ordering, term);
+        }
+        listeners = ordering;
+    }
+
+    /**
+     * Remove the given listener
+     *
+     * @param listener
+     *            the listener to remove
+     */
+    public void removeListener(T listener) {
+        if (listeners != null) {
+            List<T> newlisteners = new ArrayList<T>();
+            newlisteners.addAll(listeners);
+            newlisteners.remove(listener);
+            listeners = newlisteners;
+        }
+    }
+
+    /**
+     * Clear all listeners
+     */
+    public void clearListeners() {
+        listeners = new ArrayList<T>();
+    }
+
+    /**
+     * Get the ordered list of listeners ordered by dependencies
+     *
+     * @return the dependency-ordered listener list; callers must treat it as
+     *         read-only (it is shared until the next mutation replaces it)
+     */
+    public List<T> getOrderedListeners() {
+        return listeners;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2012 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.sal.utils;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Iterator that flattens an iterator of iterators into a single iterator
+ * over all contained values.
+ *
+ * @param <T>
+ *            the type of elements returned by this iterator
+ */
+public class MultiIterator<T> implements Iterator<T> {
+    // Outer iterator supplying the inner iterators to flatten.
+    Iterator<Iterator<T>> subIterator;
+    // Inner iterator currently being consumed; null until first use.
+    Iterator<T> current = null;
+
+    public MultiIterator(Iterator<Iterator<T>> subIterator) {
+        super();
+        this.subIterator = subIterator;
+    }
+
+    @Override
+    public boolean hasNext() {
+        if (current == null) {
+            if (subIterator.hasNext()) {
+                current = subIterator.next();
+            } else {
+                return false;
+            }
+        }
+        // Advance past exhausted inner iterators until one has a value left.
+        while (!current.hasNext() && subIterator.hasNext()) {
+            current = subIterator.next();
+        }
+
+        return current.hasNext();
+    }
+
+    @Override
+    public T next() {
+        if (hasNext())
+            return current.next();
+        throw new NoSuchElementException();
+    }
+
+    @Override
+    public void remove() {
+        // Bug fix: the original fell through and threw NoSuchElementException
+        // unconditionally, even after a successful current.remove().
+        // NOTE(review): hasNext() may advance 'current' to the next inner
+        // iterator before delegating remove(); confirm callers pair remove()
+        // immediately with next() as the Iterator contract requires.
+        if (!hasNext())
+            throw new NoSuchElementException();
+        current.remove();
+    }
+}
/**
* Converts a 4 bytes array into an integer number
*
- * @param ba the 4 bytes long byte array
- * @return the integer number
+ * @param ba the 4 bytes long byte array
+ * @return the integer number
*/
public static int byteArray4ToInt(byte[] ba) {
if (ba == null || ba.length != 4) {
* Converts an IP address passed as integer value into the
* respective InetAddress object
*
- * @param address the IP address in integer form
- * @return the IP address in InetAddress form
+ * @param address the IP address in integer form
+ * @return the IP address in InetAddress form
*/
public static InetAddress getInetAddress(int address) {
InetAddress ip = null;
* The prefix bit mask indicates the contiguous leading bits that are NOT masked out.
* Example: A prefix bit mask length of 8 will give an InetAddress Network Mask of 255.0.0.0
*
- * @param prefixMaskLength integer representing the length of the prefix network mask
- * @param isV6 boolean representing the IP version of the returned address
+ * @param prefixMaskLength integer representing the length of the prefix network mask
+ * @param isV6 boolean representing the IP version of the returned address
* @return
*/
public static InetAddress getInetNetworkMask(int prefixMaskLength,
* while ff.00.00.00 will return a subnet mask length of 24.
* If the passed prefixMask object is null, 0 is returned
*
- * @param prefixMask the prefix mask as byte array
- * @return the length of the prefix network mask
+ * @param prefixMask the prefix mask as byte array
+ * @return the length of the prefix network mask
*/
public static int getSubnetMaskLength(byte[] prefixMask) {
int maskLength = 0;
* while ff.00.00.00 will return a subnet mask length of 24
* If the passed prefixMask object is null, 0 is returned
*
- * @param prefixMask the prefix mask as InetAddress
- * @return the length of the prefix network mask
+ * @param prefixMask the prefix mask as InetAddress
+ * @return the length of the prefix network mask
*/
public static int getSubnetMaskLength(InetAddress prefixMask) {
return (prefixMask == null) ? 0 : NetUtils
* the equivalent subnet prefix IP address
* Example: for ip = "172.28.30.254" and maskLen = 25 it will return "172.28.30.128"
*
- * @param ip the IP address in InetAddress form
- * @param maskLen the length of the prefix network mask
- * @return the subnet prefix IP address in InetAddress form
+ * @param ip the IP address in InetAddress form
+ * @param maskLen the length of the prefix network mask
+ * @return the subnet prefix IP address in InetAddress form
*/
public static InetAddress getSubnetPrefix(InetAddress ip, int maskLen) {
int bytes = maskLen / 8;
/**
* Returns true if the passed MAC address is all zero
*
- * @param mac the byte array representing the MAC address
- * @return true if all MAC bytes are zero
+ * @param mac the byte array representing the MAC address
+ * @return true if all MAC bytes are zero
*/
public static boolean isZeroMAC(byte[] mac) {
for (short i = 0; i < 6; i++) {
/**
* Returns true if the passed InetAddress contains all zero
*
- * @param ip the IP address to test
- * @return true if the address is all zero
+ * @param ip the IP address to test
+ * @return true if the address is all zero
*/
public static boolean isAny(InetAddress ip) {
for (byte b : ip.getAddress()) {
/**
* Returns the unsigned value of the passed byte variable
*
- * @param b the byte value
+ * @param b the byte value
* @return the int variable containing the unsigned byte value
*/
public static int getUnsignedByte(byte b) {
- return (b > 0)? (int)b : (b & 0x7F | 0x80);
- }
+ return (b > 0)? (int)b : (b & 0x7F | 0x80);
+ }
/**
* Return the unsigned value of the passed short variable
* @param s the short value
* @return the int variable containing the unsigned short value
*/
- public static int getUnsignedShort(short s) {
- return (s > 0)? (int)s : (s & 0x7FFF | 0x8000);
- }
+ public static int getUnsignedShort(short s) {
+ return (s > 0)? (int)s : (s & 0x7FFF | 0x8000);
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2011,2013 Big Switch Networks, Inc.
+ *
+ * Licensed under the Eclipse Public License, Version 1.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Originally created by David Erickson, Stanford University
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+
+package org.opendaylight.controller.sal.utils;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This allows you to represent a task that should be queued for future
+ * execution but where you only want the task to complete once in response to
+ * some sequence of events. For example, if you get a change notification and
+ * want to reload state, you only want to reload the state once, at the end, and
+ * don't want to queue an update for every notification that might come in.
+ *
+ * The semantics are as follows:
+ *  - If the task hasn't begun yet, do not queue a new task.
+ *  - If the task has begun, set a bit to restart it after the current task
+ *    finishes.
+ */
+public class SingletonTask {
+    protected static Logger logger = LoggerFactory
+            .getLogger(SingletonTask.class);
+
+    /**
+     * Mutable scheduling state shared between the owner and its worker;
+     * all access is guarded by synchronizing on the context instance.
+     */
+    protected static class SingletonTaskContext {
+        // True when a (re)run has been requested but has not started yet.
+        protected boolean taskShouldRun = false;
+        // True while the wrapped task is actually executing.
+        protected boolean taskRunning = false;
+
+        // The currently queued worker, if any.
+        protected SingletonTaskWorker waitingTask = null;
+    }
+
+    /**
+     * Runnable wrapper that runs the task once, then requeues itself if a
+     * rerun was requested while the task was executing.
+     */
+    protected static class SingletonTaskWorker implements Runnable {
+        SingletonTask parent;
+        // Set when this queued worker has been superseded by a newer one.
+        boolean canceled = false;
+        // Absolute System.nanoTime() deadline for the requested rerun;
+        // <= 0 means run immediately.
+        long nextschedule = 0;
+
+        public SingletonTaskWorker(SingletonTask parent) {
+            super();
+            this.parent = parent;
+        }
+
+        @Override
+        public void run() {
+            synchronized (parent.context) {
+                // Bail out if superseded or if no run is pending anymore.
+                if (canceled || !parent.context.taskShouldRun)
+                    return;
+
+                parent.context.taskRunning = true;
+                parent.context.taskShouldRun = false;
+            }
+
+            // Run outside the lock so reschedule() can record rerun requests
+            // while the task executes.
+            try {
+                parent.task.run();
+            } catch (Exception e) {
+                logger.error("Exception while executing task", e);
+            }
+
+            synchronized (parent.context) {
+                parent.context.taskRunning = false;
+
+                // A rerun was requested while we were running: requeue this
+                // worker, immediately or at the recorded deadline.
+                if (parent.context.taskShouldRun) {
+                    long now = System.nanoTime();
+                    if ((nextschedule <= 0 || (nextschedule - now) <= 0)) {
+                        parent.ses.execute(this);
+                    } else {
+                        parent.ses.schedule(this, nextschedule - now,
+                                TimeUnit.NANOSECONDS);
+                    }
+                }
+            }
+        }
+    }
+
+    protected SingletonTaskContext context = new SingletonTaskContext();
+    protected Runnable task;
+    protected ScheduledExecutorService ses;
+
+    /**
+     * Construct a new SingletonTask for the given runnable. The context is used
+     * to manage the state of the task execution and can be shared by more than
+     * one instance of the runnable.
+     *
+     * @param ses the executor used to run and schedule the task
+     * @param task the work to execute
+     */
+    public SingletonTask(ScheduledExecutorService ses, Runnable task) {
+        super();
+        this.task = task;
+        this.ses = ses;
+    }
+
+    /**
+     * Schedule the task to run if there's not already a task scheduled. If
+     * there is such a task waiting that has not already started, it will
+     * cancel that task and reschedule it to run at the given time. If the task
+     * is already started, it will cause the task to be rescheduled once it
+     * completes to run after delay from the time of reschedule.
+     *
+     * @param delay
+     *            the delay in scheduling
+     * @param unit
+     *            the timeunit of the delay
+     */
+    public void reschedule(long delay, TimeUnit unit) {
+        boolean needQueue = true;
+        SingletonTaskWorker stw = null;
+
+        synchronized (context) {
+            if (context.taskRunning || context.taskShouldRun) {
+                if (context.taskRunning) {
+                    // schedule to restart at the right time
+                    if (delay > 0) {
+                        long now = System.nanoTime();
+                        long then = now
+                                + TimeUnit.NANOSECONDS.convert(delay, unit);
+                        context.waitingTask.nextschedule = then;
+                    } else {
+                        context.waitingTask.nextschedule = 0;
+                    }
+                    needQueue = false;
+                } else {
+                    // cancel and requeue
+                    context.waitingTask.canceled = true;
+                    context.waitingTask = null;
+                }
+            }
+
+            context.taskShouldRun = true;
+
+            if (needQueue) {
+                stw = context.waitingTask = new SingletonTaskWorker(this);
+            }
+        }
+
+        // Submit outside the lock to avoid holding it across executor calls.
+        if (needQueue) {
+            if (delay <= 0)
+                ses.execute(stw);
+            else
+                ses.schedule(stw, delay, unit);
+        }
+    }
+}
\ No newline at end of file
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* for internal API2 function calls. This constructor allows to specify,
* beside the Status Code, a custom human readable description to add more
* information about the status.
- *
+ *
* @param errorCode
* The status code. If passed as null, code will be stored as
* {@code StatusCode.UNDEFINED}
* Generates an instance of the Status class based on the passed StatusCode
* only. The description field of the Status object will be inferred by the
* status code.
- *
+ *
* @param errorCode
* The status code. If passed as null, code will be stored as
* {@code StatusCode.UNDEFINED}
* asynchronous call. It is supposed to be created by the underlying
* infrastructure only when it was successful in allocating the asynchronous
* request id, hence caller should expect StatusCode to be successful.
- *
+ *
* @param errorCode
* The status code. If passed as null, code will be stored as
* {@code StatusCode.UNDEFINED}
/**
* Returns the status code
- *
+ *
* @return the {@code StatusCode} representing the status code
*/
public StatusCode getCode() {
/**
* Returns a human readable description of the failure if any
- *
+ *
* @return a string representing the reason of failure
*/
public String getDescription() {
/**
* Tells whether the status is successful
- *
+ *
* @return true if the Status code is {@code StatusCode.SUCCESS}
*/
public boolean isSuccess() {
* Return the request id assigned by underlying infrastructure in case of
* asynchronous request. In case of synchronous requests, the returned id
* is expected to be 0
- *
+ *
* @return The request id assigned for this asynchronous request
*/
public long getRequestId() {
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
/**
* The enum which describes the generic error conditions.
- * Each enum value is associated with a minimal description string.
+ * Each enum value is associated with a minimal description string.
*
*/
public enum StatusCode {
- SUCCESS("Success"),
-
- BADREQUEST("Bad Request"),
- UNAUTHORIZED("UnAuthorized"),
- FORBIDDEN("Forbidden"),
- NOTFOUND("Not Found"),
- NOTALLOWED("Method Not Allowed"),
- NOTACCEPTABLE("Request Not Acceptable"),
- TIMEOUT("Request Timeout"),
- CONFLICT("Resource Conflict"),
- GONE("Resource Gone"),
- UNSUPPORTED("Unsupported"),
-
- INTERNALERROR("Internal Error"),
- NOTIMPLEMENTED("Not Implemented"),
- NOSERVICE("Service Not Available"),
-
- UNDEFINED("Undefined Error");
-
- private String description;
- private StatusCode(String description) {
- this.description = description;
- }
-
- /**
- * Prints the description associated to the code value
- */
- public String toString() {
- return description;
- }
+ SUCCESS("Success"),
+
+ BADREQUEST("Bad Request"),
+ UNAUTHORIZED("UnAuthorized"),
+ FORBIDDEN("Forbidden"),
+ NOTFOUND("Not Found"),
+ NOTALLOWED("Method Not Allowed"),
+ NOTACCEPTABLE("Request Not Acceptable"),
+ TIMEOUT("Request Timeout"),
+ CONFLICT("Resource Conflict"),
+ GONE("Resource Gone"),
+ UNSUPPORTED("Unsupported"),
+
+ INTERNALERROR("Internal Error"),
+ NOTIMPLEMENTED("Not Implemented"),
+ NOSERVICE("Service Not Available"),
+
+ UNDEFINED("Undefined Error");
+
+ private String description;
+ private StatusCode(String description) {
+ this.description = description;
+ }
+
+ /**
+ * Prints the description associated to the code value
+ */
+ public String toString() {
+ return description;
+ }
}
*/
package org.opendaylight.controller.sal.authorization;
-
+
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.opendaylight.controller.sal.core.Node;
import org.opendaylight.controller.sal.utils.NodeCreator;
-
- public class AuthorizationTest {
-
- @Test
- public void testResources () {
- Privilege p = Privilege.WRITE;
- ResourceGroup resourceGroup = new ResourceGroup("NodeGroup", p);
- Map<ResourceGroup, ArrayList<Resource>> resourceMap = new HashMap<ResourceGroup, ArrayList<Resource>>();
- ArrayList<Resource> resourceList = new ArrayList<Resource>();
-
- for (int i = 0; i < 5; i++) {
- Node node = NodeCreator.createOFNode((long)i);
- Resource resource = new Resource (node, p);
- resourceList.add(resource);
- }
-
- resourceMap.put(resourceGroup, resourceList);
-
- ArrayList<Resource> retrievedResourceList = resourceMap.get(resourceGroup);
- for (Entry<ResourceGroup, ArrayList<Resource>> entry : resourceMap.entrySet()) {
- ResourceGroup rGroup = entry.getKey();
- Assert.assertTrue(rGroup.getGroupName().equals(resourceGroup.getGroupName()));
- for (int i = 0; i < 5; i++) {
- Resource resource = retrievedResourceList.get(i);
- Assert.assertTrue(resource.getPrivilege().equals(Privilege.WRITE));
- Assert.assertTrue(((Long)((Node)resource.getResource()).getID()).equals((long)i));
- }
- }
- }
-
- @Test
- public void testAppRoleLevel() {
- AppRoleLevel appRoleLevel = AppRoleLevel.APPOPERATOR;
- Assert.assertTrue(appRoleLevel.toString().equals("App-Operator"));
- Assert.assertTrue(appRoleLevel.toNumber() == 2);
- Assert.assertTrue(appRoleLevel.toStringPretty().equals("Application Operator"));
- }
-
- @Test
- public void testUserLevel() {
- UserLevel userLevel = UserLevel.SYSTEMADMIN;
- Assert.assertTrue(userLevel.toString().equals("System-Admin"));
- Assert.assertTrue(userLevel.toNumber() == 0);
- Assert.assertTrue(userLevel.toStringPretty().equals("System Administrator"));
- }
-
- @Test
- public void testAppRoleLevelFromString() {
- Assert.assertTrue(AppRoleLevel.fromString("App-Admin") == AppRoleLevel.APPADMIN);
- Assert.assertTrue(AppRoleLevel.fromString("App-User") == AppRoleLevel.APPUSER);
- Assert.assertTrue(AppRoleLevel.fromString("App-Operator") == AppRoleLevel.APPOPERATOR);
- Assert.assertTrue(AppRoleLevel.fromString(" ") == null);
- Assert.assertTrue(AppRoleLevel.fromString("") == null);
- Assert.assertTrue(AppRoleLevel.fromString("App-Admini") == null);
- }
+
+ /** Unit tests for the sal.authorization resource/role types. */
+ public class AuthorizationTest {
+
+     // Builds a group of 5 OF-node resources with WRITE privilege and checks
+     // they round-trip through a ResourceGroup-keyed map unchanged.
+     @Test
+     public void testResources () {
+         Privilege p = Privilege.WRITE;
+         ResourceGroup resourceGroup = new ResourceGroup("NodeGroup", p);
+         Map<ResourceGroup, ArrayList<Resource>> resourceMap = new HashMap<ResourceGroup, ArrayList<Resource>>();
+         ArrayList<Resource> resourceList = new ArrayList<Resource>();
+
+         for (int i = 0; i < 5; i++) {
+             Node node = NodeCreator.createOFNode((long)i);
+             Resource resource = new Resource (node, p);
+             resourceList.add(resource);
+         }
+
+         resourceMap.put(resourceGroup, resourceList);
+
+         ArrayList<Resource> retrievedResourceList = resourceMap.get(resourceGroup);
+         for (Entry<ResourceGroup, ArrayList<Resource>> entry : resourceMap.entrySet()) {
+             ResourceGroup rGroup = entry.getKey();
+             Assert.assertTrue(rGroup.getGroupName().equals(resourceGroup.getGroupName()));
+             for (int i = 0; i < 5; i++) {
+                 Resource resource = retrievedResourceList.get(i);
+                 Assert.assertTrue(resource.getPrivilege().equals(Privilege.WRITE));
+                 Assert.assertTrue(((Long)((Node)resource.getResource()).getID()).equals((long)i));
+             }
+         }
+     }
+
+     // Checks the string/number renderings of the APPOPERATOR role level.
+     @Test
+     public void testAppRoleLevel() {
+         AppRoleLevel appRoleLevel = AppRoleLevel.APPOPERATOR;
+         Assert.assertTrue(appRoleLevel.toString().equals("App-Operator"));
+         Assert.assertTrue(appRoleLevel.toNumber() == 2);
+         Assert.assertTrue(appRoleLevel.toStringPretty().equals("Application Operator"));
+     }
+
+     // Checks the string/number renderings of the SYSTEMADMIN user level.
+     @Test
+     public void testUserLevel() {
+         UserLevel userLevel = UserLevel.SYSTEMADMIN;
+         Assert.assertTrue(userLevel.toString().equals("System-Admin"));
+         Assert.assertTrue(userLevel.toNumber() == 0);
+         Assert.assertTrue(userLevel.toStringPretty().equals("System Administrator"));
+     }
+
+     // fromString must map exact role names and return null for anything else.
+     @Test
+     public void testAppRoleLevelFromString() {
+         Assert.assertTrue(AppRoleLevel.fromString("App-Admin") == AppRoleLevel.APPADMIN);
+         Assert.assertTrue(AppRoleLevel.fromString("App-User") == AppRoleLevel.APPUSER);
+         Assert.assertTrue(AppRoleLevel.fromString("App-Operator") == AppRoleLevel.APPOPERATOR);
+         Assert.assertTrue(AppRoleLevel.fromString(" ") == null);
+         Assert.assertTrue(AppRoleLevel.fromString("") == null);
+         Assert.assertTrue(AppRoleLevel.fromString("App-Admini") == null);
+     }
}
public void testExtensibleNode() {
// Add a new ID type
Assert.assertTrue(Node.NodeIDType.registerIDType("FOO", Integer.class));
-
+
// Trying to re-register the node must fail
Assert.assertFalse(Node.NodeIDType.registerIDType("FOO",
Integer.class));
// Got an unexpected exception
Assert.assertTrue(false);
}
-
+
// Now unregister the type and make sure the node doesn't get
// created
Node.NodeIDType.unRegisterIDType("FOO");
InetAddress ipMask2 = null;
short ethertype = EtherTypes.IPv4.shortValue();
short ethertype2 = EtherTypes.IPv4.shortValue();
-
+
/*
* Create a SAL Flow aFlow
*/
Match match1 = new Match();
Match match2 = new Match();
-
+
match1.setField(MatchType.DL_TYPE, ethertype);
match1.setField(MatchType.NW_SRC, srcIP, ipMask);
match2.setField(MatchType.NW_SRC, srcIP2, ipMask2);
Assert.assertTrue(match1.equals(match2));
-
+
ipMask2 = InetAddress.getByName("255.255.255.255");
match2.setField(MatchType.NW_SRC, srcIP2, ipMask2);
ipMask2 = InetAddress.getByName("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
ethertype = EtherTypes.IPv6.shortValue();
ethertype2 = EtherTypes.IPv6.shortValue();
-
+
match1.setField(MatchType.DL_TYPE, ethertype);
match1.setField(MatchType.NW_SRC, srcIP, ipMask);
Assert.assertTrue(match1.equals(match2));
}
-
+
@Test
public void testCloning() throws Exception {
Node node = NodeCreator.createOFNode(7l);
Assert.assertTrue(data2[3] == 80);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] // OUTPUT: [01001011] [01101000] = {75, 104}
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] // OUTPUT: [01001011] [01101000] = {75, 104}
byte data10[] = new byte[2];
startOffset = 0;
numBits = 13;
Assert.assertTrue(data10[1] == 104);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] // OUTPUT: [01001000] = {72}
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] // OUTPUT: [01001000] = {72}
byte data11[] = new byte[4];
startOffset = 8;
numBits = 6;
Assert.assertTrue(data11[1] == 72);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01101000] = {75, 110, 105}
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01101000] = {75, 110, 105}
byte data12[] = new byte[4];
startOffset = 0;
numBits = 23;
Assert.assertTrue(data12[2] == 106);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01100000] = {75, 110, 96}
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01100000] = {75, 110, 96}
byte data13[] = new byte[4];
startOffset = 8;
numBits = 20;
Assert.assertTrue(data13[3] == 96);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01101011] [10100000]= {75, 110, 107, 80}
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01101011] [10100000]= {75, 110, 107, 80}
byte data14[] = new byte[4];
startOffset = 0;
numBits = 30;
//CASE 3: startOffset%8 != 0, numBits%8 = 0
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00001001] [11000000] = {72, 96}
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00001001] [11000000] = {72, 96}
byte data16[] = new byte[5];
startOffset = 3;
numBits = 8;
Assert.assertTrue(data16[2] == 0);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [00000100] [1011 0110] [1110 0000] = {4, -54, -96}
startOffset = 3;
Assert.assertTrue(data18[2] == -64);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [0000 1001] [0110 1101] [1100 1101] [0110 1010] [0000 0001] = {9, 109, -51, 106, 0}
startOffset = 3;
Assert.assertTrue(data19[4] == 0);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: data[4, 5, 6] = [0 010 0101] [1 011 0111] [0 000 0000] = {37, -73, 0}
startOffset = 33;
numBits = 16;
//CASE 4: extranumBits != 0 AND extraOffsetBits != 0
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [0000 1001] [0100 0000] = {9, 96}
startOffset = 3;
numBits = 7;
Assert.assertTrue(data21[2] == 0);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: data = [00000 010] [01011 011] [01110 000] = {37, -73, 0}
startOffset = 5;
numBits = 17;
Assert.assertTrue(data22[2] == 112);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [0000 1001] [0110 1101] [110 01101] [01 00000] = {9, 109, -51, 64}
startOffset = 3;
numBits = 23;
Assert.assertTrue(data23[3] == 64);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [0000 1001] [0110 1101] = {9, 109}
startOffset = 3;
numBits = 13;
Assert.assertTrue(data24[2] == 0);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [0000 0100] [1011 0110] [1110 0110] = {4, -74, -26}
startOffset = 4;
numBits = 20;
Assert.assertTrue(data25[3] == -0);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [0000 0010] [0101 1011] = {0, 2, 91, 0}
startOffset = 13;
numBits = 11;
Assert.assertTrue(data26[3] == 0);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [000 01001] [011 01101] [110 0 0000] = {9, 109, -64, 0}
startOffset = 3;
numBits = 17;
Assert.assertTrue(data27[3] == 0);
// INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
- // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
// OUTPUT: [00 000000] [00 000000] [00 010010] [11 011011] [10 011010] [11 010100] [0000 0000] = {0, 0, 18, -37,-102,-44,0}
startOffset = 18;
numBits = 34;
Assert.assertTrue(sMAC[5] == -97);
Assert.assertTrue(etherType == 0x806);
-
+
ARP arpPacket = (ARP) eth.getPayload();
-
+
Assert.assertTrue(arpPacket.getHardwareType() == (byte)0x1);
Assert.assertTrue(arpPacket.getProtocolType() == 2048);
Assert.assertTrue(arpPacket.getHardwareAddressLength() == (byte)0x6);
Assert.assertTrue(arpPacket.getProtocolAddressLength() == (byte)0x4);
Assert.assertTrue(arpPacket.getOpCode() == 1);
-
+
byte[] senderHwAddress = arpPacket.getSenderHardwareAddress();
- byte[] senderProtocolAddress = arpPacket.getSenderProtocolAddress();
-
+ byte[] senderProtocolAddress = arpPacket.getSenderProtocolAddress();
+
byte[] targetHwAddress = arpPacket.getTargetHardwareAddress();
- byte[] targetProtocolAddress = arpPacket.getTargetProtocolAddress();
+ byte[] targetProtocolAddress = arpPacket.getTargetProtocolAddress();
+
-
Assert.assertTrue(senderHwAddress[0] == (byte)0xA6);
Assert.assertTrue(senderHwAddress[1] == (byte)0xEC);
Assert.assertTrue(senderHwAddress[2] == (byte)0x9C);
Assert.assertTrue(senderHwAddress[3] == (byte)0xAE);
Assert.assertTrue(senderHwAddress[4] == (byte)0xB2);
Assert.assertTrue(senderHwAddress[5] == (byte)0x9F);
-
+
Assert.assertTrue(senderProtocolAddress[0] == (byte)0x9);
Assert.assertTrue(senderProtocolAddress[1] == (byte)0x9);
Assert.assertTrue(senderProtocolAddress[2] == (byte)0x9);
Assert.assertTrue(targetProtocolAddress[0] == (byte)0x9);
Assert.assertTrue(targetProtocolAddress[1] == (byte)0x9);
Assert.assertTrue(targetProtocolAddress[2] == (byte)0x9);
- Assert.assertTrue(targetProtocolAddress[3] == (byte)0xFE);
+ Assert.assertTrue(targetProtocolAddress[3] == (byte)0xFE);
}
@Test
import org.opendaylight.controller.sal.utils.EtherTypes;
import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
import org.opendaylight.controller.sal.utils.NodeCreator;
-
+
public class FlowOnNodeTest {
- @Test
- public void testFlowOnNodeMethods () {
- Match match = new Match();
- NodeConnector inNC = NodeConnectorCreator.createNodeConnector((short)10, NodeCreator.createOFNode((long)10));
- NodeConnector outNC = NodeConnectorCreator.createNodeConnector((short)20, NodeCreator.createOFNode((long)20));
-
- match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
- match.setField(MatchType.IN_PORT, inNC);
-
- Output output = new Output(outNC);
- ArrayList<Action> action = new ArrayList<Action>();
- action.add(output);
-
- Flow flow = new Flow (match, action);
-
- FlowOnNode flowOnNode = new FlowOnNode (flow);
-
- Assert.assertTrue(flowOnNode.getFlow().equals(flow));
-
- flowOnNode.setPacketCount((long)100);
- flowOnNode.setByteCount((long)800);
- flowOnNode.setTableId((byte)0x55);
- flowOnNode.setDurationNanoseconds(40);
- flowOnNode.setDurationSeconds(45);
-
- Assert.assertTrue(flowOnNode.getPacketCount() == 100);
- Assert.assertTrue(flowOnNode.getByteCount() == 800);
- Assert.assertTrue(flowOnNode.getDurationNanoseconds() == 40);
- Assert.assertTrue(flowOnNode.getDurationSeconds() == 45);
- Assert.assertTrue(flowOnNode.getTableId() == (byte)0x55);
- }
+ @Test
+ public void testFlowOnNodeMethods () {
+ Match match = new Match();
+ NodeConnector inNC = NodeConnectorCreator.createNodeConnector((short)10, NodeCreator.createOFNode((long)10));
+ NodeConnector outNC = NodeConnectorCreator.createNodeConnector((short)20, NodeCreator.createOFNode((long)20));
+
+ match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
+ match.setField(MatchType.IN_PORT, inNC);
+
+ Output output = new Output(outNC);
+ ArrayList<Action> action = new ArrayList<Action>();
+ action.add(output);
+
+ Flow flow = new Flow (match, action);
+
+ FlowOnNode flowOnNode = new FlowOnNode (flow);
+
+ Assert.assertTrue(flowOnNode.getFlow().equals(flow));
+
+ flowOnNode.setPacketCount((long)100);
+ flowOnNode.setByteCount((long)800);
+ flowOnNode.setTableId((byte)0x55);
+ flowOnNode.setDurationNanoseconds(40);
+ flowOnNode.setDurationSeconds(45);
+
+ Assert.assertTrue(flowOnNode.getPacketCount() == 100);
+ Assert.assertTrue(flowOnNode.getByteCount() == 800);
+ Assert.assertTrue(flowOnNode.getDurationNanoseconds() == 40);
+ Assert.assertTrue(flowOnNode.getDurationSeconds() == 45);
+ Assert.assertTrue(flowOnNode.getTableId() == (byte)0x55);
+ }
}
\ No newline at end of file
public class NodeConnectorStatisticsTest {
- @Test
- public void testNodeConnectorStatisticsMethods() {
- NodeConnector nc = NodeConnectorCreator.createNodeConnector((short)20, NodeCreator.createOFNode((long)20));
- NodeConnectorStatistics ncStats = new NodeConnectorStatistics();
- ncStats.setNodeConnector(nc);
- ncStats.setReceiveByteCount(800);
- ncStats.setReceiveCRCErrorCount(10);
- ncStats.setReceiveDropCount(5);
- ncStats.setReceiveErrorCount(20);
- ncStats.setReceiveFrameErrorCount(25);
- ncStats.setReceiveOverRunErrorCount(30);
- ncStats.setReceivePacketCount(100);
- ncStats.setTransmitByteCount(400);
- ncStats.setTransmitDropCount(15);
- ncStats.setTransmitErrorCount(18);
- ncStats.setTransmitPacketCount(50);
- ncStats.setCollisionCount(2);
-
- Assert.assertTrue(ncStats.getCollisionCount() == 2);
- Assert.assertTrue(ncStats.getTransmitPacketCount() == 50);
- Assert.assertTrue(ncStats.getTransmitErrorCount() == 18);
- Assert.assertTrue(ncStats.getTransmitDropCount() == 15);
- Assert.assertTrue(ncStats.getReceivePacketCount() == 100);
- Assert.assertTrue(ncStats.getReceiveOverRunErrorCount() == 30);
- Assert.assertTrue(ncStats.getReceiveFrameErrorCount() == 25);
- Assert.assertTrue(ncStats.getReceiveDropCount() == 5);
- Assert.assertTrue(ncStats.getReceiveCRCErrorCount() == 10);
- Assert.assertTrue(ncStats.getReceiveByteCount() == 800);
- Assert.assertTrue(ncStats.getNodeConnector().equals(nc));
- }
-}
+ @Test
+ public void testNodeConnectorStatisticsMethods() {
+ NodeConnector nc = NodeConnectorCreator.createNodeConnector((short)20, NodeCreator.createOFNode((long)20));
+ NodeConnectorStatistics ncStats = new NodeConnectorStatistics();
+ ncStats.setNodeConnector(nc);
+ ncStats.setReceiveByteCount(800);
+ ncStats.setReceiveCRCErrorCount(10);
+ ncStats.setReceiveDropCount(5);
+ ncStats.setReceiveErrorCount(20);
+ ncStats.setReceiveFrameErrorCount(25);
+ ncStats.setReceiveOverRunErrorCount(30);
+ ncStats.setReceivePacketCount(100);
+ ncStats.setTransmitByteCount(400);
+ ncStats.setTransmitDropCount(15);
+ ncStats.setTransmitErrorCount(18);
+ ncStats.setTransmitPacketCount(50);
+ ncStats.setCollisionCount(2);
+ Assert.assertTrue(ncStats.getCollisionCount() == 2);
+ Assert.assertTrue(ncStats.getTransmitPacketCount() == 50);
+ Assert.assertTrue(ncStats.getTransmitErrorCount() == 18);
+ Assert.assertTrue(ncStats.getTransmitDropCount() == 15);
+ Assert.assertTrue(ncStats.getReceivePacketCount() == 100);
+ Assert.assertTrue(ncStats.getReceiveOverRunErrorCount() == 30);
+ Assert.assertTrue(ncStats.getReceiveFrameErrorCount() == 25);
+ Assert.assertTrue(ncStats.getReceiveDropCount() == 5);
+ Assert.assertTrue(ncStats.getReceiveCRCErrorCount() == 10);
+ Assert.assertTrue(ncStats.getReceiveByteCount() == 800);
+ Assert.assertTrue(ncStats.getNodeConnector().equals(nc));
+ }
+}
import org.opendaylight.controller.sal.reader.NodeDescription;
public class NodeDescriptionTest {
-
- @Test
- public void testNodeDescriptionMethods() {
- NodeDescription ncDesc = new NodeDescription();
- ncDesc.setHardware("Hardware1");
- ncDesc.setManufacturer("Manufacturer1");
- ncDesc.setDescription("SDNProtocol1");
- ncDesc.setSerialNumber("serialNumber1");
- ncDesc.setSoftware("Software1");
-
- Assert.assertTrue(ncDesc.getHardware().equals("Hardware1"));
- Assert.assertTrue(ncDesc.getManufacturer().equals("Manufacturer1"));
- Assert.assertTrue(ncDesc.getDescription().equals("SDNProtocol1"));
- Assert.assertTrue(ncDesc.getSerialNumber().equals("serialNumber1"));
- Assert.assertTrue(ncDesc.getSoftware().equals("Software1"));
-
- Assert.assertFalse(ncDesc.getHardware().equals("Hardware2"));
- Assert.assertFalse(ncDesc.getManufacturer().equals("Manufacturer2"));
- Assert.assertFalse(ncDesc.getDescription().equals("SDNProtocol2"));
- Assert.assertFalse(ncDesc.getSerialNumber().equals("serialNumber2"));
- Assert.assertFalse(ncDesc.getSoftware().equals("Software2"));
-
- }
-}
-
-
+ @Test
+ public void testNodeDescriptionMethods() {
+ NodeDescription ncDesc = new NodeDescription();
+ ncDesc.setHardware("Hardware1");
+ ncDesc.setManufacturer("Manufacturer1");
+ ncDesc.setDescription("SDNProtocol1");
+ ncDesc.setSerialNumber("serialNumber1");
+ ncDesc.setSoftware("Software1");
+
+ Assert.assertTrue(ncDesc.getHardware().equals("Hardware1"));
+ Assert.assertTrue(ncDesc.getManufacturer().equals("Manufacturer1"));
+ Assert.assertTrue(ncDesc.getDescription().equals("SDNProtocol1"));
+ Assert.assertTrue(ncDesc.getSerialNumber().equals("serialNumber1"));
+ Assert.assertTrue(ncDesc.getSoftware().equals("Software1"));
+
+ Assert.assertFalse(ncDesc.getHardware().equals("Hardware2"));
+ Assert.assertFalse(ncDesc.getManufacturer().equals("Manufacturer2"));
+ Assert.assertFalse(ncDesc.getDescription().equals("SDNProtocol2"));
+ Assert.assertFalse(ncDesc.getSerialNumber().equals("serialNumber2"));
+ Assert.assertFalse(ncDesc.getSoftware().equals("Software2"));
+
+ }
+}
import java.util.ArrayList;
import org.junit.Assert;
import org.junit.Test;
-
+
public class EtherTypesTest {
-
- @Test
- public void testEthertypesCreation() {
-
- EtherTypes arp = EtherTypes.ARP;
-
- Assert.assertTrue(arp.toString().equals("ARP"));
- Assert.assertTrue(arp.intValue() == 2054);
- Assert.assertTrue(arp.shortValue() == (short)2054);
- }
-
- @Test
- public void testGetEtherTypesString() {
-
- Assert.assertTrue(EtherTypes.getEtherTypeName(34984).equals("QINQ"));
- Assert.assertTrue(EtherTypes.getEtherTypeName((short)2048).equals("IPv4"));
- Assert.assertTrue(EtherTypes.getEtherTypeName(0x010B).equals("PVSTP"));
-
- Assert.assertFalse(EtherTypes.getEtherTypeName(0x800).equals("ARP"));
- }
-
- @Test
- public void testGetEtherTypesNumber() {
- Assert.assertTrue(EtherTypes.getEtherTypeNumberInt("VLAN Tagged") == 33024);
- Assert.assertTrue(EtherTypes.getEtherTypeNumberShort("ARP") == 2054);
-
- Assert.assertFalse(EtherTypes.getEtherTypeNumberInt("CDP") == 1000);
- }
-
- @Test
- public void testGetEtherTypesList() {
- ArrayList<String> etherTypeNames = (ArrayList<String>) EtherTypes.getEtherTypesNameList();
- Assert.assertTrue(etherTypeNames.get(0).equals("PVSTP"));
- Assert.assertTrue(etherTypeNames.get(1).equals("CDP"));
- Assert.assertTrue(etherTypeNames.get(2).equals("VTP"));
- Assert.assertTrue(etherTypeNames.get(3).equals("IPv4"));
- Assert.assertTrue(etherTypeNames.get(4).equals("ARP"));
- Assert.assertTrue(etherTypeNames.get(5).equals("Reverse ARP"));
- Assert.assertTrue(etherTypeNames.get(6).equals("VLAN Tagged"));
- Assert.assertTrue(etherTypeNames.get(7).equals("IPv6"));
- Assert.assertTrue(etherTypeNames.get(8).equals("MPLS Unicast"));
- Assert.assertTrue(etherTypeNames.get(9).equals("MPLS Multicast"));
- Assert.assertTrue(etherTypeNames.get(10).equals("QINQ"));
- Assert.assertTrue(etherTypeNames.get(11).equals("LLDP"));
- Assert.assertTrue(etherTypeNames.get(12).equals("Old QINQ"));
- Assert.assertTrue(etherTypeNames.get(13).equals("Cisco QINQ"));
- }
-
- @Test
- public void testGetEtherTypesloadFromString() {
- Assert.assertTrue(EtherTypes.loadFromString("37376").equals(EtherTypes.CISCOQINQ));
- Assert.assertTrue(EtherTypes.loadFromString("100") == null);
- }
-}
+ @Test
+ public void testEthertypesCreation() {
+
+ EtherTypes arp = EtherTypes.ARP;
+
+ Assert.assertTrue(arp.toString().equals("ARP"));
+ Assert.assertTrue(arp.intValue() == 2054);
+ Assert.assertTrue(arp.shortValue() == (short)2054);
+ }
+
+ @Test
+ public void testGetEtherTypesString() {
+ Assert.assertTrue(EtherTypes.getEtherTypeName(34984).equals("QINQ"));
+ Assert.assertTrue(EtherTypes.getEtherTypeName((short)2048).equals("IPv4"));
+ Assert.assertTrue(EtherTypes.getEtherTypeName(0x010B).equals("PVSTP"));
+ Assert.assertFalse(EtherTypes.getEtherTypeName(0x800).equals("ARP"));
+ }
+ @Test
+ public void testGetEtherTypesNumber() {
+ Assert.assertTrue(EtherTypes.getEtherTypeNumberInt("VLAN Tagged") == 33024);
+ Assert.assertTrue(EtherTypes.getEtherTypeNumberShort("ARP") == 2054);
+
+ Assert.assertFalse(EtherTypes.getEtherTypeNumberInt("CDP") == 1000);
+ }
+
+ @Test
+ public void testGetEtherTypesList() {
+ ArrayList<String> etherTypeNames = (ArrayList<String>) EtherTypes.getEtherTypesNameList();
+ Assert.assertTrue(etherTypeNames.get(0).equals("PVSTP"));
+ Assert.assertTrue(etherTypeNames.get(1).equals("CDP"));
+ Assert.assertTrue(etherTypeNames.get(2).equals("VTP"));
+ Assert.assertTrue(etherTypeNames.get(3).equals("IPv4"));
+ Assert.assertTrue(etherTypeNames.get(4).equals("ARP"));
+ Assert.assertTrue(etherTypeNames.get(5).equals("Reverse ARP"));
+ Assert.assertTrue(etherTypeNames.get(6).equals("VLAN Tagged"));
+ Assert.assertTrue(etherTypeNames.get(7).equals("IPv6"));
+ Assert.assertTrue(etherTypeNames.get(8).equals("MPLS Unicast"));
+ Assert.assertTrue(etherTypeNames.get(9).equals("MPLS Multicast"));
+ Assert.assertTrue(etherTypeNames.get(10).equals("QINQ"));
+ Assert.assertTrue(etherTypeNames.get(11).equals("LLDP"));
+ Assert.assertTrue(etherTypeNames.get(12).equals("Old QINQ"));
+ Assert.assertTrue(etherTypeNames.get(13).equals("Cisco QINQ"));
+ }
+
+ @Test
+ public void testGetEtherTypesloadFromString() {
+ Assert.assertTrue(EtherTypes.loadFromString("37376").equals(EtherTypes.CISCOQINQ));
+ Assert.assertTrue(EtherTypes.loadFromString("100") == null);
+ }
+
+}
*/
package org.opendaylight.controller.sal.utils;
-
+
import org.junit.Assert;
import org.junit.Test;
-
+
public class HexEncodeTest {
-
- @Test
- public void testbytesToHexString() {
- byte[] bytes1 = {(byte)0x01, (byte)0x02, (byte)0x03};
- String str1 = HexEncode.bytesToHexString(bytes1);
- Assert.assertTrue(str1.equals("010203"));
-
- byte[] bytes2 = {(byte)0x11, (byte)0x22, (byte)0x33};
- String str2 = HexEncode.bytesToHexString(bytes2);
- Assert.assertFalse(str2.equals("010203"));
-
- }
-
- @Test
- public void testLongToHexString() {
- long value1 = 12345678L;
- String str1 = HexEncode.longToHexString(value1);
- Assert.assertTrue(str1.equals("00:00:00:00:00:bc:61:4e"));
-
- long value2 = 98765432L;
- String str2 = HexEncode.longToHexString(value2);
- Assert.assertFalse(str2.equals("00:44:33:22:11:bc:61:4e"));
-
- }
-
- @Test
- public void testBytesFromHexString() {
- String byteStr1 = "00:11:22:33:44:55";
- byte byteArray1[] = new byte[(byteStr1.length() + 1)/3];
- byteArray1 = HexEncode.bytesFromHexString(byteStr1);
-
- Assert.assertTrue(byteArray1[0] == (byte)0x0);
- Assert.assertTrue(byteArray1[1] == (byte)0x11);
- Assert.assertTrue(byteArray1[2] == (byte)0x22);
- Assert.assertTrue(byteArray1[3] == (byte)0x33);
- Assert.assertTrue(byteArray1[4] == (byte)0x44);
- Assert.assertTrue(byteArray1[5] == (byte)0x55);
-
- String byteStr2 = "00:11:22:33:44:55";
- byte byteArray2[] = new byte[(byteStr2.length() + 1)/3];
- byteArray2 = HexEncode.bytesFromHexString(byteStr2);
-
- Assert.assertFalse(byteArray2[0] == (byte)0x55);
- Assert.assertFalse(byteArray2[1] == (byte)0x44);
- Assert.assertFalse(byteArray2[2] == (byte)0x33);
- Assert.assertFalse(byteArray2[3] == (byte)0x22);
- Assert.assertFalse(byteArray2[4] == (byte)0x11);
- Assert.assertFalse(byteArray2[5] == (byte)0x0);
-
- }
-
-}
+ @Test
+ public void testbytesToHexString() {
+ byte[] bytes1 = {(byte)0x01, (byte)0x02, (byte)0x03};
+ String str1 = HexEncode.bytesToHexString(bytes1);
+ Assert.assertTrue(str1.equals("010203"));
+
+ byte[] bytes2 = {(byte)0x11, (byte)0x22, (byte)0x33};
+ String str2 = HexEncode.bytesToHexString(bytes2);
+ Assert.assertFalse(str2.equals("010203"));
+
+ }
+
+ @Test
+ public void testLongToHexString() {
+ long value1 = 12345678L;
+ String str1 = HexEncode.longToHexString(value1);
+ Assert.assertTrue(str1.equals("00:00:00:00:00:bc:61:4e"));
+
+ long value2 = 98765432L;
+ String str2 = HexEncode.longToHexString(value2);
+ Assert.assertFalse(str2.equals("00:44:33:22:11:bc:61:4e"));
+ }
+ @Test
+ public void testBytesFromHexString() {
+ String byteStr1 = "00:11:22:33:44:55";
+ byte byteArray1[] = new byte[(byteStr1.length() + 1)/3];
+ byteArray1 = HexEncode.bytesFromHexString(byteStr1);
+
+ Assert.assertTrue(byteArray1[0] == (byte)0x0);
+ Assert.assertTrue(byteArray1[1] == (byte)0x11);
+ Assert.assertTrue(byteArray1[2] == (byte)0x22);
+ Assert.assertTrue(byteArray1[3] == (byte)0x33);
+ Assert.assertTrue(byteArray1[4] == (byte)0x44);
+ Assert.assertTrue(byteArray1[5] == (byte)0x55);
+
+ String byteStr2 = "00:11:22:33:44:55";
+ byte byteArray2[] = new byte[(byteStr2.length() + 1)/3];
+ byteArray2 = HexEncode.bytesFromHexString(byteStr2);
+
+ Assert.assertFalse(byteArray2[0] == (byte)0x55);
+ Assert.assertFalse(byteArray2[1] == (byte)0x44);
+ Assert.assertFalse(byteArray2[2] == (byte)0x33);
+ Assert.assertFalse(byteArray2[3] == (byte)0x22);
+ Assert.assertFalse(byteArray2[4] == (byte)0x11);
+ Assert.assertFalse(byteArray2[5] == (byte)0x0);
+
+ }
+
+}
/**
* Function called when the activator starts just after some initializations
* are done by the ComponentActivatorAbstractBase.
- *
+ *
*/
public void init() {
/**
* Function called when the activator stops just before the cleanup done by
* ComponentActivatorAbstractBase
- *
+ *
*/
public void destroy() {
/**
* Function that is used to communicate to dependency manager the list of
* known implementations for services inside a container
- *
- *
+ *
+ *
* @return An array containing all the CLASS objects that will be
* instantiated in order to get an fully working implementation
* Object
/**
* Function that is called when configuration of the dependencies is
* required.
- *
+ *
* @param c
* dependency manager Component object, used for configuring the
* dependencies exported and imported
public class DataPacketService implements IPluginOutDataPacketService,
IDataPacketService {
- private int RXMAXQUEUESIZE = 1000;
private int TXMAXQUEUESIZE = 1000;
protected static final Logger logger = LoggerFactory
.getLogger(DataPacketService.class);
pluginInDataService =
new ConcurrentHashMap<String, IPluginInDataPacketService>();
private Map<String, AtomicInteger> statistics = new HashMap<String, AtomicInteger>();
- /**
- * Queue for packets received from Data Path
- */
- private LinkedBlockingQueue<RawPacket> rxQueue = new LinkedBlockingQueue<RawPacket>(
- RXMAXQUEUESIZE);
+
/**
* Queue for packets that need to be transmitted to Data Path
*/
private LinkedBlockingQueue<RawPacket> txQueue = new LinkedBlockingQueue<RawPacket>(
- RXMAXQUEUESIZE);
+ TXMAXQUEUESIZE);
/**
* Transmission thread
*/
private Thread txThread = new Thread(new TxLoop(),
"DataPacketService TX thread");
- /**
- * Receiving thread
- */
- private Thread rxThread = new Thread(new RxLoop(),
- "DataPacketService RX thread");
/**
* Representation of a Data Packet Listener including of its
* properties
- *
*/
private class DataPacketListener {
// Key fields
/**
* Loop for processing Received packets
- *
*/
- private class RxLoop implements Runnable {
- public void run() {
- RawPacket pkt;
- try {
- for (pkt = rxQueue.take(); pkt != null; pkt = rxQueue.take()) {
- for (List<DataPacketListener> serialListeners : listenDataPacket) {
- int i = 0;
- for (i = 0; i < serialListeners.size(); i++) {
- RawPacket copyPkt = null;
- try {
- copyPkt = new RawPacket(pkt);
- } catch (ConstructionException cex) {
- logger.debug("Error while cloning the packet");
- }
- if (copyPkt == null) {
- increaseStat("RXPacketCopyFailed");
- continue;
- }
- DataPacketListener l = serialListeners.get(i);
- IListenDataPacket s = (l == null ? null
- : l.listener);
- if (s != null) {
- try {
- // TODO Make sure to filter based
- // on the match too, later on
- PacketResult res = s
- .receiveDataPacket(copyPkt);
- increaseStat("RXPacketSuccess");
- if (res.equals(PacketResult.CONSUME)) {
- increaseStat("RXPacketSerialExit");
- break;
- }
- } catch (Exception e) {
- increaseStat("RXPacketFailedForException");
- }
- }
+ private void dispatchPacket(RawPacket pkt) {
+
+ // for now we treat all listeners as serial listeners
+ for (List<DataPacketListener> serialListeners : listenDataPacket) {
+ for (DataPacketListener l : serialListeners) {
+
+ // TODO: possibly deal with read-only and read-write packet
+ // copies
+ IListenDataPacket s = (l == null ? null : l.listener);
+ if (s != null) {
+ try {
+ // TODO Make sure to filter based on the match too,
+ // later on
+ PacketResult res = s.receiveDataPacket(pkt);
+ increaseStat("RXPacketSuccess");
+ if (res.equals(PacketResult.CONSUME)) {
+ increaseStat("RXPacketSerialExit");
+ break;
}
+ } catch (Exception e) {
+ increaseStat("RXPacketFailedForException");
}
}
- } catch (InterruptedException e) {
- // Not a big deal
}
}
}
*/
void init() {
this.txThread.start();
- this.rxThread.start();
}
/**
this.indexDataPacket.clear();
this.pluginInDataService.clear();
this.statistics.clear();
- this.rxQueue.clear();
this.txQueue.clear();
this.txThread.interrupt();
- this.rxThread.interrupt();
// Wait for them to be done
try {
this.txThread.join();
- this.rxThread.join();
} catch (InterruptedException ex) {
// Not a big deal
}
return PacketResult.IGNORED;
}
- // If the queue was full don't wait, rather increase a counter
- // for it
- if (!this.rxQueue.offer(inPkt)) {
- increaseStat("fullRXQueue");
- return PacketResult.IGNORED;
- }
+ // send the packet off to be processed by listeners
+ this.dispatchPacket(inPkt);
// Walk the chain of listener going first throw all the
// parallel ones and for each parallel in serial
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init() {
logger.debug("INIT called!");
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
// Clear previous registration to avoid they are left hanging
/**
* Function called by dependency manager after "init ()" is called and after
* the services provided by the class are registered in the service registry
- *
+ *
*/
void start() {
logger.debug("START called!");
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
void stop() {
logger.debug("STOP called!");
}
ci.println(this.addFlow(node, getSampleFlow(node)));
}
-
+
public void _modifyflow(CommandInterpreter ci) throws UnknownHostException {
Node node = null;
String nodeId = ci.nextArgument();
/**
* This Request ID generator starts with 1. Each aysnc message is
* associated with an unique Request ID (!= 0).
- *
+ *
* @return Request ID
*/
private long getNextRid() {
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init() {
}
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
// Make sure to clear all the data structure we use to track
public class DataPacketServiceTest {
- @Test
- public void DataPacketServiceDecodeTest() throws ConstructionException, InstantiationException, IllegalAccessException {
-
- DataPacketService dService = new DataPacketService();
- RawPacket rawPkt = null;
-
- Assert.assertTrue(dService.decodeDataPacket(rawPkt) == null);
-
+ @Test
+ public void DataPacketServiceDecodeTest() throws ConstructionException, InstantiationException, IllegalAccessException {
+
+ DataPacketService dService = new DataPacketService();
+ RawPacket rawPkt = null;
+
+ Assert.assertTrue(dService.decodeDataPacket(rawPkt) == null);
+
byte[] data = { 10, 12, 14, 20, 55, 69, //DMAC
-90, -20, -100, -82, -78, -97, //SMAC
8, 6, //ethype
9, 9, 9, 1, // src proto
0, 0, 0, 0, 0, 0, // target hw addr
9, 9, 9, -2 }; // target proto
-
+
rawPkt = new RawPacket(data);
-
+
Packet decodedPkt = dService.decodeDataPacket(rawPkt);
Class<? extends Packet> payloadClass = ARP.class;
Assert.assertTrue(payloadClass == decodedPkt.getPayload().getClass());
-
+
ARP arpPacket = (ARP) decodedPkt.getPayload();
-
+
Assert.assertTrue(arpPacket.getHardwareType() == (byte)0x1);
Assert.assertTrue(arpPacket.getProtocolType() == 2048);
Assert.assertTrue(arpPacket.getHardwareAddressLength() == (byte)0x6);
Assert.assertTrue(arpPacket.getProtocolAddressLength() == (byte)0x4);
Assert.assertTrue(arpPacket.getOpCode() == 1);
-
+
byte[] senderHwAddress = arpPacket.getSenderHardwareAddress();
- byte[] senderProtocolAddress = arpPacket.getSenderProtocolAddress();
-
+ byte[] senderProtocolAddress = arpPacket.getSenderProtocolAddress();
+
byte[] targetHwAddress = arpPacket.getTargetHardwareAddress();
- byte[] targetProtocolAddress = arpPacket.getTargetProtocolAddress();
+ byte[] targetProtocolAddress = arpPacket.getTargetProtocolAddress();
+
-
Assert.assertTrue(senderHwAddress[0] == (byte)0xA6);
Assert.assertTrue(senderHwAddress[1] == (byte)0xEC);
Assert.assertTrue(senderHwAddress[2] == (byte)0x9C);
Assert.assertTrue(senderHwAddress[3] == (byte)0xAE);
Assert.assertTrue(senderHwAddress[4] == (byte)0xB2);
Assert.assertTrue(senderHwAddress[5] == (byte)0x9F);
-
+
Assert.assertTrue(senderProtocolAddress[0] == (byte)0x9);
Assert.assertTrue(senderProtocolAddress[1] == (byte)0x9);
Assert.assertTrue(senderProtocolAddress[2] == (byte)0x9);
Assert.assertTrue(targetProtocolAddress[0] == (byte)0x9);
Assert.assertTrue(targetProtocolAddress[1] == (byte)0x9);
Assert.assertTrue(targetProtocolAddress[2] == (byte)0x9);
- Assert.assertTrue(targetProtocolAddress[3] == (byte)0xFE);
- }
-
- @Test
- public void DataPacketServiceEncodeTest() throws ConstructionException, InstantiationException, IllegalAccessException {
-
- DataPacketService dService = new DataPacketService();
- Ethernet eth = new Ethernet();
+ Assert.assertTrue(targetProtocolAddress[3] == (byte)0xFE);
+ }
+
+ @Test
+ public void DataPacketServiceEncodeTest() throws ConstructionException, InstantiationException, IllegalAccessException {
+
+ DataPacketService dService = new DataPacketService();
+ Ethernet eth = new Ethernet();
ARP arp = new ARP();
- byte[] data = null;
- RawPacket rawPkt;
+ byte[] data = null;
+ RawPacket rawPkt;
byte[] dMAC = { 10, 12, 14, 20, 55, 69 };
byte[] sMAC = { 82, 97, 109, 117, 127, -50 };
short etherType = 2054;
-
+
eth.setDestinationMACAddress(dMAC);
eth.setSourceMACAddress(sMAC);
eth.setEtherType(etherType);
-
+
arp.setHardwareType((short)1);
arp.setProtocolType((short)2048);
arp.setHardwareAddressLength((byte)0x6);
arp.setProtocolAddressLength((byte)0x4);
arp.setOpCode((byte)0x1);
-
+
byte[] senderHardwareAddress = {(byte)0xA6, (byte)0xEC, (byte)0x9C, (byte)0xAE,
- (byte)0xB2, (byte)0x9F};
+ (byte)0xB2, (byte)0x9F};
byte[] senderProtocolAddress = {(byte)0x09, (byte)0x09, (byte)0x09, (byte)0x01};
byte[] targetProtocolAddress = {(byte)0x09, (byte)0x09, (byte)0x09, (byte)0xFE};
byte[] targetHardwareAddress = {(byte)0x0, (byte)0x0, (byte)0x0, (byte)0x0, (byte)0x0, (byte)0x0};
arp.setSenderProtocolAddress(senderProtocolAddress);
arp.setTargetHardwareAddress(targetHardwareAddress);
arp.setTargetProtocolAddress(targetProtocolAddress);
-
+
arp.setParent(eth);
eth.setPayload(arp);
-
+
rawPkt = dService.encodeDataPacket(eth);
data = rawPkt.getPacketData();
-
+
Assert.assertTrue(data[0] == (byte)0x0A);//Destination MAC
Assert.assertTrue(data[1] == (byte)0x0C);
Assert.assertTrue(data[2] == (byte)0x0E);
Assert.assertTrue(data[38] == (byte)0x09);//Target Protocol Address
Assert.assertTrue(data[39] == (byte)0x09);
Assert.assertTrue(data[40] == (byte)0x09);
- Assert.assertTrue(data[41] == (byte)0xFE);
- }
+ Assert.assertTrue(data[41] == (byte)0xFE);
+ }
}
*/\r
package org.opendaylight.controller.sal.binding.generator.api;\r
\r
-import java.util.List;\r
-\r
import org.opendaylight.controller.sal.binding.model.api.Type;\r
+import org.opendaylight.controller.yang.model.api.Module;\r
import org.opendaylight.controller.yang.model.api.SchemaContext;\r
\r
+import java.util.List;\r
+import java.util.Set;\r
+\r
+/**\r
+ *\r
+ *\r
+ */\r
public interface BindingGenerator {\r
\r
+ /**\r
+ * Generate Types from whole Schema Context.\r
+ * <br>\r
+ * The method will return List of All Generated Types that could be\r
+ * Generated from Schema Context.\r
+ *\r
+ *\r
+ * @param context Schema Context\r
+ * @return List of Generated Types\r
+ *\r
+ * @see SchemaContext\r
+ */\r
public List<Type> generateTypes(final SchemaContext context);\r
\r
+ /**\r
+ * Generate Types from Schema Context restricted by sub set of specified\r
+ * Modules. The Schema Context MUST contain all of the sub modules\r
+ * otherwise the there is no guarantee that result List of Generated\r
+ * Types will contain correct Generated Types.\r
+ *\r
+ * @param context Schema Context\r
+ * @param modules Sub Set of Modules\r
+ * @return List of Generated Types restricted by sub set of Modules\r
+ *\r
+ * @see Module\r
+ * @see SchemaContext\r
+ *\r
+ */\r
+ public List<Type> generateTypes(final SchemaContext context,\r
+ final Set<Module> modules);\r
}\r
import org.opendaylight.controller.binding.generator.util.generated.type.builder.GeneratedTypeBuilderImpl;
import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
import org.opendaylight.controller.sal.binding.generator.spi.TypeProvider;
-import org.opendaylight.controller.sal.binding.model.api.GeneratedTransferObject;
-import org.opendaylight.controller.sal.binding.model.api.GeneratedType;
-import org.opendaylight.controller.sal.binding.model.api.Type;
+import org.opendaylight.controller.sal.binding.model.api.*;
import org.opendaylight.controller.sal.binding.model.api.type.builder.*;
import org.opendaylight.controller.sal.binding.yang.types.TypeProviderImpl;
import org.opendaylight.controller.yang.common.QName;
import static org.opendaylight.controller.yang.model.util.SchemaContextUtil.findDataSchemaNode;
import static org.opendaylight.controller.yang.model.util.SchemaContextUtil.findParentModule;
-public class BindingGeneratorImpl implements BindingGenerator {
+public final class BindingGeneratorImpl implements BindingGenerator {
private Map<String, Map<String, GeneratedTypeBuilder>> genTypeBuilders;
private TypeProvider typeProvider;
@Override
public List<Type> generateTypes(final SchemaContext context) {
if (context == null) {
- throw new IllegalArgumentException("Schema Context reference cannot be NULL");
+ throw new IllegalArgumentException("Schema Context reference " +
+ "cannot be NULL!");
}
if (context.getModules() == null) {
- throw new IllegalStateException("Schema Context does not contain defined modules!");
+ throw new IllegalStateException("Schema Context does not contain " +
+ "defined modules!");
}
- final List<Type> genTypes = new ArrayList<>();
+ final List<Type> generatedTypes = new ArrayList<>();
schemaContext = context;
typeProvider = new TypeProviderImpl(context);
final Set<Module> modules = context.getModules();
genTypeBuilders = new HashMap<>();
for (final Module module : modules) {
- final DataNodeIterator moduleIterator = new DataNodeIterator(
- module);
-
- final List<AugmentationSchema> sortedAugmentations = provideSortedAugmentations(module);
- final List<ContainerSchemaNode> schemaContainers = moduleIterator
- .allContainers();
- final List<ListSchemaNode> schemaLists = moduleIterator
- .allLists();
-
- final String basePackageName = moduleNamespaceToPackageName(module);
- if ((schemaContainers != null)
- && !schemaContainers.isEmpty()) {
- for (final ContainerSchemaNode container : schemaContainers) {
- genTypes.add(containerToGenType(basePackageName,
- container));
- }
- }
- if ((schemaLists != null) && !schemaLists.isEmpty()) {
- for (final ListSchemaNode list : schemaLists) {
- genTypes.addAll(listToGenType(basePackageName, list));
- }
+ generatedTypes.add(moduleToDataType(module));
+ generatedTypes.addAll(allTypeDefinitionsToGenTypes(module));
+ generatedTypes.addAll(allContainersToGenTypes(module));
+ generatedTypes.addAll(allListsToGenTypes(module));
+ generatedTypes.addAll(allAugmentsToGenTypes(module));
+ generatedTypes.addAll(allRPCMethodsToGenType(module));
+ generatedTypes.addAll(allNotifycationsToGenType(module));
+ }
+ return generatedTypes;
+ }
+
+ @Override
+ public List<Type> generateTypes(final SchemaContext context,
+ final Set<Module> modules) {
+ if (context == null) {
+ throw new IllegalArgumentException("Schema Context reference " +
+ "cannot be NULL!");
+ }
+ if (context.getModules() == null) {
+ throw new IllegalStateException("Schema Context does not contain " +
+ "defined modules!");
+ }
+ if (modules == null) {
+ throw new IllegalArgumentException("Sef of Modules cannot be " +
+ "NULL!");
+ }
+
+ final List<Type> filteredGenTypes = new ArrayList<>();
+ schemaContext = context;
+ typeProvider = new TypeProviderImpl(context);
+ final Set<Module> contextModules = context.getModules();
+ genTypeBuilders = new HashMap<>();
+ for (final Module contextModule : contextModules) {
+ final List<Type> generatedTypes = new ArrayList<>();
+
+ generatedTypes.add(moduleToDataType(contextModule));
+ generatedTypes.addAll(allTypeDefinitionsToGenTypes(contextModule));
+ generatedTypes.addAll(allContainersToGenTypes(contextModule));
+ generatedTypes.addAll(allListsToGenTypes(contextModule));
+ generatedTypes.addAll(allAugmentsToGenTypes(contextModule));
+ generatedTypes.addAll(allRPCMethodsToGenType(contextModule));
+ generatedTypes.addAll(allNotifycationsToGenType(contextModule));
+
+ if (modules.contains(contextModule)) {
+ filteredGenTypes.addAll(generatedTypes);
}
+ }
+ return filteredGenTypes;
+ }
+
+ private List<Type> allTypeDefinitionsToGenTypes(final Module module) {
+ if (module == null) {
+ throw new IllegalArgumentException("Module reference cannot be NULL!");
+ }
+ if (module.getName() == null) {
+ throw new IllegalArgumentException("Module name cannot be NULL!");
+ }
+ if (module.getTypeDefinitions() == null) {
+ throw new IllegalArgumentException("Type Definitions for module " +
+ module.getName() + " cannot be NULL!");
+ }
- if ((sortedAugmentations != null)
- && !sortedAugmentations.isEmpty()) {
- for (final AugmentationSchema augment : sortedAugmentations) {
- genTypes.addAll(augmentationToGenTypes(basePackageName, augment));
+ final Set<TypeDefinition<?>> typeDefinitions = module
+ .getTypeDefinitions();
+ final List<Type> generatedTypes = new ArrayList<>();
+ for (final TypeDefinition<?> typedef : typeDefinitions) {
+ if (typedef != null) {
+ final Type type = ((TypeProviderImpl)typeProvider)
+ .generatedTypeForExtendedDefinitionType(typedef);
+ if ((type != null) && !generatedTypes.contains(type)) {
+ generatedTypes.add(type);
}
}
+ }
+ return generatedTypes;
+ }
- final GeneratedType genDataType = moduleToDataType(basePackageName, module);
- final List<GeneratedType> genRpcType = rpcMethodsToGenType(basePackageName, module);
- final List<Type> genNotifyType = notifycationsToGenType(basePackageName, module);
+ private List<Type> allContainersToGenTypes(final Module module) {
+ if (module == null) {
+ throw new IllegalArgumentException("Module reference cannot be NULL!");
+ }
- if (genDataType != null) {
- genTypes.add(genDataType);
- }
- if (genRpcType != null) {
- genTypes.addAll(genRpcType);
- }
- if (genNotifyType != null) {
- genTypes.addAll(genNotifyType);
+ if (module.getName() == null) {
+ throw new IllegalArgumentException("Module name cannot be NULL!");
+ }
+
+ if (module.getChildNodes() == null) {
+ throw new IllegalArgumentException("Reference to Set of Child " +
+ "Nodes in module " + module.getName() + " cannot be " +
+ "NULL!");
+ }
+
+ final List<Type> generatedTypes = new ArrayList<>();
+ final DataNodeIterator it = new DataNodeIterator(
+ module);
+ final List<ContainerSchemaNode> schemaContainers = it.allContainers();
+ final String basePackageName = moduleNamespaceToPackageName(module);
+ for (final ContainerSchemaNode container : schemaContainers) {
+ generatedTypes.add(containerToGenType(basePackageName,
+ container));
+ }
+ return generatedTypes;
+ }
+
+ private List<Type> allListsToGenTypes(final Module module) {
+ if (module == null) {
+ throw new IllegalArgumentException("Module reference cannot be NULL!");
+ }
+
+ if (module.getName() == null) {
+ throw new IllegalArgumentException("Module name cannot be NULL!");
+ }
+
+ if (module.getChildNodes() == null) {
+ throw new IllegalArgumentException("Reference to Set of Child " +
+ "Nodes in module " + module.getName() + " cannot be " +
+ "NULL!");
+ }
+
+ final List<Type> generatedTypes = new ArrayList<>();
+ final DataNodeIterator it = new DataNodeIterator(
+ module);
+ final List<ListSchemaNode> schemaLists = it.allLists();
+ final String basePackageName = moduleNamespaceToPackageName(module);
+ if (schemaLists != null) {
+ for (final ListSchemaNode list : schemaLists) {
+ generatedTypes.addAll(listToGenType(basePackageName, list));
}
}
- genTypes.addAll(((TypeProviderImpl) typeProvider)
- .getGeneratedTypeDefs());
+ return generatedTypes;
+ }
- return genTypes;
+ private List<Type> allAugmentsToGenTypes(final Module module) {
+ if (module == null) {
+ throw new IllegalArgumentException("Module reference cannot be NULL!");
+ }
+
+ if (module.getName() == null) {
+ throw new IllegalArgumentException("Module name cannot be NULL!");
+ }
+
+ if (module.getChildNodes() == null) {
+ throw new IllegalArgumentException("Reference to Set of " +
+ "Augmentation Definitions in module " + module.getName()
+ + " cannot be NULL!");
+ }
+
+ final List<Type> generatedTypes = new ArrayList<>();
+ final String basePackageName = moduleNamespaceToPackageName(module);
+ final List<AugmentationSchema> augmentations = resolveAugmentations(module);
+ for (final AugmentationSchema augment : augmentations) {
+ generatedTypes.addAll(augmentationToGenTypes(basePackageName, augment));
+ }
+ return generatedTypes;
}
- private List<AugmentationSchema> provideSortedAugmentations(final Module module) {
+ private List<AugmentationSchema> resolveAugmentations(final Module module) {
if (module == null) {
throw new IllegalArgumentException("Module reference cannot be NULL!");
}
return sortedAugmentations;
}
- private GeneratedType moduleToDataType(final String basePackageName, final Module module) {
+ private GeneratedType moduleToDataType(final Module module) {
if (module == null) {
throw new IllegalArgumentException("Module reference cannot be NULL!");
}
final GeneratedTypeBuilder moduleDataTypeBuilder = moduleTypeBuilder(
module, "Data");
+ final String basePackageName = moduleNamespaceToPackageName(module);
if (moduleDataTypeBuilder != null) {
final Set<DataSchemaNode> dataNodes = module.getChildNodes();
resolveDataSchemaNodes(basePackageName, moduleDataTypeBuilder, dataNodes);
return moduleDataTypeBuilder.toInstance();
}
- private boolean isDerivedFromEnumerationType(
- final TypeDefinition<?> typeDefinition) {
- if (typeDefinition != null) {
- if (typeDefinition.getBaseType() instanceof EnumTypeDefinition) {
- return true;
- } else if (typeDefinition.getBaseType() instanceof ExtendedType) {
- return isDerivedFromEnumerationType(typeDefinition
- .getBaseType());
+ private List<GeneratedType> allRPCMethodsToGenType(final Module module) {
+ if (module == null) {
+ throw new IllegalArgumentException("Module reference cannot be NULL!");
+ }
+
+ if (module.getName() == null) {
+ throw new IllegalArgumentException("Module name cannot be NULL!");
+ }
+
+ if (module.getChildNodes() == null) {
+ throw new IllegalArgumentException("Reference to Set of " +
+ "RPC Method Definitions in module " + module.getName()
+ + " cannot be NULL!");
+ }
+
+ final String basePackageName = moduleNamespaceToPackageName(module);
+ final Set<RpcDefinition> rpcDefinitions = module.getRpcs();
+ final List<GeneratedType> genRPCTypes = new ArrayList<>();
+
+ for (final RpcDefinition rpc : rpcDefinitions) {
+ if (rpc != null) {
+ final List<DataNodeIterator> rpcInOut = new ArrayList<>();
+ rpcInOut.add(new DataNodeIterator(rpc.getInput()));
+ rpcInOut.add(new DataNodeIterator(rpc.getOutput()));
+
+ for (DataNodeIterator it : rpcInOut) {
+ List<ContainerSchemaNode> nContainers = it.allContainers();
+ if ((nContainers != null) && !nContainers.isEmpty()) {
+ for (final ContainerSchemaNode container : nContainers) {
+ genRPCTypes.add(containerToGenType(basePackageName, container));
+ }
+ }
+ }
}
}
- return false;
+ return genRPCTypes;
+ }
+
+ private List<Type> allNotifycationsToGenType(final Module module) {
+ if (module == null) {
+ throw new IllegalArgumentException("Module reference cannot be NULL!");
+ }
+
+ if (module.getName() == null) {
+ throw new IllegalArgumentException("Module name cannot be NULL!");
+ }
+
+ if (module.getChildNodes() == null) {
+ throw new IllegalArgumentException("Reference to Set of " +
+ "Notification Definitions in module " + module.getName()
+ + " cannot be NULL!");
+ }
+
+ final String basePackageName = moduleNamespaceToPackageName(module);
+ final List<Type> genNotifyTypes = new ArrayList<>();
+ final Set<NotificationDefinition> notifications = module
+ .getNotifications();
+
+ for (final NotificationDefinition notification : notifications) {
+ if (notification != null) {
+ final List<DataNodeIterator> notifyChildren = new ArrayList<>();
+
+ for (DataSchemaNode childNode : notification.getChildNodes()) {
+ if (childNode instanceof DataNodeContainer) {
+ notifyChildren.add(new DataNodeIterator((DataNodeContainer) childNode));
+ }
+ }
+
+ for (DataNodeIterator it : notifyChildren) {
+ List<ContainerSchemaNode> nContainers = it.allContainers();
+ List<ListSchemaNode> nLists = it.allLists();
+ if ((nContainers != null) && !nContainers.isEmpty()) {
+ for (final ContainerSchemaNode container : nContainers) {
+ genNotifyTypes.add(containerToGenType(basePackageName, container));
+ }
+ }
+ if ((nLists != null) && !nLists.isEmpty()) {
+ for (final ListSchemaNode list : nLists) {
+ genNotifyTypes.addAll(listToGenType(basePackageName, list));
+ }
+ }
+ }
+ }
+ }
+ return genNotifyTypes;
}
private EnumTypeDefinition enumTypeDefFromExtendedType(
return null;
}
- private EnumBuilder resolveEnumFromTypeDefinition(
+ private EnumBuilder resolveInnerEnumFromTypeDefinition(
final EnumTypeDefinition enumTypeDef, final String enumName,
final GeneratedTypeBuilder typeBuilder) {
if ((enumTypeDef != null) && (typeBuilder != null)
}
- private List<GeneratedType> rpcMethodsToGenType(final String basePackageName, final Module module) {
- if (module == null) {
- throw new IllegalArgumentException("Module reference cannot be NULL!");
- }
-
- final Set<RpcDefinition> rpcDefinitions = module.getRpcs();
- final List<GeneratedType> rpcTypes = new ArrayList<>();
-
- if ((rpcDefinitions != null) && !rpcDefinitions.isEmpty()) {
- for (final RpcDefinition rpc : rpcDefinitions) {
- if (rpc != null) {
- final List<DataNodeIterator> rpcInOut = new ArrayList<>();
- rpcInOut.add(new DataNodeIterator(rpc.getInput()));
- rpcInOut.add(new DataNodeIterator(rpc.getOutput()));
-
- for (DataNodeIterator it : rpcInOut) {
- List<ContainerSchemaNode> nContainers = it.allContainers();
- if ((nContainers != null) && !nContainers.isEmpty()) {
- for (final ContainerSchemaNode container : nContainers) {
- rpcTypes.add(containerToGenType(basePackageName, container));
- }
- }
- }
- }
- }
- }
- return rpcTypes;
- }
-
- private List<Type> notifycationsToGenType(final String basePackageName, final Module module) {
- if (module == null) {
- throw new IllegalArgumentException("Module reference cannot be NULL!");
- }
- final List<Type> notificationTypes = new ArrayList<>();
- final Set<NotificationDefinition> notifications = module
- .getNotifications();
-
- if ((notifications != null) && !notifications.isEmpty()) {
- for (final NotificationDefinition notification : notifications) {
- if (notification != null) {
- final List<DataNodeIterator> notifyChildren = new ArrayList<>();
-
- for (DataSchemaNode childNode : notification.getChildNodes()) {
- if (childNode instanceof DataNodeContainer) {
- notifyChildren.add(new DataNodeIterator((DataNodeContainer) childNode));
- }
- }
-
- for (DataNodeIterator it : notifyChildren) {
- List<ContainerSchemaNode> nContainers = it.allContainers();
- List<ListSchemaNode> nLists = it.allLists();
- if ((nContainers != null) && !nContainers.isEmpty()) {
- for (final ContainerSchemaNode container : nContainers) {
- notificationTypes.add(containerToGenType(basePackageName, container));
- }
- }
- if ((nLists != null) && !nLists.isEmpty()) {
- for (final ListSchemaNode list : nLists) {
-
- notificationTypes.addAll(listToGenType(basePackageName, list));
- }
- }
- }
- }
- }
- }
- return notificationTypes;
- }
-
private List<Type> augmentationToGenTypes(final String augmentPackageName,
final AugmentationSchema augSchema) {
if (augmentPackageName == null) {
}
if ((augLists != null) && !augLists.isEmpty()) {
for (final ListSchemaNode list : augLists) {
-// final String listPackageName = packageNameForAugmentedType(
-// augBasePackageName, list.getPath());
genTypes.addAll(listToGenType(augBasePackageName, list));
}
}
final TypeDefinition<?> typeDef = leaf.getType();
Type returnType = null;
- if (!(typeDef instanceof EnumTypeDefinition)
- && !isDerivedFromEnumerationType(typeDef)) {
+ if (!(typeDef instanceof EnumTypeDefinition)) {
returnType = typeProvider
.javaTypeForSchemaDefinitionType(typeDef);
} else {
- if (isImported(leaf.getPath(), typeDef.getPath())) {
- // TODO: resolving of imported enums as references to
- // GeneratedTypeData interface
- } else {
- final EnumTypeDefinition enumTypeDef = enumTypeDefFromExtendedType(typeDef);
- final EnumBuilder enumBuilder = resolveEnumFromTypeDefinition(
- enumTypeDef, leafName, typeBuilder);
-
- if (enumBuilder != null) {
- returnType = new ReferencedTypeImpl(
- enumBuilder.getPackageName(),
- enumBuilder.getName());
- }
+ final EnumTypeDefinition enumTypeDef = enumTypeDefFromExtendedType(typeDef);
+ final EnumBuilder enumBuilder = resolveInnerEnumFromTypeDefinition(
+ enumTypeDef, leafName, typeBuilder);
+
+ if (enumBuilder != null) {
+ returnType = new ReferencedTypeImpl(
+ enumBuilder.getPackageName(),
+ enumBuilder.getName());
}
+ ((TypeProviderImpl)typeProvider).putReferencedType(leaf
+ .getPath(), returnType);
}
-
if (returnType != null) {
constructGetter(typeBuilder, leafName, leafDesc, returnType);
if (!leaf.isConfiguration()) {
return false;
}
- private boolean isImported(final SchemaPath leafPath,
- final SchemaPath typeDefPath) {
- if ((leafPath != null) && (leafPath.getPath() != null)
- && (typeDefPath != null) && (typeDefPath.getPath() != null)) {
-
- final QName leafPathQName = leafPath.getPath().get(0);
- final QName typePathQName = typeDefPath.getPath().get(0);
-
- if ((leafPathQName != null)
- && (leafPathQName.getNamespace() != null)
- && (typePathQName != null)
- && (typePathQName.getNamespace() != null)) {
-
- return !leafPathQName.getNamespace().equals(
- typePathQName.getNamespace());
- }
- }
- return false;
- }
-
private boolean resolveLeafSchemaNodeAsProperty(
final GeneratedTOBuilder toBuilder, final LeafSchemaNode leaf,
boolean isReadOnly) {
addSchemaNodeToListBuilders(basePackageName, schemaNode, typeBuilder,
genTOBuilder, listKeys);
}
-
-// if (list.isAugmenting()) {
-// for (final DataSchemaNode schemaNode : schemaNodes) {
-// if (schemaNode.isAugmenting()) {
-// addSchemaNodeToListBuilders(basePackageName, schemaNode, typeBuilder,
-// genTOBuilder, listKeys);
-// }
-// }
-// } else {
-// for (final DataSchemaNode schemaNode : schemaNodes) {
-// if (schemaNode.isAugmenting()) {
-// continue;
-// }
-// addSchemaNodeToListBuilders(basePackageName, schemaNode, typeBuilder,
-// genTOBuilder, listKeys);
-// }
-// }
return typeBuildersToGenTypes(typeBuilder, genTOBuilder);
}
} else if (schemaNode instanceof ListSchemaNode) {
resolveListSchemaNode(basePackageName, typeBuilder, (ListSchemaNode) schemaNode);
}
-
}
private List<Type> typeBuildersToGenTypes(
if (listKeys.size() > 0) {
genTOBuilder = resolveListKey(packageName, list);
}
-
return genTOBuilder;
}
}
import org.opendaylight.controller.sal.binding.model.api.Type;
import org.opendaylight.controller.yang.model.api.TypeDefinition;
-public class BaseYangTypes {
+public final class BaseYangTypes {
private static Map<String, Type> typeMap = new HashMap<String, Type>();
public static final Type UINT16_TYPE = Types.typeForClass(Integer.class);
public static final Type UINT32_TYPE = Types.typeForClass(Long.class);
public static final Type UINT64_TYPE = Types.typeForClass(BigInteger.class);
+ public static final Type BINARY_TYPE = Types.primitiveType("byte[]");
static {
typeMap.put("boolean", BOOLEAN_TYPE);
typeMap.put("uint16", UINT16_TYPE);
typeMap.put("uint32", UINT32_TYPE);
typeMap.put("uint64", UINT64_TYPE);
+ typeMap.put("binary", BINARY_TYPE);
}
public static final TypeProvider BASE_YANG_TYPES_PROVIDER = new TypeProvider() {
*/
package org.opendaylight.controller.sal.binding.yang.types;
-import static org.opendaylight.controller.yang.model.util.SchemaContextUtil.*;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.opendaylight.controller.binding.generator.util.BindingGeneratorUtil;
+import org.opendaylight.controller.binding.generator.util.ReferencedTypeImpl;
import org.opendaylight.controller.binding.generator.util.Types;
import org.opendaylight.controller.binding.generator.util.generated.type.builder.EnumerationBuilderImpl;
import org.opendaylight.controller.binding.generator.util.generated.type.builder.GeneratedTOBuilderImpl;
import org.opendaylight.controller.sal.binding.model.api.type.builder.GeneratedPropertyBuilder;
import org.opendaylight.controller.sal.binding.model.api.type.builder.GeneratedTOBuilder;
import org.opendaylight.controller.sal.binding.model.api.type.builder.GeneratedTypeBuilder;
-import org.opendaylight.controller.yang.model.api.DataSchemaNode;
-import org.opendaylight.controller.yang.model.api.LeafListSchemaNode;
-import org.opendaylight.controller.yang.model.api.LeafSchemaNode;
-import org.opendaylight.controller.yang.model.api.Module;
-import org.opendaylight.controller.yang.model.api.RevisionAwareXPath;
-import org.opendaylight.controller.yang.model.api.SchemaContext;
-import org.opendaylight.controller.yang.model.api.TypeDefinition;
+import org.opendaylight.controller.yang.model.api.*;
import org.opendaylight.controller.yang.model.api.type.EnumTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.EnumTypeDefinition.EnumPair;
import org.opendaylight.controller.yang.model.api.type.IdentityrefTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.UnionTypeDefinition;
import org.opendaylight.controller.yang.model.util.ExtendedType;
-public class TypeProviderImpl implements TypeProvider {
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.opendaylight.controller.binding.generator.util.BindingGeneratorUtil.*;
+import static org.opendaylight.controller.yang.model.util.SchemaContextUtil.*;
+
+public final class TypeProviderImpl implements TypeProvider {
private final SchemaContext schemaContext;
- private Map<String, Map<String, GeneratedTransferObject>> genTypeDefsContextMap;
- private final List<GeneratedTransferObject> allTypeDefinitions;
+ private Map<String, Map<String, Type>> genTypeDefsContextMap;
+ private final Map<SchemaPath, Type> referencedTypes;
public TypeProviderImpl(final SchemaContext schemaContext) {
if (schemaContext == null) {
}
this.schemaContext = schemaContext;
- this.genTypeDefsContextMap = new HashMap<String, Map<String, GeneratedTransferObject>>();
- allTypeDefinitions = resolveTypeDefsFromContext();
+ this.genTypeDefsContextMap = new HashMap<>();
+ this.referencedTypes = new HashMap<>();
+ resolveTypeDefsFromContext();
}
- public List<GeneratedTransferObject> getGeneratedTypeDefs() {
- return allTypeDefinitions;
+ public void putReferencedType(final SchemaPath refTypePath,
+ final Type refType) {
+ if (refTypePath == null) {
+ throw new IllegalArgumentException("Path reference of " +
+ "Enumeration Type Definition cannot be NULL!");
+ }
+
+ if (refType == null) {
+ throw new IllegalArgumentException("Reference to Enumeration " +
+ "Type cannot be NULL!");
+ }
+ referencedTypes.put(refTypePath, refType);
}
/*
public Type javaTypeForSchemaDefinitionType(
final TypeDefinition<?> typeDefinition) {
Type returnType = null;
- if (typeDefinition != null) {
- final String typedefName = typeDefinition.getQName().getLocalName();
- if (typeDefinition instanceof ExtendedType) {
- final TypeDefinition<?> baseTypeDef = baseTypeDefForExtendedType(typeDefinition);
-
- if (baseTypeDef instanceof LeafrefTypeDefinition) {
- final LeafrefTypeDefinition leafref = (LeafrefTypeDefinition) baseTypeDef;
- returnType = provideTypeForLeafref(leafref);
- } else if (baseTypeDef instanceof IdentityrefTypeDefinition) {
-
- } else if (baseTypeDef instanceof EnumTypeDefinition) {
- final EnumTypeDefinition enumTypeDef = (EnumTypeDefinition) baseTypeDef;
- returnType = resolveEnumFromTypeDefinition(enumTypeDef,
- typedefName);
- } else {
-
- final Module module = resolveModuleFromTypePath(schemaContext, typeDefinition);
-
- if (module != null) {
- final Map<String, GeneratedTransferObject> genTOs = genTypeDefsContextMap
- .get(module.getName());
- if (genTOs != null) {
- returnType = genTOs.get(typedefName);
- }
+ if (typeDefinition == null) {
+ throw new IllegalArgumentException("Type Definition cannot be " +
+ "NULL!");
+ }
+ if (typeDefinition.getQName() == null) {
+ throw new IllegalArgumentException("Type Definition cannot have " +
+ "non specified QName (QName cannot be NULL!)");
+ }
+ if (typeDefinition.getQName().getLocalName() == null) {
+ throw new IllegalArgumentException("Type Definitions Local Name " +
+ "cannot be NULL!");
+ }
+ final String typedefName = typeDefinition.getQName().getLocalName();
+ if (typeDefinition instanceof ExtendedType) {
+ final TypeDefinition<?> baseTypeDef = baseTypeDefForExtendedType(typeDefinition);
+
+ if (baseTypeDef instanceof LeafrefTypeDefinition) {
+ final LeafrefTypeDefinition leafref = (LeafrefTypeDefinition) baseTypeDef;
+ returnType = provideTypeForLeafref(leafref);
+ } else if (baseTypeDef instanceof IdentityrefTypeDefinition) {
+
+ } else if (baseTypeDef instanceof EnumTypeDefinition) {
+ final EnumTypeDefinition enumTypeDef = (EnumTypeDefinition) baseTypeDef;
+ returnType = resolveEnumFromTypeDefinition(enumTypeDef,
+ typedefName);
+ } else {
+ final Module module = findParentModuleForTypeDefinition(schemaContext,
+ typeDefinition);
+ if (module != null) {
+ final Map<String, Type> genTOs = genTypeDefsContextMap
+ .get(module.getName());
+ if (genTOs != null) {
+ returnType = genTOs.get(typedefName);
+ }
+ if (returnType == null) {
+ returnType = BaseYangTypes.BASE_YANG_TYPES_PROVIDER
+ .javaTypeForSchemaDefinitionType(baseTypeDef);
}
}
+ }
+ } else {
+ if (typeDefinition instanceof LeafrefTypeDefinition) {
+ final LeafrefTypeDefinition leafref = (LeafrefTypeDefinition) typeDefinition;
+ returnType = provideTypeForLeafref(leafref);
+ } else if (typeDefinition instanceof IdentityrefTypeDefinition) {
+
} else {
- if (typeDefinition instanceof LeafrefTypeDefinition) {
- final LeafrefTypeDefinition leafref = (LeafrefTypeDefinition) typeDefinition;
- returnType = provideTypeForLeafref(leafref);
- } else if (typeDefinition instanceof EnumTypeDefinition) {
- final EnumTypeDefinition enumTypeDef = (EnumTypeDefinition) typeDefinition;
- returnType = resolveEnumFromTypeDefinition(enumTypeDef,
- typedefName);
- } else if (typeDefinition instanceof IdentityrefTypeDefinition) {
+ returnType = BaseYangTypes.BASE_YANG_TYPES_PROVIDER
+ .javaTypeForSchemaDefinitionType(typeDefinition);
+ }
+ }
+ //TODO: add throw exception when we will be able to resolve ALL yang
+ // types!
+// if (returnType == null) {
+// throw new IllegalArgumentException("Type Provider can't resolve " +
+// "type for specified Type Definition " + typedefName);
+// }
+ return returnType;
+ }
- } else {
- returnType = BaseYangTypes.BASE_YANG_TYPES_PROVIDER
- .javaTypeForSchemaDefinitionType(typeDefinition);
+ public Type generatedTypeForExtendedDefinitionType(
+ final TypeDefinition<?> typeDefinition) {
+ Type returnType = null;
+ if (typeDefinition == null) {
+ throw new IllegalArgumentException("Type Definition cannot be " +
+ "NULL!");
+ }
+ if (typeDefinition.getQName() == null) {
+ throw new IllegalArgumentException("Type Definition cannot have " +
+ "non specified QName (QName cannot be NULL!)");
+ }
+ if (typeDefinition.getQName() == null) {
+ throw new IllegalArgumentException("Type Definitions Local Name " +
+ "cannot be NULL!");
+ }
+
+ final String typedefName = typeDefinition.getQName().getLocalName();
+ if (typeDefinition instanceof ExtendedType) {
+ final TypeDefinition<?> baseTypeDef = baseTypeDefForExtendedType(typeDefinition);
+
+ if (!(baseTypeDef instanceof LeafrefTypeDefinition)
+ && !(baseTypeDef instanceof IdentityrefTypeDefinition)) {
+ final Module module = findParentModuleForTypeDefinition(schemaContext,
+ typeDefinition);
+
+ if (module != null) {
+ final Map<String, Type> genTOs = genTypeDefsContextMap
+ .get(module.getName());
+ if (genTOs != null) {
+ returnType = genTOs.get(typedefName);
+ }
}
}
}
private TypeDefinition<?> baseTypeDefForExtendedType(
final TypeDefinition<?> extendTypeDef) {
- if (extendTypeDef != null) {
- final TypeDefinition<?> baseTypeDef = extendTypeDef.getBaseType();
- if (baseTypeDef instanceof ExtendedType) {
- return baseTypeDefForExtendedType(baseTypeDef);
- } else {
- return baseTypeDef;
- }
+ if (extendTypeDef == null) {
+ throw new IllegalArgumentException("Type Definiition reference " +
+ "cannot be NULL!");
}
- return null;
+ final TypeDefinition<?> baseTypeDef = extendTypeDef.getBaseType();
+ if (baseTypeDef instanceof ExtendedType) {
+ return baseTypeDefForExtendedType(baseTypeDef);
+ } else {
+ return baseTypeDef;
+ }
+
}
- public Type baseTypeForExtendedType(final TypeDefinition<?> typeDefinition) {
+ public Type provideTypeForLeafref(final LeafrefTypeDefinition leafrefType) {
Type returnType = null;
- if (typeDefinition != null) {
- final TypeDefinition<?> baseTypeDefinition = baseTypeDefForExtendedType(typeDefinition);
+ if (leafrefType == null) {
+ throw new IllegalArgumentException("Leafref Type Definition " +
+ "reference cannot be NULL!");
+ }
- if (baseTypeDefinition instanceof EnumTypeDefinition) {
- final EnumTypeDefinition enumTypeDef = (EnumTypeDefinition) typeDefinition;
- final String enumName = enumTypeDef.getQName().getLocalName();
- return resolveEnumFromTypeDefinition(enumTypeDef, enumName);
- } else {
- returnType = BaseYangTypes.BASE_YANG_TYPES_PROVIDER
- .javaTypeForSchemaDefinitionType(typeDefinition);
- }
+ if (leafrefType.getPathStatement() == null) {
+ throw new IllegalArgumentException("The Path Statement for " +
+ "Leafref Type Definition cannot be NULL!");
}
- return returnType;
- }
- public Type provideTypeForLeafref(final LeafrefTypeDefinition leafrefType) {
- Type returnType = null;
- if ((leafrefType != null) && (leafrefType.getPathStatement() != null)
- && (leafrefType.getPath() != null)) {
+ final RevisionAwareXPath xpath = leafrefType.getPathStatement();
+ final String strXPath = xpath.toString();
- final RevisionAwareXPath xpath = leafrefType.getPathStatement();
- final String strXPath = xpath.toString();
+ if (strXPath != null) {
+ if (strXPath.matches(".*//[.* | .*//].*")) {
+ returnType = Types.typeForClass(Object.class);
+ } else {
+ final Module module = findParentModuleForTypeDefinition(schemaContext, leafrefType);
+ if (module != null) {
+ final DataSchemaNode dataNode;
+ if (xpath.isAbsolute()) {
+ dataNode = findDataSchemaNode(schemaContext, module,
+ xpath);
+ } else {
+ dataNode = findDataSchemaNodeForRelativeXPath(schemaContext,
+ module, leafrefType, xpath);
+ }
- if (strXPath != null) {
- if (strXPath.matches(".*//[.* | .*//].*")) {
- returnType = Types.typeForClass(Object.class);
- } else {
- final Module module = resolveModuleFromTypePath(schemaContext, leafrefType);
- if (module != null) {
- final DataSchemaNode dataNode;
- if (xpath.isAbsolute()) {
- dataNode = findDataSchemaNode(schemaContext,
- module, xpath);
- } else {
- dataNode = findDataSchemaNodeForRelativeXPath(schemaContext, module,
- leafrefType, xpath);
- }
+ if (leafContainsEnumDefinition(dataNode)) {
+ returnType = referencedTypes.get(dataNode.getPath());
+ } else if (leafListContainsEnumDefinition(dataNode)) {
+ returnType = Types.listTypeFor(referencedTypes.get(
+ dataNode.getPath()));
+ } else {
returnType = resolveTypeFromDataSchemaNode(dataNode);
}
}
return returnType;
}
- private EnumBuilder resolveEnumFromTypeDefinition(
- final EnumTypeDefinition enumTypeDef, final String enumName,
- final GeneratedTypeBuilder typeBuilder) {
- if ((enumTypeDef != null) && (typeBuilder != null)
- && (enumTypeDef.getQName() != null)
- && (enumTypeDef.getQName().getLocalName() != null)) {
-
- final String enumerationName = BindingGeneratorUtil
- .parseToClassName(enumName);
- final EnumBuilder enumBuilder = typeBuilder
- .addEnumeration(enumerationName);
-
- if (enumBuilder != null) {
- final List<EnumPair> enums = enumTypeDef.getValues();
- if (enums != null) {
- int listIndex = 0;
- for (final EnumPair enumPair : enums) {
- if (enumPair != null) {
- final String enumPairName = BindingGeneratorUtil
- .parseToClassName(enumPair.getName());
- Integer enumPairValue = enumPair.getValue();
-
- if (enumPairValue == null) {
- enumPairValue = listIndex;
- }
- enumBuilder.addValue(enumPairName, enumPairValue);
- listIndex++;
- }
- }
- }
- return enumBuilder;
+ private boolean leafContainsEnumDefinition(final DataSchemaNode dataNode) {
+ if (dataNode instanceof LeafSchemaNode) {
+ final LeafSchemaNode leaf = (LeafSchemaNode) dataNode;
+ if (leaf.getType() instanceof EnumTypeDefinition) {
+ return true;
}
}
- return null;
+ return false;
+ }
+
+ private boolean leafListContainsEnumDefinition(
+ final DataSchemaNode dataNode) {
+ if (dataNode instanceof LeafListSchemaNode) {
+ final LeafListSchemaNode leafList = (LeafListSchemaNode) dataNode;
+ if (leafList.getType() instanceof EnumTypeDefinition) {
+ return true;
+ }
+ }
+ return false;
}
private Enumeration resolveEnumFromTypeDefinition(
final EnumTypeDefinition enumTypeDef, final String enumName) {
- if ((enumTypeDef != null) && (enumTypeDef.getQName() != null)
- && (enumTypeDef.getQName().getLocalName() != null)) {
-
- final String enumerationName = BindingGeneratorUtil
- .parseToClassName(enumName);
-
- Module module = resolveModuleFromTypePath(schemaContext, enumTypeDef);
-
- final String basePackageName = BindingGeneratorUtil
- .moduleNamespaceToPackageName(module);
- final String packageName = BindingGeneratorUtil
- .packageNameForGeneratedType(basePackageName,
- enumTypeDef.getPath());
-
- final EnumBuilder enumBuilder = new EnumerationBuilderImpl(
- packageName, enumerationName);
-
- if (enumBuilder != null) {
- final List<EnumPair> enums = enumTypeDef.getValues();
- if (enums != null) {
- int listIndex = 0;
- for (final EnumPair enumPair : enums) {
- if (enumPair != null) {
- final String enumPairName = BindingGeneratorUtil
- .parseToClassName(enumPair.getName());
- Integer enumPairValue = enumPair.getValue();
-
- if (enumPairValue == null) {
- enumPairValue = listIndex;
- }
- enumBuilder.addValue(enumPairName, enumPairValue);
- listIndex++;
+ if (enumTypeDef == null) {
+ throw new IllegalArgumentException("EnumTypeDefinition reference " +
+ "cannot be NULL!");
+ }
+ if (enumTypeDef.getValues() == null) {
+ throw new IllegalArgumentException("EnumTypeDefinition MUST " +
+ "contain at least ONE value definition!");
+ }
+ if (enumTypeDef.getQName() == null) {
+ throw new IllegalArgumentException("EnumTypeDefinition MUST " +
+ "contain NON-NULL QName!");
+ }
+ if (enumTypeDef.getQName().getLocalName() == null) {
+ throw new IllegalArgumentException("Local Name in " +
+ "EnumTypeDefinition QName cannot be NULL!");
+ }
+
+ final String enumerationName = parseToClassName(enumName);
+
+ Module module = findParentModuleForTypeDefinition(schemaContext, enumTypeDef);
+ final String basePackageName = moduleNamespaceToPackageName(module);
+
+ final EnumBuilder enumBuilder = new EnumerationBuilderImpl(
+ basePackageName, enumerationName);
+ updateEnumPairsFromEnumTypeDef(enumTypeDef, enumBuilder);
+ return enumBuilder.toInstance(null);
+ }
+
+ private EnumBuilder resolveInnerEnumFromTypeDefinition(
+ final EnumTypeDefinition enumTypeDef, final String enumName,
+ final GeneratedTypeBuilder typeBuilder) {
+ if (enumTypeDef == null) {
+ throw new IllegalArgumentException("EnumTypeDefinition reference " +
+ "cannot be NULL!");
+ }
+ if (enumTypeDef.getValues() == null) {
+ throw new IllegalArgumentException("EnumTypeDefinition MUST " +
+ "contain at least ONE value definition!");
+ }
+ if (enumTypeDef.getQName() == null) {
+ throw new IllegalArgumentException("EnumTypeDefinition MUST " +
+ "contain NON-NULL QName!");
+ }
+ if (enumTypeDef.getQName().getLocalName() == null) {
+ throw new IllegalArgumentException("Local Name in " +
+ "EnumTypeDefinition QName cannot be NULL!");
+ }
+ if (typeBuilder == null) {
+ throw new IllegalArgumentException("Generated Type Builder " +
+ "reference cannot be NULL!");
+ }
+
+ final String enumerationName = parseToClassName(enumName);
+ final EnumBuilder enumBuilder = typeBuilder
+ .addEnumeration(enumerationName);
+
+ updateEnumPairsFromEnumTypeDef(enumTypeDef, enumBuilder);
+
+ return enumBuilder;
+ }
+
+ private void updateEnumPairsFromEnumTypeDef(
+ final EnumTypeDefinition enumTypeDef,
+ final EnumBuilder enumBuilder) {
+ if (enumBuilder != null) {
+ final List<EnumPair> enums = enumTypeDef.getValues();
+ if (enums != null) {
+ int listIndex = 0;
+ for (final EnumPair enumPair : enums) {
+ if (enumPair != null) {
+ final String enumPairName = parseToClassName(enumPair
+ .getName());
+ Integer enumPairValue = enumPair.getValue();
+
+ if (enumPairValue == null) {
+ enumPairValue = listIndex;
}
+ enumBuilder.addValue(enumPairName, enumPairValue);
+ listIndex++;
}
}
- return enumBuilder.toInstance(null);
}
}
- return null;
}
private Type resolveTypeFromDataSchemaNode(final DataSchemaNode dataNode) {
return returnType;
}
- private List<GeneratedTransferObject> resolveTypeDefsFromContext() {
- final List<GeneratedTransferObject> genTypeDefs = new ArrayList<GeneratedTransferObject>();
+ private void resolveTypeDefsFromContext() {
final Set<Module> modules = schemaContext.getModules();
- if (modules != null) {
- for (final Module module : modules) {
- if (module != null) {
- final String moduleName = module.getName();
- final String basePackageName = BindingGeneratorUtil
- .moduleNamespaceToPackageName(module);
-
- final Set<TypeDefinition<?>> typeDefinitions = module
- .getTypeDefinitions();
-
- if ((typeDefinitions != null) && (basePackageName != null)) {
- for (final TypeDefinition<?> typedef : typeDefinitions) {
- final GeneratedTransferObject genTransObj = toGeneratedTransferObject(
- basePackageName, moduleName, typedef);
- if (genTransObj != null) {
- genTypeDefs.add(genTransObj);
- }
- }
- // for (final TypeDefinition<?> typedef :
- // typeDefinitions) {
- // addUnionGeneratedTypeDefinition(basePackageName,
- // module.getName(), typedef);
- // }
- }
+ if (modules == null) {
+ throw new IllegalArgumentException("Sef of Modules cannot be " +
+ "NULL!");
+ }
+ for (final Module module : modules) {
+ if (module == null) {
+ continue;
+ }
+ final String moduleName = module.getName();
+ final String basePackageName = moduleNamespaceToPackageName(module);
+
+ final Set<TypeDefinition<?>> typeDefinitions = module
+ .getTypeDefinitions();
+
+ final Map<String, Type> typeMap = new HashMap<>();
+ genTypeDefsContextMap.put(moduleName, typeMap);
+
+ if ((typeDefinitions != null) && (basePackageName != null)) {
+ for (final TypeDefinition<?> typedef : typeDefinitions) {
+ typedefToGeneratedType(basePackageName, moduleName, typedef);
+ }
+ final List<ExtendedType> extUnions = UnionDependencySort
+ .sort(typeDefinitions);
+ for (final ExtendedType extUnionType : extUnions) {
+ addUnionGeneratedTypeDefinition(basePackageName, extUnionType);
}
}
}
- return genTypeDefs;
}
- private GeneratedTransferObject toGeneratedTransferObject(
- final String basePackageName, final String moduleName,
- final TypeDefinition<?> typedef) {
+ private Type typedefToGeneratedType(final String basePackageName,
+ final String moduleName, final TypeDefinition<?> typedef) {
if ((basePackageName != null) && (moduleName != null)
&& (typedef != null) && (typedef.getQName() != null)) {
- final GeneratedTOBuilder genTOBuilder = typedefToTransferObject(
- basePackageName, typedef);
- final String typedefName = typedef.getQName().getLocalName();
- final String propertyName = BindingGeneratorUtil
- .parseToValidParamName(typedefName);
+ final String typedefName = typedef.getQName().getLocalName();
final TypeDefinition<?> baseTypeDefinition = baseTypeDefForExtendedType(typedef);
if (!(baseTypeDefinition instanceof LeafrefTypeDefinition)
&& !(baseTypeDefinition instanceof IdentityrefTypeDefinition)) {
- Type returnType = null;
+ Type returnType;
if (baseTypeDefinition instanceof EnumTypeDefinition) {
final EnumTypeDefinition enumTypeDef = (EnumTypeDefinition) baseTypeDefinition;
returnType = resolveEnumFromTypeDefinition(enumTypeDef,
typedefName);
+
} else {
- returnType = BaseYangTypes.BASE_YANG_TYPES_PROVIDER
+ final Type javaType = BaseYangTypes.BASE_YANG_TYPES_PROVIDER
.javaTypeForSchemaDefinitionType(baseTypeDefinition);
- }
+ returnType = wrapJavaTypeIntoTO(basePackageName, typedef,
+ javaType);
+ }
if (returnType != null) {
- final GeneratedPropertyBuilder genPropBuilder = genTOBuilder
- .addProperty(propertyName);
-
- genPropBuilder.addReturnType(returnType);
- genTOBuilder.addEqualsIdentity(genPropBuilder);
- genTOBuilder.addHashIdentity(genPropBuilder);
- genTOBuilder.addToStringProperty(genPropBuilder);
-
- Map<String, GeneratedTransferObject> transferObjectsMap = genTypeDefsContextMap
- .get(moduleName);
- if (transferObjectsMap == null) {
- transferObjectsMap = new HashMap<String, GeneratedTransferObject>();
- genTypeDefsContextMap.put(moduleName,
- transferObjectsMap);
- }
-
- final GeneratedTransferObject transferObject = genTOBuilder
- .toInstance();
- if (transferObject != null) {
- transferObjectsMap.put(typedefName, transferObject);
- return transferObject;
+ final Map<String, Type> typeMap = genTypeDefsContextMap.get
+ (moduleName);
+ if (typeMap != null) {
+ typeMap.put(typedefName, returnType);
}
+ return returnType;
}
}
}
return null;
}
+ private GeneratedTransferObject wrapJavaTypeIntoTO(
+ final String basePackageName, final TypeDefinition<?> typedef,
+ final Type javaType) {
+ if (javaType != null) {
+ final String typedefName = typedef.getQName().getLocalName();
+ final String propertyName = parseToValidParamName(typedefName);
+
+ final GeneratedTOBuilder genTOBuilder = typedefToTransferObject(
+ basePackageName, typedef);
+
+ final GeneratedPropertyBuilder genPropBuilder = genTOBuilder
+ .addProperty(propertyName);
+
+ genPropBuilder.addReturnType(javaType);
+ genTOBuilder.addEqualsIdentity(genPropBuilder);
+ genTOBuilder.addHashIdentity(genPropBuilder);
+ genTOBuilder.addToStringProperty(genPropBuilder);
+ return genTOBuilder.toInstance();
+ }
+ return null;
+ }
+
private void addUnionGeneratedTypeDefinition(final String basePackageName,
- final String moduleName, final TypeDefinition<?> typedef) {
- if ((basePackageName != null) && (moduleName != null)
- && (typedef != null) && (typedef.getQName() != null)) {
- final TypeDefinition<?> baseTypeDefinition = baseTypeDefForExtendedType(typedef);
+ final TypeDefinition<?> typedef) {
+ if (basePackageName == null) {
+ throw new IllegalArgumentException("Base Package Name cannot be " +
+ "NULL!");
+ }
+ if (typedef == null) {
+ throw new IllegalArgumentException("Type Definition cannot be " +
+ "NULL!");
+ }
+ if (typedef.getQName() == null) {
+ throw new IllegalArgumentException("Type Definition cannot have " +
+ "non specified QName (QName cannot be NULL!)");
+ }
- if ((baseTypeDefinition != null)
- && (baseTypeDefinition instanceof UnionTypeDefinition)) {
- final UnionTypeDefinition unionTypeDef = (UnionTypeDefinition) baseTypeDefinition;
-
- final List<TypeDefinition<?>> unionTypes = unionTypeDef
- .getTypes();
- final Map<String, GeneratedTransferObject> genTOsMap = genTypeDefsContextMap
- .get(moduleName);
- final GeneratedTOBuilder unionGenTransObject = typedefToTransferObject(
- basePackageName, typedef);
- if ((unionTypes != null) && (genTOsMap != null)
- && (unionGenTransObject != null)) {
- for (final TypeDefinition<?> unionType : unionTypes) {
- final String typeName = unionType.getQName()
- .getLocalName();
- final GeneratedTransferObject genTransferObject = genTOsMap
- .get(typeName);
-
- if (genTransferObject != null) {
- unionGenTransObject
- .addProperty(
- BindingGeneratorUtil
- .parseToValidParamName(genTransferObject
- .getName()))
- .addReturnType(genTransferObject);
+ final TypeDefinition<?> baseTypeDefinition = typedef.getBaseType();
+ if ((baseTypeDefinition != null)
+ && (baseTypeDefinition instanceof UnionTypeDefinition)) {
+ final UnionTypeDefinition unionTypeDef = (UnionTypeDefinition) baseTypeDefinition;
+ final List<TypeDefinition<?>> unionTypes = unionTypeDef
+ .getTypes();
+ final Module parentModule = findParentModuleForTypeDefinition(schemaContext,
+ typedef);
+
+ Map<String, Type> genTOsMap = null;
+ if (parentModule != null && parentModule.getName() != null) {
+ genTOsMap = genTypeDefsContextMap.get(parentModule.getName());
+ }
+
+ final GeneratedTOBuilder unionGenTransObject = typedefToTransferObject(
+ basePackageName, typedef);
+ if ((unionTypes != null) && (unionGenTransObject != null)) {
+ for (final TypeDefinition<?> unionType : unionTypes) {
+ final String typeName = unionType.getQName()
+ .getLocalName();
+ if (unionType instanceof ExtendedType) {
+ final Module unionTypeModule = findParentModuleForTypeDefinition(schemaContext,
+ unionType);
+ if (unionTypeModule != null && unionTypeModule.getName() != null) {
+ final Map<String, Type> innerGenTOs = genTypeDefsContextMap
+ .get(unionTypeModule.getName());
+
+ final GeneratedTransferObject genTransferObject =
+ (GeneratedTransferObject) innerGenTOs.get(typeName);
+ if (genTransferObject != null) {
+ updateUnionTypeAsProperty(unionGenTransObject,
+ genTransferObject,
+ genTransferObject.getName());
+ }
+ }
+ } else if (unionType instanceof EnumTypeDefinition) {
+ final EnumBuilder
+ enumBuilder = resolveInnerEnumFromTypeDefinition(
+ (EnumTypeDefinition) unionType, typeName,
+ unionGenTransObject);
+ final Type enumRefType = new ReferencedTypeImpl(
+ enumBuilder.getPackageName(),
+ enumBuilder.getName());
+ updateUnionTypeAsProperty(unionGenTransObject,
+ enumRefType, typeName);
+ } else {
+ final Type javaType = BaseYangTypes.BASE_YANG_TYPES_PROVIDER
+ .javaTypeForSchemaDefinitionType(unionType);
+ if (javaType != null) {
+ updateUnionTypeAsProperty(unionGenTransObject,
+ javaType, typeName);
}
}
- genTOsMap.put(unionTypeDef.getQName().getLocalName(),
- unionGenTransObject.toInstance());
}
+ genTOsMap.put(typedef.getQName().getLocalName(),
+ unionGenTransObject.toInstance());
+ }
+ }
+ }
+
+ private void updateUnionTypeAsProperty(
+ final GeneratedTOBuilder unionGenTransObject, final Type type,
+ final String propertyName) {
+ if (unionGenTransObject != null && type != null) {
+ final GeneratedPropertyBuilder propBuilder =
+ unionGenTransObject.addProperty(parseToValidParamName(
+ propertyName));
+ propBuilder.addReturnType(type);
+ propBuilder.setReadOnly(false);
+
+ if (!(type instanceof Enumeration)) {
+ unionGenTransObject.addEqualsIdentity(propBuilder);
+ unionGenTransObject.addHashIdentity(propBuilder);
+ unionGenTransObject.addToStringProperty(propBuilder);
}
}
}
private GeneratedTOBuilder typedefToTransferObject(
final String basePackageName, final TypeDefinition<?> typedef) {
- final String packageName = BindingGeneratorUtil
- .packageNameForGeneratedType(basePackageName, typedef.getPath());
+ final String packageName = packageNameForGeneratedType(basePackageName,
+ typedef.getPath());
final String typeDefTOName = typedef.getQName().getLocalName();
if ((packageName != null) && (typedef != null)
&& (typeDefTOName != null)) {
- final String genTOName = BindingGeneratorUtil
- .parseToClassName(typeDefTOName);
+ final String genTOName = parseToClassName(typeDefTOName);
final GeneratedTOBuilder newType = new GeneratedTOBuilderImpl(
packageName, genTOName);
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.binding.yang.types;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.opendaylight.controller.yang.model.api.TypeDefinition;
+import org.opendaylight.controller.yang.model.api.type.UnionTypeDefinition;
+import org.opendaylight.controller.yang.model.util.ExtendedType;
+import org.opendaylight.controller.yang.parser.util.TopologicalSort;
+import org.opendaylight.controller.yang.parser.util.TopologicalSort.Node;
+import org.opendaylight.controller.yang.parser.util.TopologicalSort.NodeImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class UnionDependencySort {
+ private static final Logger logger = LoggerFactory
+ .getLogger(UnionDependencySort.class);
+
+ public static List<ExtendedType> sort(
+ final Set<TypeDefinition<?>> typeDefinitions) {
+ if (typeDefinitions == null) {
+ logger.error("Set of Type Definitions cannot be NULL!");
+ throw new IllegalArgumentException("Set of Type Definitions " +
+ "cannot be NULL!");
+ }
+
+ final Set<ExtendedType> extUnionTypes =
+ unionsFromTypeDefinitions(typeDefinitions);
+
+ final Set<Node> unsorted = unionTypesToUnionNodes(extUnionTypes);
+
+ final List<Node> sortedNodes = TopologicalSort.sort(unsorted);
+ return Lists.transform(sortedNodes, new Function<Node, ExtendedType>() {
+ @Override
+ public ExtendedType apply(Node input) {
+ return ((UnionNode) input).getUnionType();
+ }
+ });
+ }
+
+ private static Set<ExtendedType> unionsFromTypeDefinitions(
+ final Set<TypeDefinition<?>> typeDefinitions) {
+ final Set<ExtendedType> unions = Sets.newHashSet();
+
+ for (final TypeDefinition<?> typedef : typeDefinitions) {
+ if ((typedef != null) && (typedef.getBaseType() != null)) {
+ if (typedef instanceof ExtendedType
+ && typedef.getBaseType() instanceof UnionTypeDefinition) {
+ unions.add((ExtendedType) typedef);
+ }
+ }
+ }
+ return unions;
+ }
+
+ private static Set<Node> unionTypesToUnionNodes(
+ final Set<ExtendedType> extUnionTypes) {
+ final Map<ExtendedType, Node> nodeMap = Maps.newHashMap();
+ final Set<Node> resultNodes = Sets.newHashSet();
+
+ for (final ExtendedType unionType : extUnionTypes) {
+ final Node node = new UnionNode(unionType);
+ nodeMap.put(unionType, node);
+ resultNodes.add(node);
+ }
+
+ for (final Node node : resultNodes) {
+ final UnionNode unionNode = (UnionNode) node;
+ final ExtendedType extUnionType = unionNode.getUnionType();
+
+ final UnionTypeDefinition unionType = (UnionTypeDefinition)
+ extUnionType.getBaseType();
+
+ final List<TypeDefinition<?>> innerTypes = unionType.getTypes();
+ for (final TypeDefinition<?> typedef : innerTypes) {
+ if (extUnionTypes.contains(typedef)) {
+ final Node toNode = nodeMap.get(typedef);
+ unionNode.addEdge(toNode);
+ }
+ }
+ }
+
+ return resultNodes;
+ }
+
+ private static UnionNode unionTypeToUnionNode(
+ final ExtendedType extUnionType,
+ final Set<ExtendedType> extUnionTypes) {
+ final UnionNode node = new UnionNode(extUnionType);
+
+ if (extUnionType.getBaseType() instanceof UnionTypeDefinition) {
+ final UnionTypeDefinition unionType = (UnionTypeDefinition)
+ extUnionType.getBaseType();
+
+ final List<TypeDefinition<?>> innerTypes = unionType.getTypes();
+ for (final TypeDefinition<?> typedef : innerTypes) {
+ if ((typedef != null) && (typedef instanceof ExtendedType)
+ && (typedef.getBaseType() instanceof UnionTypeDefinition)) {
+ if (extUnionTypes.contains(typedef)) {
+ node.addEdge(new UnionNode((ExtendedType) typedef));
+ }
+ }
+ }
+ }
+
+ return node;
+ }
+
+ @VisibleForTesting
+ static final class UnionNode extends NodeImpl {
+ private final ExtendedType unionType;
+
+ UnionNode(ExtendedType unionType) {
+ this.unionType = unionType;
+ }
+
+ ExtendedType getUnionType() {
+ return unionType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (!(o instanceof UnionNode)) {
+ return false;
+ }
+ UnionNode unionNode = (UnionNode) o;
+ if (!unionType.equals(unionNode.unionType)) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return unionType.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "UnionNode{" +
+ "unionType=" + unionType +
+ '}';
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.binding.generator.impl;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
+import org.opendaylight.controller.sal.binding.model.api.Type;
+import org.opendaylight.controller.yang.model.api.Module;
+import org.opendaylight.controller.yang.model.api.SchemaContext;
+import org.opendaylight.controller.yang.model.parser.api.YangModelParser;
+import org.opendaylight.controller.yang.parser.impl.YangParserImpl;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+
+public class AugmentRleativeXPathTest {
+
+ private final static List<File> augmentModels = new ArrayList<>();
+ private final static String augmentFolderPath = AugmentedTypeTest.class
+ .getResource("/augment-relative-xpath-models").getPath();
+
+ @BeforeClass
+ public static void loadTestResources() {
+ final File augFolder = new File(augmentFolderPath);
+
+ for (final File fileEntry : augFolder.listFiles()) {
+ if (fileEntry.isFile()) {
+ augmentModels.add(fileEntry);
+ }
+ }
+ }
+
+ @Test
+ public void AugmentationWithRelativeXPathTest() {
+ final YangModelParser parser = new YangParserImpl();
+ final Set<Module> modules = parser.parseYangModels(augmentModels);
+ final SchemaContext context = parser.resolveSchemaContext(modules);
+
+ assertNotNull("context is null", context);
+ final BindingGenerator bindingGen = new BindingGeneratorImpl();
+ final List<Type> genTypes = bindingGen.generateTypes(context);
+
+ assertNotNull("genTypes is null", genTypes);
+ assertFalse("genTypes is empty", genTypes.isEmpty());
+
+ //TODO: implement test
+ }
+}
*/
package org.opendaylight.controller.sal.binding.generator.impl;
-import static org.junit.Assert.*;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
-import org.opendaylight.controller.sal.binding.model.api.GeneratedProperty;
-import org.opendaylight.controller.sal.binding.model.api.GeneratedTransferObject;
-import org.opendaylight.controller.sal.binding.model.api.GeneratedType;
-import org.opendaylight.controller.sal.binding.model.api.MethodSignature;
-import org.opendaylight.controller.sal.binding.model.api.Type;
+import org.opendaylight.controller.sal.binding.model.api.*;
import org.opendaylight.controller.yang.model.api.Module;
import org.opendaylight.controller.yang.model.api.SchemaContext;
import org.opendaylight.controller.yang.model.parser.api.YangModelParser;
import org.opendaylight.controller.yang.parser.impl.YangParserImpl;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.*;
+
public class AugmentedTypeTest {
private final static List<File> augmentModels = new ArrayList<>();
GeneratedTransferObject gtTunnelKey = null;
GeneratedType gtNetworkLink2 = null;
- for(final Type type : genTypes) {
- if(type.getName().equals("InterfaceKey") && type.getPackageName().contains("augment._abstract.topology")) {
+ for (final Type type : genTypes) {
+ if (type.getName().equals("InterfaceKey") && type.getPackageName().contains("augment._abstract.topology")) {
gtInterfaceKey = (GeneratedTransferObject) type;
- } else if(type.getName().equals("Interface") && type.getPackageName().contains("augment._abstract.topology")) {
+ } else if (type.getName().equals("Interface") && type.getPackageName().contains("augment._abstract.topology")) {
gtInterface = (GeneratedType) type;
- } else if(type.getName().equals("Tunnel") && type.getPackageName().contains("augment._abstract.topology")) {
+ } else if (type.getName().equals("Tunnel") && type.getPackageName().contains("augment._abstract.topology")) {
gtTunnel = (GeneratedType) type;
- } else if(type.getName().equals("TunnelKey") && type.getPackageName().contains("augment._abstract.topology")) {
+ } else if (type.getName().equals("TunnelKey") && type.getPackageName().contains("augment._abstract.topology")) {
gtTunnelKey = (GeneratedTransferObject) type;
- } else if(type.getName().equals("NetworkLink2") && type.getPackageName().contains("augment._abstract.topology")) {
+ } else if (type.getName().equals("NetworkLink2") && type.getPackageName().contains("augment._abstract.topology")) {
gtNetworkLink2 = (GeneratedType) type;
}
}
}
assertNotNull("getIfcKeyMethod is null", getIfcKeyMethod);
assertNotNull("getIfcKeyMethod.getReturnType() is null", getIfcKeyMethod.getReturnType());
- assertFalse("getIfcKeyMethod.getReturnType() should not be Void", getIfcKeyMethod.getReturnType().equals("java.lang.Void"));
- assertTrue("getIfcKeyMethod.getReturnType().getName() must be InterfaceKey", getIfcKeyMethod.getReturnType().getName().equals("InterfaceKey"));
+ assertFalse("getIfcKeyMethod.getReturnType() should not be Void",
+ getIfcKeyMethod.getReturnType().equals("java.lang.Void"));
+ assertTrue("getIfcKeyMethod.getReturnType().getName() must be InterfaceKey",
+ getIfcKeyMethod.getReturnType().getName().equals("InterfaceKey"));
MethodSignature getHigherLayerIfMethod = null;
for (final MethodSignature method : gtInterfaceMethods) {
}
}
assertNotNull("getHigherLayerIfMethod is null", getHigherLayerIfMethod);
- assertNotNull("getHigherLayerIfMethod.getReturnType() is null", getHigherLayerIfMethod.getReturnType());
- assertFalse("getHigherLayerIfMethod.getReturnType() should not be Void", getHigherLayerIfMethod.getReturnType().equals("java.lang.Void"));
- assertTrue("getHigherLayerIfMethod.getReturnType().getName() must be List", getHigherLayerIfMethod.getReturnType().getName().equals("List"));
+ assertNotNull("getHigherLayerIfMethod.getReturnType() is null",
+ getHigherLayerIfMethod.getReturnType());
+ assertFalse("getHigherLayerIfMethod.getReturnType() should not be Void",
+ getHigherLayerIfMethod.getReturnType().equals("java.lang.Void"));
+ assertTrue("getHigherLayerIfMethod.getReturnType().getName() must be List",
+ getHigherLayerIfMethod.getReturnType().getName().equals("List"));
// 'InterfaceKey'
assertNotNull("gtInterfaceKey is null", gtInterfaceKey);
}
assertNotNull("gtInterfaceId is null", gtInterfaceId);
assertNotNull("gtInterfaceId.getReturnType() is null", gtInterfaceId.getReturnType());
- assertFalse("gtInterfaceId.getReturnType() should not be Void", gtInterfaceId.getReturnType().equals("java.lang.Void"));
- assertTrue("gtInterfaceId.getReturnType().getName() must be String", gtInterfaceId.getReturnType().getName().equals("String"));
+ assertFalse("gtInterfaceId.getReturnType() should not be Void",
+ gtInterfaceId.getReturnType().equals("java.lang.Void"));
+ assertTrue("gtInterfaceId.getReturnType().getName() must be String",
+ gtInterfaceId.getReturnType().getName().equals("String"));
// 'Tunnel'
assertNotNull("gtTunnel is null", gtTunnel);
}
}
assertNotNull("getTunnelKeyMethod is null", getTunnelKeyMethod);
- assertNotNull("getTunnelKeyMethod.getReturnType()", getTunnelKeyMethod.getReturnType());
- assertFalse("getTunnelKeyMethod.getReturnType() should not be Void", getTunnelKeyMethod.getReturnType().equals("java.lang.Void"));
- assertTrue("getTunnelKeyMethod.getReturnType().getName() must be TunnelKey", getTunnelKeyMethod.getReturnType().getName().equals("TunnelKey"));
+ assertNotNull("getTunnelKeyMethod.getReturnType()",
+ getTunnelKeyMethod.getReturnType());
+ assertFalse("getTunnelKeyMethod.getReturnType() should not be Void",
+ getTunnelKeyMethod.getReturnType().equals("java.lang.Void"));
+ assertTrue("getTunnelKeyMethod.getReturnType().getName() must be TunnelKey",
+ getTunnelKeyMethod.getReturnType().getName().equals("TunnelKey"));
// 'TunnelKey'
assertNotNull("gtTunnelKey is null", gtTunnelKey);
}
}
assertNotNull("gtTunnelId is null", gtTunnelId);
- assertNotNull("gtTunnelId.getReturnType() is null", gtTunnelId.getReturnType());
- assertFalse("gtTunnelId.getReturnType() should not be Void", gtTunnelId.getReturnType().equals("java.lang.Void"));
- assertTrue("gtTunnelId.getReturnType().getName() must be Integer", gtTunnelId.getReturnType().getName().equals("Integer"));
+ assertNotNull("gtTunnelId.getReturnType() is null",
+ gtTunnelId.getReturnType());
+ assertFalse("gtTunnelId.getReturnType() should not be Void",
+ gtTunnelId.getReturnType().equals("java.lang.Void"));
+ assertTrue("gtTunnelId.getReturnType().getName() must be Integer",
+ gtTunnelId.getReturnType().getName().equals("Integer"));
// 'NetworkLink2'
assertNotNull("gtNetworkLink2 is null", gtNetworkLink2);
assertNotNull("networkLink2Methods is null", networkLink2Methods);
// FIXME: in some cases getIfcMethod is null which causes test fail. fix ASAP
-// MethodSignature getIfcMethod = null;
-// for (MethodSignature method : networkLink2Methods) {
-// if (method.getName().equals("getInterface")) {
-// getIfcMethod = method;
-// break;
-// }
-// }
+// MethodSignature getIfcMethod = null;
+// for (MethodSignature method : networkLink2Methods) {
+// if (method.getName().equals("getInterface")) {
+// getIfcMethod = method;
+// break;
+// }
+// }
//
-// assertNotNull("getIfcMethod is null", getIfcMethod);
-// assertNotNull("getIfcMethod.getReturnType() is null",
-// getIfcMethod.getReturnType());
-// assertFalse("getIfcMethod.getReturnType() should not be Void",
-// getIfcMethod.getReturnType().equals("java.lang.Void"));
-// assertTrue("getIfcMethod.getReturnType().getName() must be String",
-// getIfcMethod.getReturnType().getName().equals("String"));
+// assertNotNull("getIfcMethod is null", getIfcMethod);
+// assertNotNull("getIfcMethod.getReturnType() is null", getIfcMethod.getReturnType());
+// assertFalse("getIfcMethod.getReturnType() should not be Void", getIfcMethod.getReturnType().equals("java.lang.Void"));
+// assertTrue("getIfcMethod.getReturnType().getName() must be String", getIfcMethod.getReturnType().getName().equals("String"));
}
@Test
--- /dev/null
+/*
+* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+*
+* This program and the accompanying materials are made available under the
+* terms of the Eclipse Public License v1.0 which accompanies this distribution,
+* and is available at http://www.eclipse.org/legal/epl-v10.html
+*/
+package org.opendaylight.controller.sal.binding.generator.impl;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
+import org.opendaylight.controller.sal.binding.model.api.Type;
+import org.opendaylight.controller.yang.model.api.Module;
+import org.opendaylight.controller.yang.model.api.SchemaContext;
+import org.opendaylight.controller.yang.model.parser.api.YangModelParser;
+import org.opendaylight.controller.yang.parser.impl.YangParserImpl;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+
+public class BinaryTypeTest {
+ private final static List<File> yangModels = new ArrayList<>();
+ private final static String yangModelsFolder = AugmentedTypeTest.class
+ .getResource("/binary-type-test-models").getPath();
+
+ @BeforeClass
+ public static void loadTestResources() {
+ final File augFolder = new File(yangModelsFolder);
+ for (final File fileEntry : augFolder.listFiles()) {
+ if (fileEntry.isFile()) {
+ yangModels.add(fileEntry);
+ }
+ }
+ }
+
+ @Test
+ public void binaryTypeTest() {
+ final YangModelParser parser = new YangParserImpl();
+ final Set<Module> modules = parser.parseYangModels(yangModels);
+ final SchemaContext context = parser.resolveSchemaContext(modules);
+
+ assertNotNull("context is null", context);
+ final BindingGenerator bindingGen = new BindingGeneratorImpl();
+ final List<Type> genTypes = bindingGen.generateTypes(context);
+
+ assertNotNull("genTypes is null", genTypes);
+ assertFalse("genTypes is empty", genTypes.isEmpty());
+
+ //TODO: implement test
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.binding.generator.impl;
+
+import org.junit.Test;
+import org.opendaylight.controller.binding.generator.util.ReferencedTypeImpl;
+import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
+import org.opendaylight.controller.sal.binding.model.api.Enumeration;
+import org.opendaylight.controller.sal.binding.model.api.GeneratedType;
+import org.opendaylight.controller.sal.binding.model.api.MethodSignature;
+import org.opendaylight.controller.sal.binding.model.api.Type;
+import org.opendaylight.controller.yang.model.api.Module;
+import org.opendaylight.controller.yang.model.api.SchemaContext;
+import org.opendaylight.controller.yang.model.parser.api.YangModelParser;
+import org.opendaylight.controller.yang.parser.impl.YangParserImpl;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+public class GenEnumResolvingTest {
+
+ private SchemaContext resolveSchemaContextFromFiles(
+ final String... yangFiles) {
+ final YangModelParser parser = new YangParserImpl();
+
+ final List<File> inputFiles = new ArrayList<File>();
+ for (int i = 0; i < yangFiles.length; ++i) {
+ inputFiles.add(new File(yangFiles[i]));
+ }
+
+ final Set<Module> modules = parser.parseYangModels(inputFiles);
+ return parser.resolveSchemaContext(modules);
+ }
+
+ @Test
+ public void testLeafEnumResolving() {
+ final String ietfInterfacesPath = getClass().getResource(
+ "/enum-test-models/ietf-interfaces@2012-11-15.yang").getPath();
+ final String ifTypePath = getClass().getResource(
+ "/enum-test-models/iana-if-type@2012-06-05.yang").getPath();
+ final String yangTypesPath = getClass().getResource(
+ "/enum-test-models/ietf-yang-types@2010-09-24.yang").getPath();
+
+ final SchemaContext context = resolveSchemaContextFromFiles(
+ ietfInterfacesPath, ifTypePath, yangTypesPath);
+ assertTrue(context != null);
+
+ final BindingGenerator bindingGen = new BindingGeneratorImpl();
+ final List<Type> genTypes = bindingGen.generateTypes(context);
+ assertTrue(genTypes != null);
+
+ assertEquals("Expected count of all Generated Types from yang models " +
+ "is 22", 22, genTypes.size());
+
+ GeneratedType genInterface = null;
+ for (final Type type : genTypes) {
+ if (type instanceof GeneratedType) {
+ if (type.getName().equals("Interface")) {
+ genInterface = (GeneratedType) type;
+ }
+ }
+ }
+ assertNotNull("Generated Type Interface is not present in list of " +
+ "Generated Types", genInterface);
+
+ Enumeration linkUpDownTrapEnable = null;
+ Enumeration operStatus = null;
+ final List<Enumeration> enums = genInterface.getEnumDefintions();
+ assertNotNull("Generated Type Interface cannot contain NULL reference" +
+ " to Enumeration types!", enums);
+ assertEquals("Generated Type Interface MUST contain 2 Enumeration " +
+ "Types", 2, enums.size());
+ for (final Enumeration e : enums) {
+ if (e.getName().equals("LinkUpDownTrapEnable")) {
+ linkUpDownTrapEnable = e;
+ } else if (e.getName().equals("OperStatus")) {
+ operStatus = e;
+ }
+ }
+
+ assertNotNull("Expected Enum LinkUpDownTrapEnable, but was NULL!",
+ linkUpDownTrapEnable);
+ assertNotNull("Expected Enum OperStatus, but was NULL!", operStatus);
+
+ assertNotNull("Enum LinkUpDownTrapEnable MUST contain Values definition " +
+ "not NULL reference!", linkUpDownTrapEnable.getValues());
+ assertNotNull("Enum OperStatus MUST contain Values definition not " +
+ "NULL reference!", operStatus.getValues());
+ assertEquals("Enum LinkUpDownTrapEnable MUST contain 2 values!", 2,
+ linkUpDownTrapEnable.getValues().size());
+ assertEquals("Enum OperStatus MUST contain 7 values!", 7,
+ operStatus.getValues().size());
+
+ final List<MethodSignature> methods = genInterface
+ .getMethodDefinitions();
+
+ assertNotNull("Generated Interface cannot contain NULL reference for " +
+ "Method Signature Definitions!", methods);
+
+ assertEquals("Expected count of method signature definitions is 26",
+ 26, methods.size());
+ Enumeration ianaIfType = null;
+ for (final MethodSignature method : methods) {
+ if (method.getName().equals("getType")) {
+ if (method.getReturnType() instanceof Enumeration) {
+ ianaIfType = (Enumeration)method.getReturnType();
+ }
+ }
+ }
+
+ assertNotNull("Method getType MUST return Enumeration Type, " +
+ "not NULL reference!", ianaIfType);
+ assertEquals("Enumeration getType MUST contain 272 values!", 272,
+ ianaIfType.getValues().size());
+ }
+
+ @Test
+ public void testTypedefEnumResolving() {
+ final String ianaIfTypePath = getClass().getResource(
+ "/leafref-test-models/iana-if-type@2012-06-05.yang").getPath();
+
+ final SchemaContext context = resolveSchemaContextFromFiles(ianaIfTypePath);
+ assertTrue(context != null);
+ final BindingGenerator bindingGen = new BindingGeneratorImpl();
+ final List<Type> genTypes = bindingGen.generateTypes(context);
+ assertTrue(genTypes != null);
+ assertEquals(2, genTypes.size());
+
+ final Type type = genTypes.get(1);
+ assertTrue(type instanceof Enumeration);
+
+ final Enumeration enumer = (Enumeration) type;
+ assertEquals("Enumeration type MUST contain 272 values!", 272,
+ enumer.getValues().size());
+ }
+
+ @Test
+ public void testLeafrefEnumResolving() {
+ final String ietfInterfacesPath = getClass().getResource(
+ "/enum-test-models/ietf-interfaces@2012-11-15.yang").getPath();
+ final String ifTypePath = getClass().getResource(
+ "/enum-test-models/iana-if-type@2012-06-05.yang").getPath();
+ final String yangTypesPath = getClass().getResource(
+ "/enum-test-models/ietf-yang-types@2010-09-24.yang").getPath();
+ final String topologyPath = getClass().getResource(
+ "/enum-test-models/abstract-topology@2013-02-08.yang")
+ .getPath();
+ final String inetTypesPath = getClass().getResource(
+ "/enum-test-models/ietf-inet-types@2010-09-24.yang")
+ .getPath();
+ final SchemaContext context = resolveSchemaContextFromFiles(
+ ietfInterfacesPath, ifTypePath, yangTypesPath, topologyPath,
+ inetTypesPath);
+
+ assertNotNull(context);
+ final BindingGenerator bindingGen = new BindingGeneratorImpl();
+ final List<Type> genTypes = bindingGen.generateTypes(context);
+ assertNotNull(genTypes);
+ assertTrue(!genTypes.isEmpty());
+
+ GeneratedType genInterface = null;
+ for (final Type type : genTypes) {
+ if (type instanceof GeneratedType) {
+ if (type.getPackageName().equals("org.opendaylight.yang.gen.v1.urn.model._abstract.topology.rev201328.topology.interfaces")
+ && type.getName().equals("Interface")) {
+ genInterface = (GeneratedType) type;
+ }
+ }
+ }
+ assertNotNull("Generated Type Interface is not present in list of " +
+ "Generated Types", genInterface);
+
+ Type linkUpDownTrapEnable = null;
+ Type operStatus = null;
+ final List<MethodSignature> methods = genInterface.getMethodDefinitions();
+ assertNotNull("Generated Type Interface cannot contain NULL reference" +
+ " to Enumeration types!", methods);
+ assertEquals("Generated Type Interface MUST contain 7 Methods ",
+ 7, methods.size());
+ for (final MethodSignature method : methods) {
+ if (method.getName().equals("getLinkUpDownTrapEnable")) {
+ linkUpDownTrapEnable = method.getReturnType();
+ } else if (method.getName().equals("getOperStatus")) {
+ operStatus = method.getReturnType();
+ }
+ }
+
+ assertNotNull("Expected Referenced Enum LinkUpDownTrapEnable, but was NULL!",
+ linkUpDownTrapEnable);
+ assertTrue("Expected LinkUpDownTrapEnable of type ReferencedTypeImpl",
+ linkUpDownTrapEnable instanceof ReferencedTypeImpl);
+ assertEquals(linkUpDownTrapEnable.getPackageName(),
+ "org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.interfaces.rev20121115.interfaces.Interface");
+
+ assertNotNull("Expected Referenced Enum OperStatus, but was NULL!",
+ operStatus);
+ assertTrue("Expected OperStatus of type ReferencedTypeImpl",
+ operStatus instanceof ReferencedTypeImpl);
+ assertEquals(operStatus.getPackageName(),
+ "org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.interfaces.rev20121115.interfaces.Interface");
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.binding.generator.impl;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
+import org.opendaylight.controller.sal.binding.model.api.Type;
+import org.opendaylight.controller.yang.model.api.Module;
+import org.opendaylight.controller.yang.model.api.SchemaContext;
+import org.opendaylight.controller.yang.model.parser.api.YangModelParser;
+import org.opendaylight.controller.yang.parser.impl.YangParserImpl;
+
+import java.io.File;
+import java.util.*;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+
+public class GenTypesSubSetTest {
+
+ private final static List<File> yangModels = new ArrayList<>();
+ private final static String yangModelsFolder = AugmentedTypeTest.class
+ .getResource("/leafref-test-models").getPath();
+
+ @BeforeClass
+ public static void loadTestResources() {
+ final File augFolder = new File(yangModelsFolder);
+
+ for (final File fileEntry : augFolder.listFiles()) {
+ if (fileEntry.isFile()) {
+ yangModels.add(fileEntry);
+ }
+ }
+ }
+
+ @Test
+ public void genTypesFromSubsetOfTwoModulesTest() {
+ final YangModelParser parser = new YangParserImpl();
+ final Set<Module> modules = parser.parseYangModels(yangModels);
+ final SchemaContext context = parser.resolveSchemaContext(modules);
+
+ final Set<Module> toGenModules = new HashSet<>();
+ for (final Module module : modules) {
+ if (module.getName().equals("abstract-topology")) {
+ toGenModules.add(module);
+ } else if (module.getName().equals("ietf-interfaces")) {
+ toGenModules.add(module);
+ }
+ }
+
+ assertEquals("Set of to Generate Modules must contain 2 modules", 2,
+ toGenModules.size());
+ assertNotNull("Schema Context is null", context);
+ final BindingGenerator bindingGen = new BindingGeneratorImpl();
+ final List<Type> genTypes = bindingGen.generateTypes(context, toGenModules);
+ assertNotNull("genTypes is null", genTypes);
+ assertFalse("genTypes is empty", genTypes.isEmpty());
+ assertEquals("Expected Generated Types from provided sub set of " +
+ "modules should be 23!", 23,
+ genTypes.size());
+ }
+
+ @Test
+ public void genTypesFromSubsetOfThreeModulesTest() {
+ final YangModelParser parser = new YangParserImpl();
+ final Set<Module> modules = parser.parseYangModels(yangModels);
+ final SchemaContext context = parser.resolveSchemaContext(modules);
+
+ final Set<Module> toGenModules = new HashSet<>();
+ for (final Module module : modules) {
+ if (module.getName().equals("abstract-topology")) {
+ toGenModules.add(module);
+ } else if (module.getName().equals("ietf-interfaces")) {
+ toGenModules.add(module);
+ } else if (module.getName().equals("iana-if-type")) {
+ toGenModules.add(module);
+ }
+ }
+
+ assertEquals("Set of to Generate Modules must contain 3 modules", 3,
+ toGenModules.size());
+
+ assertNotNull("Schema Context is null", context);
+ final BindingGenerator bindingGen = new BindingGeneratorImpl();
+ final List<Type> genTypes = bindingGen.generateTypes(context, toGenModules);
+ assertNotNull("genTypes is null", genTypes);
+ assertFalse("genTypes is empty", genTypes.isEmpty());
+ assertEquals("Expected Generated Types from provided sub set of " +
+ "modules should be 25!", 25, genTypes.size());
+ }
+}
return parser.resolveSchemaContext(modules);
}
- @Test
- public void testLeafEnumResolving() {
- final String ietfInterfacesPath = getClass().getResource(
- "/enum-test-models/ietf-interfaces@2012-11-15.yang").getPath();
- final String ifTypePath = getClass().getResource(
- "/enum-test-models/iana-if-type@2012-06-05.yang").getPath();
- final String yangTypesPath = getClass().getResource(
- "/enum-test-models/ietf-yang-types@2010-09-24.yang").getPath();
-
- final SchemaContext context = resolveSchemaContextFromFiles(
- ietfInterfacesPath, ifTypePath, yangTypesPath);
- assertTrue(context != null);
-
- final BindingGenerator bindingGen = new BindingGeneratorImpl();
- final List<Type> genTypes = bindingGen.generateTypes(context);
- assertTrue(genTypes != null);
- }
-
- @Test
- public void testTypedefEnumResolving() {
- final String ianaIfTypePath = getClass().getResource(
- "/leafref-test-models/iana-if-type@2012-06-05.yang").getPath();
-
- final SchemaContext context = resolveSchemaContextFromFiles(ianaIfTypePath);
- assertTrue(context != null);
- final BindingGenerator bindingGen = new BindingGeneratorImpl();
- final List<Type> genTypes = bindingGen.generateTypes(context);
- assertTrue(genTypes != null);
- assertEquals(2, genTypes.size());
-
- final Type type = genTypes.get(1);
- assertTrue(type instanceof GeneratedTransferObject);
-
- final GeneratedTransferObject genTransObj = (GeneratedTransferObject) type;
- final List<GeneratedProperty> properties = genTransObj.getProperties();
- assertNotNull(properties);
- assertEquals(1, properties.size());
-
- GeneratedProperty property = properties.get(0);
- assertNotNull(property);
- assertNotNull(property.getReturnType());
-
- assertTrue(property.getReturnType() instanceof Enumeration);
- final Enumeration enumer = (Enumeration) property.getReturnType();
- assertEquals(272, enumer.getValues().size());
- }
-
@Test
public void testMultipleModulesResolving() {
final String topologyPath = getClass().getResource(
final List<Type> genTypes = bindingGen.generateTypes(context);
assertTrue(genTypes != null);
- assertEquals(24, genTypes.size());
+ assertEquals(27, genTypes.size());
}
@Test
final BindingGenerator bindingGen = new BindingGeneratorImpl();
final List<Type> genTypes = bindingGen.generateTypes(context);
- assertEquals(50, genTypes.size());
+ assertEquals(53, genTypes.size());
assertTrue(genTypes != null);
GeneratedTransferObject gtIfcKey = null;
assertTrue(genTypes != null);
assertEquals(3, genTypes.size());
- final GeneratedType simpleContainer = (GeneratedType) genTypes.get(0);
- final GeneratedType nestedContainer = (GeneratedType) genTypes.get(1);
+ final GeneratedType simpleContainer = (GeneratedType) genTypes.get(1);
+ final GeneratedType nestedContainer = (GeneratedType) genTypes.get(2);
assertEquals("SimpleContainer", simpleContainer.getName());
assertEquals("NestedContainer", nestedContainer.getName());
assertTrue(genTypes != null);
assertEquals(3, genTypes.size());
- final GeneratedType simpleContainer = (GeneratedType) genTypes.get(0);
- final GeneratedType nestedContainer = (GeneratedType) genTypes.get(1);
+ final GeneratedType simpleContainer = (GeneratedType) genTypes.get(1);
+ final GeneratedType nestedContainer = (GeneratedType) genTypes.get(2);
assertEquals("SimpleContainer", simpleContainer.getName());
assertEquals("NestedContainer", nestedContainer.getName());
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.binding.generator.impl;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
+import org.opendaylight.controller.sal.binding.model.api.Type;
+import org.opendaylight.controller.yang.model.api.Module;
+import org.opendaylight.controller.yang.model.api.SchemaContext;
+import org.opendaylight.controller.yang.model.parser.api.YangModelParser;
+import org.opendaylight.controller.yang.parser.impl.YangParserImpl;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+
+public class UnionTypeDefTest {
+ private final static List<File> yangModels = new ArrayList<>();
+ private final static String yangModelsFolder = AugmentedTypeTest.class
+ .getResource("/union-test-models").getPath();
+
+ @BeforeClass
+ public static void loadTestResources() {
+ final File augFolder = new File(yangModelsFolder);
+
+ for (final File fileEntry : augFolder.listFiles()) {
+ if (fileEntry.isFile()) {
+ yangModels.add(fileEntry);
+ }
+ }
+ }
+
+ @Test
+ public void unionTypeResolvingTest() {
+ final YangModelParser parser = new YangParserImpl();
+ final Set<Module> modules = parser.parseYangModels(yangModels);
+ final SchemaContext context = parser.resolveSchemaContext(modules);
+
+ assertNotNull("context is null", context);
+ final BindingGenerator bindingGen = new BindingGeneratorImpl();
+ final List<Type> genTypes = bindingGen.generateTypes(context);
+
+ assertNotNull("genTypes is null", genTypes);
+ assertFalse("genTypes is empty", genTypes.isEmpty());
+
+ //TODO: implement test
+ }
+}
--- /dev/null
+module abstract-topology {
+ yang-version 1;
+ namespace "urn:model:abstract:topology";
+ prefix "tp";
+
+ import ietf-inet-types {
+ prefix "inet";
+ revision-date 2010-09-24;
+ }
+
+ organization "OPEN DAYLIGHT";
+ contact "http://www.opendaylight.org/";
+
+ description
+    "This module contains the definitions of elements that create network
+ topology i.e. definition of network nodes and links. This module is not designed
+ to be used solely for network representation. This module SHOULD be used as base
+ module in defining the network topology.";
+
+ revision "2013-02-08" {
+ reference "http://www.opendaylight.org/";
+ }
+
+ typedef node-id-ref {
+ type leafref {
+ path "/tp:topology/tp:network-nodes/tp:network-node/tp:node-id";
+ }
+ description "This type is used for leafs that reference network node instance.";
+ }
+
+ typedef link-id-ref {
+ type leafref {
+ path "/tp:topology/tp:network-links/tp:network-link/tp:link-id";
+ }
+ description "This type is used for leafs that reference network link instance.";
+ }
+
+ container topology {
+ description "This is the model of abstract topology which contains
+ only Network Nodes and Network Links. Each topology MUST be identified by unique
+ topology-id for reason that the store could contain many topologies.";
+
+ leaf topology-id {
+ type inet:uri;
+ description "It is presumed that datastore will contain many topologies.
+ To distinguish between topologies it is vital to have
+ UNIQUE topology identifier.";
+ }
+
+ container network-nodes {
+ list network-node {
+ key "node-id";
+
+ leaf node-id {
+ type inet:uri;
+ description "The Topology identifier of network-node.";
+ }
+
+ container attributes {
+                    description "Additional attributes that a Network Node can contain.";
+ }
+ description "The list of network nodes defined for topology.";
+ }
+ }
+
+ container network-links {
+ list network-link {
+ key "link-id";
+
+ leaf link-id {
+ type inet:uri;
+ description "";
+ }
+
+ container source-node {
+ leaf id {
+ type node-id-ref;
+ description "Source node identifier.";
+ }
+ }
+
+ container destination-node {
+ leaf id {
+ type node-id-ref;
+ description "Destination node identifier.";
+ }
+ }
+
+ container attributes {
+                    description "Additional attributes that a Network Link can contain.";
+ }
+ description "The Network Link which is defined by Local (Source) and Remote (Destination)
+ Network Nodes. Every link MUST be defined either by identifier and
+                its local and remote Network Nodes (In real applications it is common that
+ many links are originated from one node and end up in same remote node).
+ To ensure that we would always know to distinguish between links, every link
+ SHOULD have identifier.";
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module augment-abstract-topology {
+ yang-version 1;
+ namespace "urn:model:augment:abstract:topology";
+ prefix "atp";
+
+ import ietf-inet-types {
+ prefix "inet";
+ revision-date 2010-09-24;
+ }
+
+ import ietf-interfaces {
+ prefix "if";
+ revision-date 2012-11-15;
+ }
+
+ import abstract-topology {
+ prefix "at";
+ revision-date 2013-02-08;
+ }
+
+ organization "OPEN DAYLIGHT";
+ contact "http://www.opendaylight.org/";
+
+ revision "2013-05-03" {
+ reference "http://www.opendaylight.org/";
+ }
+
+ augment "at:topology" {
+ container interfaces {
+ list interface {
+ key "interface-id";
+
+ leaf interface-id {
+ type leafref {
+ path "/if:interfaces/if:interface/if:name";
+ }
+ }
+
+ leaf-list higher-layer-if {
+ type leafref {
+ path "/if:interfaces/if:interface/if:higher-layer-if";
+ }
+ }
+ }
+ }
+ }
+
+ augment "at:topology/at:network-links/at:network-link" {
+ container tunnels {
+ list tunnel {
+ key "tunnel-id";
+
+ leaf tunnel-id {
+ type leafref {
+ path "../../../link-id";
+ }
+ }
+ }
+ }
+ }
+
+ augment "at:topology/at:network-links/at:network-link" {
+ leaf interface {
+ type leafref {
+ path "../../../atp:interfaces/atp:interface/atp:interface-id";
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module iana-if-type {
+ namespace "urn:ietf:params:xml:ns:yang:iana-if-type";
+ prefix ianaift;
+
+ organization "IANA";
+ contact
+ " Internet Assigned Numbers Authority
+
+ Postal: ICANN
+ 4676 Admiralty Way, Suite 330
+ Marina del Rey, CA 90292
+
+ Tel: +1 310 823 9358
+ E-Mail: iana&iana.org";
+ description
+ "This YANG module defines the iana-if-type typedef, which
+ contains YANG definitions for IANA-registered interface types.
+
+ This YANG module is maintained by IANA, and reflects the
+ 'ifType definitions' registry.
+
+ The latest revision of this YANG module can be obtained from
+ the IANA web site.
+
+ Copyright (c) 2011 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, is permitted pursuant to, and subject
+ to the license terms contained in, the Simplified BSD License
+ set forth in Section 4.c of the IETF Trust's Legal Provisions
+ Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC XXXX; see
+ the RFC itself for full legal notices.";
+ // RFC Ed.: replace XXXX with actual RFC number and remove this
+ // note.
+
+ // RFC Ed.: update the date below with the date of RFC publication
+ // and remove this note.
+ revision 2012-06-05 {
+ description
+ "Initial revision.";
+ reference
+ "RFC XXXX: TITLE";
+ }
+
+ typedef iana-if-type {
+ type enumeration {
+ enum "other" {
+ value 1;
+ description
+ "None of the following";
+ }
+ enum "regular1822" {
+ value 2;
+ }
+ enum "hdh1822" {
+ value 3;
+ }
+ enum "ddnX25" {
+ value 4;
+ }
+ enum "rfc877x25" {
+ value 5;
+ reference
+ "RFC 1382 - SNMP MIB Extension for the X.25 Packet Layer";
+ }
+ enum "ethernetCsmacd" {
+ value 6;
+ description
+ "For all ethernet-like interfaces, regardless of speed,
+ as per RFC3635.";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "iso88023Csmacd" {
+ value 7;
+ status deprecated;
+ description
+ "Deprecated via RFC3635.
+ Use ethernetCsmacd(6) instead.";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "iso88024TokenBus" {
+ value 8;
+ }
+ enum "iso88025TokenRing" {
+ value 9;
+ }
+ enum "iso88026Man" {
+ value 10;
+ }
+ enum "starLan" {
+ value 11;
+ status deprecated;
+ description
+ "Deprecated via RFC3635.
+ Use ethernetCsmacd(6) instead.";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "proteon10Mbit" {
+ value 12;
+ }
+ enum "proteon80Mbit" {
+ value 13;
+ }
+ enum "hyperchannel" {
+ value 14;
+ }
+ enum "fddi" {
+ value 15;
+ reference
+ "RFC 1512 - FDDI Management Information Base";
+ }
+ enum "lapb" {
+ value 16;
+ reference
+ "RFC 1381 - SNMP MIB Extension for X.25 LAPB";
+ }
+ enum "sdlc" {
+ value 17;
+ }
+ enum "ds1" {
+ value 18;
+ description
+ "DS1-MIB";
+ reference
+ "RFC 4805 - Definitions of Managed Objects for the
+ DS1, J1, E1, DS2, and E2 Interface Types";
+ }
+ enum "e1" {
+ value 19;
+ status obsolete;
+ description
+ "Obsolete see DS1-MIB";
+ reference
+ "RFC 4805 - Definitions of Managed Objects for the
+ DS1, J1, E1, DS2, and E2 Interface Types";
+ }
+ enum "basicISDN" {
+ value 20;
+ description
+ "see also RFC2127";
+ }
+ enum "primaryISDN" {
+ value 21;
+ }
+ enum "propPointToPointSerial" {
+ value 22;
+ description
+ "proprietary serial";
+ }
+ enum "ppp" {
+ value 23;
+ }
+ enum "softwareLoopback" {
+ value 24;
+ }
+ enum "eon" {
+ value 25;
+ description
+ "CLNP over IP";
+ }
+ enum "ethernet3Mbit" {
+ value 26;
+ }
+ enum "nsip" {
+ value 27;
+ description
+ "XNS over IP";
+ }
+ enum "slip" {
+ value 28;
+ description
+ "generic SLIP";
+ }
+ enum "ultra" {
+ value 29;
+ description
+ "ULTRA technologies";
+ }
+ enum "ds3" {
+ value 30;
+ description
+ "DS3-MIB";
+ reference
+ "RFC 3896 - Definitions of Managed Objects for the
+ DS3/E3 Interface Type";
+ }
+ enum "sip" {
+ value 31;
+ description
+ "SMDS, coffee";
+ reference
+ "RFC 1694 - Definitions of Managed Objects for SMDS
+ Interfaces using SMIv2";
+ }
+ enum "frameRelay" {
+ value 32;
+ description
+ "DTE only.";
+ reference
+ "RFC 2115 - Management Information Base for Frame Relay
+ DTEs Using SMIv2";
+ }
+ enum "rs232" {
+ value 33;
+ reference
+ "RFC 1659 - Definitions of Managed Objects for RS-232-like
+ Hardware Devices using SMIv2";
+ }
+ enum "para" {
+ value 34;
+ description
+ "parallel-port";
+ reference
+ "RFC 1660 - Definitions of Managed Objects for
+ Parallel-printer-like Hardware Devices using
+ SMIv2";
+ }
+ enum "arcnet" {
+ value 35;
+ description
+ "arcnet";
+ }
+ enum "arcnetPlus" {
+ value 36;
+ description
+ "arcnet plus";
+ }
+ enum "atm" {
+ value 37;
+ description
+ "ATM cells";
+ }
+ enum "miox25" {
+ value 38;
+ reference
+ "RFC 1461 - SNMP MIB extension for Multiprotocol
+ Interconnect over X.25";
+ }
+ enum "sonet" {
+ value 39;
+ description
+ "SONET or SDH";
+ }
+ enum "x25ple" {
+ value 40;
+ reference
+ "RFC 2127 - ISDN Management Information Base using SMIv2";
+ }
+ enum "iso88022llc" {
+ value 41;
+ }
+ enum "localTalk" {
+ value 42;
+ }
+ enum "smdsDxi" {
+ value 43;
+ }
+ enum "frameRelayService" {
+ value 44;
+ description
+ "FRNETSERV-MIB";
+ reference
+ "RFC 2954 - Definitions of Managed Objects for Frame
+ Relay Service";
+ }
+ enum "v35" {
+ value 45;
+ }
+ enum "hssi" {
+ value 46;
+ }
+ enum "hippi" {
+ value 47;
+ }
+ enum "modem" {
+ value 48;
+ description
+ "Generic modem";
+ }
+ enum "aal5" {
+ value 49;
+ description
+ "AAL5 over ATM";
+ }
+ enum "sonetPath" {
+ value 50;
+ }
+ enum "sonetVT" {
+ value 51;
+ }
+ enum "smdsIcip" {
+ value 52;
+ description
+ "SMDS InterCarrier Interface";
+ }
+ enum "propVirtual" {
+ value 53;
+ description
+ "proprietary virtual/internal";
+ reference
+ "RFC 2863 - The Interfaces Group MIB";
+ }
+ enum "propMultiplexor" {
+ value 54;
+ description
+ "proprietary multiplexing";
+ reference
+ "RFC 2863 - The Interfaces Group MIB";
+ }
+ enum "ieee80212" {
+ value 55;
+ description
+ "100BaseVG";
+ }
+ enum "fibreChannel" {
+ value 56;
+ description
+ "Fibre Channel";
+ }
+ enum "hippiInterface" {
+ value 57;
+ description
+ "HIPPI interfaces";
+ }
+ enum "frameRelayInterconnect" {
+ value 58;
+ status obsolete;
+ description
+ "Obsolete use either
+ frameRelay(32) or frameRelayService(44).";
+ }
+ enum "aflane8023" {
+ value 59;
+ description
+ "ATM Emulated LAN for 802.3";
+ }
+ enum "aflane8025" {
+ value 60;
+ description
+ "ATM Emulated LAN for 802.5";
+ }
+ enum "cctEmul" {
+ value 61;
+ description
+ "ATM Emulated circuit";
+ }
+ enum "fastEther" {
+ value 62;
+ status deprecated;
+ description
+ "Obsoleted via RFC3635.
+ ethernetCsmacd(6) should be used instead";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "isdn" {
+ value 63;
+ description
+ "ISDN and X.25";
+ reference
+ "RFC 1356 - Multiprotocol Interconnect on X.25 and ISDN
+ in the Packet Mode";
+ }
+ enum "v11" {
+ value 64;
+ description
+ "CCITT V.11/X.21";
+ }
+ enum "v36" {
+ value 65;
+ description
+ "CCITT V.36";
+ }
+ enum "g703at64k" {
+ value 66;
+ description
+ "CCITT G703 at 64Kbps";
+ }
+ enum "g703at2mb" {
+ value 67;
+ status obsolete;
+ description
+ "Obsolete see DS1-MIB";
+ }
+ enum "qllc" {
+ value 68;
+ description
+ "SNA QLLC";
+ }
+ enum "fastEtherFX" {
+ value 69;
+ status deprecated;
+ description
+ "Obsoleted via RFC3635
+ ethernetCsmacd(6) should be used instead";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "channel" {
+ value 70;
+ description
+ "channel";
+ }
+ enum "ieee80211" {
+ value 71;
+ description
+ "radio spread spectrum";
+ }
+ enum "ibm370parChan" {
+ value 72;
+ description
+ "IBM System 360/370 OEMI Channel";
+ }
+ enum "escon" {
+ value 73;
+ description
+ "IBM Enterprise Systems Connection";
+ }
+ enum "dlsw" {
+ value 74;
+ description
+ "Data Link Switching";
+ }
+ enum "isdns" {
+ value 75;
+ description
+ "ISDN S/T interface";
+ }
+ enum "isdnu" {
+ value 76;
+ description
+ "ISDN U interface";
+ }
+ enum "lapd" {
+ value 77;
+ description
+ "Link Access Protocol D";
+ }
+ enum "ipSwitch" {
+ value 78;
+ description
+ "IP Switching Objects";
+ }
+ enum "rsrb" {
+ value 79;
+ description
+ "Remote Source Route Bridging";
+ }
+ enum "atmLogical" {
+ value 80;
+ description
+ "ATM Logical Port";
+ reference
+ "RFC 3606 - Definitions of Supplemental Managed Objects
+ for ATM Interface";
+ }
+ enum "ds0" {
+ value 81;
+ description
+ "Digital Signal Level 0";
+ reference
+ "RFC 2494 - Definitions of Managed Objects for the DS0
+ and DS0 Bundle Interface Type";
+ }
+ enum "ds0Bundle" {
+ value 82;
+ description
+ "group of ds0s on the same ds1";
+ reference
+ "RFC 2494 - Definitions of Managed Objects for the DS0
+ and DS0 Bundle Interface Type";
+ }
+ enum "bsc" {
+ value 83;
+ description
+ "Bisynchronous Protocol";
+ }
+ enum "async" {
+ value 84;
+ description
+ "Asynchronous Protocol";
+ }
+ enum "cnr" {
+ value 85;
+ description
+ "Combat Net Radio";
+ }
+ enum "iso88025Dtr" {
+ value 86;
+ description
+ "ISO 802.5r DTR";
+ }
+ enum "eplrs" {
+ value 87;
+ description
+ "Ext Pos Loc Report Sys";
+ }
+ enum "arap" {
+ value 88;
+ description
+ "Appletalk Remote Access Protocol";
+ }
+ enum "propCnls" {
+ value 89;
+ description
+ "Proprietary Connectionless Protocol";
+ }
+ enum "hostPad" {
+ value 90;
+ description
+ "CCITT-ITU X.29 PAD Protocol";
+ }
+ enum "termPad" {
+ value 91;
+ description
+ "CCITT-ITU X.3 PAD Facility";
+ }
+ enum "frameRelayMPI" {
+ value 92;
+ description
+ "Multiproto Interconnect over FR";
+ }
+ enum "x213" {
+ value 93;
+ description
+ "CCITT-ITU X213";
+ }
+ enum "adsl" {
+ value 94;
+ description
+ "Asymmetric Digital Subscriber Loop";
+ }
+ enum "radsl" {
+ value 95;
+ description
+ "Rate-Adapt. Digital Subscriber Loop";
+ }
+ enum "sdsl" {
+ value 96;
+ description
+ "Symmetric Digital Subscriber Loop";
+ }
+ enum "vdsl" {
+ value 97;
+ description
+ "Very H-Speed Digital Subscrib. Loop";
+ }
+ enum "iso88025CRFPInt" {
+ value 98;
+ description
+ "ISO 802.5 CRFP";
+ }
+ enum "myrinet" {
+ value 99;
+ description
+ "Myricom Myrinet";
+ }
+ enum "voiceEM" {
+ value 100;
+ description
+ "voice recEive and transMit";
+ }
+ enum "voiceFXO" {
+ value 101;
+ description
+ "voice Foreign Exchange Office";
+ }
+ enum "voiceFXS" {
+ value 102;
+ description
+ "voice Foreign Exchange Station";
+ }
+ enum "voiceEncap" {
+ value 103;
+ description
+ "voice encapsulation";
+ }
+ enum "voiceOverIp" {
+ value 104;
+ description
+ "voice over IP encapsulation";
+ }
+ enum "atmDxi" {
+ value 105;
+ description
+ "ATM DXI";
+ }
+ enum "atmFuni" {
+ value 106;
+ description
+ "ATM FUNI";
+ }
+ enum "atmIma" {
+ value 107;
+ description
+ "ATM IMA";
+ }
+ enum "pppMultilinkBundle" {
+ value 108;
+ description
+ "PPP Multilink Bundle";
+ }
+ enum "ipOverCdlc" {
+ value 109;
+ description
+ "IBM ipOverCdlc";
+ }
+ enum "ipOverClaw" {
+ value 110;
+ description
+ "IBM Common Link Access to Workstn";
+ }
+ enum "stackToStack" {
+ value 111;
+ description
+ "IBM stackToStack";
+ }
+ enum "virtualIpAddress" {
+ value 112;
+ description
+ "IBM VIPA";
+ }
+ enum "mpc" {
+ value 113;
+ description
+ "IBM multi-protocol channel support";
+ }
+ enum "ipOverAtm" {
+ value 114;
+ description
+ "IBM ipOverAtm";
+ reference
+ "RFC 2320 - Definitions of Managed Objects for Classical IP
+ and ARP Over ATM Using SMIv2 (IPOA-MIB)";
+ }
+ enum "iso88025Fiber" {
+ value 115;
+ description
+ "ISO 802.5j Fiber Token Ring";
+ }
+ enum "tdlc" {
+ value 116;
+ description
+ "IBM twinaxial data link control";
+ }
+ enum "gigabitEthernet" {
+ value 117;
+ status deprecated;
+ description
+ "Obsoleted via RFC3635
+ ethernetCsmacd(6) should be used instead";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "hdlc" {
+ value 118;
+ description
+ "HDLC";
+ }
+ enum "lapf" {
+ value 119;
+ description
+ "LAP F";
+ }
+ enum "v37" {
+ value 120;
+ description
+ "V.37";
+ }
+ enum "x25mlp" {
+ value 121;
+ description
+ "Multi-Link Protocol";
+ }
+ enum "x25huntGroup" {
+ value 122;
+ description
+ "X25 Hunt Group";
+ }
+ enum "transpHdlc" {
+ value 123;
+ description
+ "Transp HDLC";
+ }
+ enum "interleave" {
+ value 124;
+ description
+ "Interleave channel";
+ }
+ enum "fast" {
+ value 125;
+ description
+ "Fast channel";
+ }
+ enum "ip" {
+ value 126;
+ description
+ "IP (for APPN HPR in IP networks)";
+ }
+ enum "docsCableMaclayer" {
+ value 127;
+ description
+ "CATV Mac Layer";
+ }
+ enum "docsCableDownstream" {
+ value 128;
+ description
+ "CATV Downstream interface";
+ }
+ enum "docsCableUpstream" {
+ value 129;
+ description
+ "CATV Upstream interface";
+ }
+ enum "a12MppSwitch" {
+ value 130;
+ description
+ "Avalon Parallel Processor";
+ }
+ enum "tunnel" {
+ value 131;
+ description
+ "Encapsulation interface";
+ }
+ enum "coffee" {
+ value 132;
+ description
+ "coffee pot";
+ reference
+ "RFC 2325 - Coffee MIB";
+ }
+ enum "ces" {
+ value 133;
+ description
+ "Circuit Emulation Service";
+ }
+ enum "atmSubInterface" {
+ value 134;
+ description
+ "ATM Sub Interface";
+ }
+ enum "l2vlan" {
+ value 135;
+ description
+ "Layer 2 Virtual LAN using 802.1Q";
+ }
+ enum "l3ipvlan" {
+ value 136;
+ description
+ "Layer 3 Virtual LAN using IP";
+ }
+ enum "l3ipxvlan" {
+ value 137;
+ description
+ "Layer 3 Virtual LAN using IPX";
+ }
+ enum "digitalPowerline" {
+ value 138;
+ description
+ "IP over Power Lines";
+ }
+ enum "mediaMailOverIp" {
+ value 139;
+ description
+ "Multimedia Mail over IP";
+ }
+ enum "dtm" {
+ value 140;
+ description
+ "Dynamic syncronous Transfer Mode";
+ }
+ enum "dcn" {
+ value 141;
+ description
+ "Data Communications Network";
+ }
+ enum "ipForward" {
+ value 142;
+ description
+ "IP Forwarding Interface";
+ }
+ enum "msdsl" {
+ value 143;
+ description
+ "Multi-rate Symmetric DSL";
+ }
+ enum "ieee1394" {
+ value 144;
+ description
+ "IEEE1394 High Performance Serial Bus";
+ }
+ enum "if-gsn" {
+ value 145;
+ description
+ "HIPPI-6400";
+ }
+ enum "dvbRccMacLayer" {
+ value 146;
+ description
+ "DVB-RCC MAC Layer";
+ }
+ enum "dvbRccDownstream" {
+ value 147;
+ description
+ "DVB-RCC Downstream Channel";
+ }
+ enum "dvbRccUpstream" {
+ value 148;
+ description
+ "DVB-RCC Upstream Channel";
+ }
+ enum "atmVirtual" {
+ value 149;
+ description
+ "ATM Virtual Interface";
+ }
+ enum "mplsTunnel" {
+ value 150;
+ description
+ "MPLS Tunnel Virtual Interface";
+ }
+ enum "srp" {
+ value 151;
+ description
+ "Spatial Reuse Protocol ";
+ }
+ enum "voiceOverAtm" {
+ value 152;
+ description
+ "Voice Over ATM";
+ }
+ enum "voiceOverFrameRelay" {
+ value 153;
+ description
+ "Voice Over Frame Relay";
+ }
+ enum "idsl" {
+ value 154;
+ description
+ "Digital Subscriber Loop over ISDN";
+ }
+ enum "compositeLink" {
+ value 155;
+ description
+ "Avici Composite Link Interface";
+ }
+ enum "ss7SigLink" {
+ value 156;
+ description
+ "SS7 Signaling Link";
+ }
+ enum "propWirelessP2P" {
+ value 157;
+ description
+ "Prop. P2P wireless interface";
+ }
+ enum "frForward" {
+ value 158;
+ description
+ "Frame Forward Interface";
+ }
+ enum "rfc1483" {
+ value 159;
+ description
+ "Multiprotocol over ATM AAL5";
+ reference
+ "RFC 1483 - Multiprotocol Encapsulation over ATM
+ Adaptation Layer 5";
+ }
+ enum "usb" {
+ value 160;
+ description
+ "USB Interface";
+ }
+ enum "ieee8023adLag" {
+ value 161;
+ description
+ "IEEE 802.3ad Link Aggregate";
+ }
+ enum "bgppolicyaccounting" {
+ value 162;
+ description
+ "BGP Policy Accounting";
+ }
+ enum "frf16MfrBundle" {
+ value 163;
+ description
+ "FRF .16 Multilink Frame Relay";
+ }
+ enum "h323Gatekeeper" {
+ value 164;
+ description
+ "H323 Gatekeeper";
+ }
+ enum "h323Proxy" {
+ value 165;
+ description
+ "H323 Voice and Video Proxy";
+ }
+ enum "mpls" {
+ value 166;
+ description
+ "MPLS";
+ }
+ enum "mfSigLink" {
+ value 167;
+ description
+ "Multi-frequency signaling link";
+ }
+ enum "hdsl2" {
+ value 168;
+ description
+ "High Bit-Rate DSL - 2nd generation";
+ }
+ enum "shdsl" {
+ value 169;
+ description
+ "Multirate HDSL2";
+ }
+ enum "ds1FDL" {
+ value 170;
+ description
+ "Facility Data Link 4Kbps on a DS1";
+ }
+ enum "pos" {
+ value 171;
+ description
+ "Packet over SONET/SDH Interface";
+ }
+ enum "dvbAsiIn" {
+ value 172;
+ description
+ "DVB-ASI Input";
+ }
+ enum "dvbAsiOut" {
+ value 173;
+ description
+ "DVB-ASI Output";
+ }
+ enum "plc" {
+ value 174;
+ description
+ "Power Line Communtications";
+ }
+ enum "nfas" {
+ value 175;
+ description
+ "Non Facility Associated Signaling";
+ }
+ enum "tr008" {
+ value 176;
+ description
+ "TR008";
+ }
+ enum "gr303RDT" {
+ value 177;
+ description
+ "Remote Digital Terminal";
+ }
+ enum "gr303IDT" {
+ value 178;
+ description
+ "Integrated Digital Terminal";
+ }
+ enum "isup" {
+ value 179;
+ description
+ "ISUP";
+ }
+ enum "propDocsWirelessMaclayer" {
+ value 180;
+ description
+ "Cisco proprietary Maclayer";
+ }
+ enum "propDocsWirelessDownstream" {
+ value 181;
+ description
+ "Cisco proprietary Downstream";
+ }
+ enum "propDocsWirelessUpstream" {
+ value 182;
+ description
+ "Cisco proprietary Upstream";
+ }
+ enum "hiperlan2" {
+ value 183;
+ description
+ "HIPERLAN Type 2 Radio Interface";
+ }
+ enum "propBWAp2Mp" {
+ value 184;
+ description
+ "PropBroadbandWirelessAccesspt2multipt use of this value
+ for IEEE 802.16 WMAN interfaces as per IEEE Std 802.16f
+ is deprecated and ieee80216WMAN(237) should be used
+ instead.";
+ }
+ enum "sonetOverheadChannel" {
+ value 185;
+ description
+ "SONET Overhead Channel";
+ }
+ enum "digitalWrapperOverheadChannel" {
+ value 186;
+ description
+ "Digital Wrapper";
+ }
+ enum "aal2" {
+ value 187;
+ description
+ "ATM adaptation layer 2";
+ }
+ enum "radioMAC" {
+ value 188;
+ description
+ "MAC layer over radio links";
+ }
+ enum "atmRadio" {
+ value 189;
+ description
+ "ATM over radio links";
+ }
+ enum "imt" {
+ value 190;
+ description
+ "Inter Machine Trunks";
+ }
+ enum "mvl" {
+ value 191;
+ description
+ "Multiple Virtual Lines DSL";
+ }
+ enum "reachDSL" {
+ value 192;
+ description
+ "Long Reach DSL";
+ }
+ enum "frDlciEndPt" {
+ value 193;
+ description
+ "Frame Relay DLCI End Point";
+ }
+ enum "atmVciEndPt" {
+ value 194;
+ description
+ "ATM VCI End Point";
+ }
+ enum "opticalChannel" {
+ value 195;
+ description
+ "Optical Channel";
+ }
+ enum "opticalTransport" {
+ value 196;
+ description
+ "Optical Transport";
+ }
+ enum "propAtm" {
+ value 197;
+ description
+ "Proprietary ATM";
+ }
+ enum "voiceOverCable" {
+ value 198;
+ description
+ "Voice Over Cable Interface";
+ }
+ enum "infiniband" {
+ value 199;
+ description
+ "Infiniband";
+ }
+ enum "teLink" {
+ value 200;
+ description
+ "TE Link";
+ }
+ enum "q2931" {
+ value 201;
+ description
+ "Q.2931";
+ }
+ enum "virtualTg" {
+ value 202;
+ description
+ "Virtual Trunk Group";
+ }
+ enum "sipTg" {
+ value 203;
+ description
+ "SIP Trunk Group";
+ }
+ enum "sipSig" {
+ value 204;
+ description
+ "SIP Signaling";
+ }
+ enum "docsCableUpstreamChannel" {
+ value 205;
+ description
+ "CATV Upstream Channel";
+ }
+ enum "econet" {
+ value 206;
+ description
+ "Acorn Econet";
+ }
+ enum "pon155" {
+ value 207;
+ description
+ "FSAN 155Mb Symetrical PON interface";
+ }
+ enum "pon622" {
+ value 208;
+ description
+ "FSAN622Mb Symetrical PON interface";
+ }
+ enum "bridge" {
+ value 209;
+ description
+ "Transparent bridge interface";
+ }
+ enum "linegroup" {
+ value 210;
+ description
+ "Interface common to multiple lines";
+ }
+ enum "voiceEMFGD" {
+ value 211;
+ description
+ "voice E&M Feature Group D";
+ }
+ enum "voiceFGDEANA" {
+ value 212;
+ description
+ "voice FGD Exchange Access North American";
+ }
+ enum "voiceDID" {
+ value 213;
+ description
+ "voice Direct Inward Dialing";
+ }
+ enum "mpegTransport" {
+ value 214;
+ description
+ "MPEG transport interface";
+ }
+ enum "sixToFour" {
+ value 215;
+ status deprecated;
+ description
+ "6to4 interface (DEPRECATED)";
+ reference
+ "RFC 4087 - IP Tunnel MIB";
+ }
+ enum "gtp" {
+ value 216;
+ description
+ "GTP (GPRS Tunneling Protocol)";
+ }
+ enum "pdnEtherLoop1" {
+ value 217;
+ description
+ "Paradyne EtherLoop 1";
+ }
+ enum "pdnEtherLoop2" {
+ value 218;
+ description
+ "Paradyne EtherLoop 2";
+ }
+ enum "opticalChannelGroup" {
+ value 219;
+ description
+ "Optical Channel Group";
+ }
+ enum "homepna" {
+ value 220;
+ description
+ "HomePNA ITU-T G.989";
+ }
+ enum "gfp" {
+ value 221;
+ description
+ "Generic Framing Procedure (GFP)";
+ }
+ enum "ciscoISLvlan" {
+ value 222;
+ description
+ "Layer 2 Virtual LAN using Cisco ISL";
+ }
+ enum "actelisMetaLOOP" {
+ value 223;
+ description
+ "Acteleis proprietary MetaLOOP High Speed Link";
+ }
+ enum "fcipLink" {
+ value 224;
+ description
+ "FCIP Link";
+ }
+ enum "rpr" {
+ value 225;
+ description
+ "Resilient Packet Ring Interface Type";
+ }
+ enum "qam" {
+ value 226;
+ description
+ "RF Qam Interface";
+ }
+ enum "lmp" {
+ value 227;
+ description
+ "Link Management Protocol";
+ reference
+ "RFC 4327 - Link Management Protocol (LMP) Management
+ Information Base (MIB)";
+ }
+ enum "cblVectaStar" {
+ value 228;
+ description
+ "Cambridge Broadband Networks Limited VectaStar";
+ }
+ enum "docsCableMCmtsDownstream" {
+ value 229;
+ description
+ "CATV Modular CMTS Downstream Interface";
+ }
+ enum "adsl2" {
+ value 230;
+ status deprecated;
+ description
+ "Asymmetric Digital Subscriber Loop Version 2
+ (DEPRECATED/OBSOLETED - please use adsl2plus(238)
+ instead)";
+ reference
+ "RFC 4706 - Definitions of Managed Objects for Asymmetric
+ Digital Subscriber Line 2 (ADSL2)";
+ }
+ enum "macSecControlledIF" {
+ value 231;
+ description
+ "MACSecControlled";
+ }
+ enum "macSecUncontrolledIF" {
+ value 232;
+ description
+ "MACSecUncontrolled";
+ }
+ enum "aviciOpticalEther" {
+ value 233;
+ description
+ "Avici Optical Ethernet Aggregate";
+ }
+ enum "atmbond" {
+ value 234;
+ description
+ "atmbond";
+ }
+ enum "voiceFGDOS" {
+ value 235;
+ description
+ "voice FGD Operator Services";
+ }
+ enum "mocaVersion1" {
+ value 236;
+ description
+ "MultiMedia over Coax Alliance (MoCA) Interface
+ as documented in information provided privately to IANA";
+ }
+ enum "ieee80216WMAN" {
+ value 237;
+ description
+ "IEEE 802.16 WMAN interface";
+ }
+ enum "adsl2plus" {
+ value 238;
+ description
+ "Asymmetric Digital Subscriber Loop Version 2,
+ Version 2 Plus and all variants";
+ }
+ enum "dvbRcsMacLayer" {
+ value 239;
+ description
+ "DVB-RCS MAC Layer";
+ reference
+ "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+ }
+ enum "dvbTdm" {
+ value 240;
+ description
+ "DVB Satellite TDM";
+ reference
+ "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+ }
+ enum "dvbRcsTdma" {
+ value 241;
+ description
+ "DVB-RCS TDMA";
+ reference
+ "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+ }
+ enum "x86Laps" {
+ value 242;
+ description
+ "LAPS based on ITU-T X.86/Y.1323";
+ }
+ enum "wwanPP" {
+ value 243;
+ description
+ "3GPP WWAN";
+ }
+ enum "wwanPP2" {
+ value 244;
+ description
+ "3GPP2 WWAN";
+ }
+ enum "voiceEBS" {
+ value 245;
+ description
+ "voice P-phone EBS physical interface";
+ }
+ enum "ifPwType" {
+ value 246;
+ description
+ "Pseudowire interface type";
+ reference
+ "RFC 5601 - Pseudowire (PW) Management Information Base";
+ }
+ enum "ilan" {
+ value 247;
+ description
+ "Internal LAN on a bridge per IEEE 802.1ap";
+ }
+ enum "pip" {
+ value 248;
+ description
+ "Provider Instance Port on a bridge per IEEE 802.1ah PBB";
+ }
+ enum "aluELP" {
+ value 249;
+ description
+ "Alcatel-Lucent Ethernet Link Protection";
+ }
+ enum "gpon" {
+ value 250;
+ description
+ "Gigabit-capable passive optical networks (G-PON) as per
+ ITU-T G.948";
+ }
+ enum "vdsl2" {
+ value 251;
+ description
+ "Very high speed digital subscriber line Version 2
+ (as per ITU-T Recommendation G.993.2)";
+ reference
+ "RFC 5650 - Definitions of Managed Objects for Very High
+ Speed Digital Subscriber Line 2 (VDSL2)";
+ }
+ enum "capwapDot11Profile" {
+ value 252;
+ description
+ "WLAN Profile Interface";
+ reference
+ "RFC 5834 - Control and Provisioning of Wireless Access
+ Points (CAPWAP) Protocol Binding MIB for
+ IEEE 802.11";
+ }
+ enum "capwapDot11Bss" {
+ value 253;
+ description
+ "WLAN BSS Interface";
+ reference
+ "RFC 5834 - Control and Provisioning of Wireless Access
+ Points (CAPWAP) Protocol Binding MIB for
+ IEEE 802.11";
+ }
+ enum "capwapWtpVirtualRadio" {
+ value 254;
+ description
+ "WTP Virtual Radio Interface";
+ reference
+ "RFC 5833 - Control and Provisioning of Wireless Access
+ Points (CAPWAP) Protocol Base MIB";
+ }
+ enum "bits" {
+ value 255;
+ description
+ "bitsport";
+ }
+ enum "docsCableUpstreamRfPort" {
+ value 256;
+ description
+ "DOCSIS CATV Upstream RF Port";
+ }
+ enum "cableDownstreamRfPort" {
+ value 257;
+ description
+ "CATV downstream RF port";
+ }
+ enum "vmwareVirtualNic" {
+ value 258;
+ description
+ "VMware Virtual Network Interface";
+ }
+ enum "ieee802154" {
+ value 259;
+ description
+ "IEEE 802.15.4 WPAN interface";
+ reference
+ "IEEE 802.15.4-2006";
+ }
+ enum "otnOdu" {
+ value 260;
+ description
+ "OTN Optical Data Unit";
+ }
+ enum "otnOtu" {
+ value 261;
+ description
+ "OTN Optical channel Transport Unit";
+ }
+ enum "ifVfiType" {
+ value 262;
+ description
+ "VPLS Forwarding Instance Interface Type";
+ }
+ enum "g9981" {
+ value 263;
+ description
+ "G.998.1 bonded interface";
+ }
+ enum "g9982" {
+ value 264;
+ description
+ "G.998.2 bonded interface";
+ }
+ enum "g9983" {
+ value 265;
+ description
+ "G.998.3 bonded interface";
+ }
+ enum "aluEpon" {
+ value 266;
+ description
+ "Ethernet Passive Optical Networks (E-PON)";
+ }
+ enum "aluEponOnu" {
+ value 267;
+ description
+ "EPON Optical Network Unit";
+ }
+ enum "aluEponPhysicalUni" {
+ value 268;
+ description
+ "EPON physical User to Network interface";
+ }
+ enum "aluEponLogicalLink" {
+ value 269;
+ description
+ "The emulation of a point-to-point link over the EPON
+ layer";
+ }
+ enum "aluGponOnu" {
+ value 270;
+ description
+ "GPON Optical Network Unit";
+ reference
+ "ITU-T G.984.2";
+ }
+ enum "aluGponPhysicalUni" {
+ value 271;
+ description
+ "GPON physical User to Network interface";
+ reference
+ "ITU-T G.984.2";
+ }
+ enum "vmwareNicTeam" {
+ value 272;
+ description
+ "VMware NIC Team";
+ }
+ }
+ description
+ "This data type is used as the syntax of the 'type'
+ leaf in the 'interface' list in the YANG module
+ ietf-interfaces.
+
+ The definition of this typedef with the
+ addition of newly assigned values is published
+ periodically by the IANA, in either the Assigned
+ Numbers RFC, or some derivative of it specific to
+ Internet Network Management number assignments. (The
+ latest arrangements can be obtained by contacting the
+ IANA.)
+
+ Requests for new values should be made to IANA via
+ email (iana&iana.org).";
+ reference
+ "ifType definitions registry.
+ <http://www.iana.org/assignments/smi-numbers>";
+ }
+}
\ No newline at end of file
--- /dev/null
+ module ietf-inet-types {
+
+ namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types";
+ prefix "inet";
+
+ organization
+ "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netmod/>
+ WG List: <mailto:netmod@ietf.org>
+
+ WG Chair: David Partain
+ <mailto:david.partain@ericsson.com>
+
+ WG Chair: David Kessens
+ <mailto:david.kessens@nsn.com>
+
+ Editor: Juergen Schoenwaelder
+ <mailto:j.schoenwaelder@jacobs-university.de>";
+
+ description
+ "This module contains a collection of generally useful derived
+ YANG data types for Internet addresses and related things.
+
+ Copyright (c) 2010 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, is permitted pursuant to, and subject to the license
+ terms contained in, the Simplified BSD License set forth in Section
+ 4.c of the IETF Trust's Legal Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC 6021; see
+ the RFC itself for full legal notices.";
+
+ revision 2010-09-24 {
+ description
+ "Initial revision.";
+ reference
+ "RFC 6021: Common YANG Data Types";
+ }
+
+ /*** collection of protocol field related types ***/
+
+ typedef ip-version {
+ type enumeration {
+ enum unknown {
+ value "0";
+ description
+ "An unknown or unspecified version of the Internet protocol.";
+ }
+ enum ipv4 {
+ value "1";
+ description
+ "The IPv4 protocol as defined in RFC 791.";
+ }
+ enum ipv6 {
+ value "2";
+ description
+ "The IPv6 protocol as defined in RFC 2460.";
+ }
+ }
+ description
+ "This value represents the version of the IP protocol.
+
+ In the value set and its semantics, this type is equivalent
+ to the InetVersion textual convention of the SMIv2.";
+ reference
+ "RFC 791: Internet Protocol
+ RFC 2460: Internet Protocol, Version 6 (IPv6) Specification
+ RFC 4001: Textual Conventions for Internet Network Addresses";
+ }
+
+ typedef dscp {
+ type uint8 {
+ range "0..63";
+ }
+ description
+ "The dscp type represents a Differentiated Services Code-Point
+ that may be used for marking packets in a traffic stream.
+
+ In the value set and its semantics, this type is equivalent
+ to the Dscp textual convention of the SMIv2.";
+ reference
+ "RFC 3289: Management Information Base for the Differentiated
+ Services Architecture
+ RFC 2474: Definition of the Differentiated Services Field
+ (DS Field) in the IPv4 and IPv6 Headers
+ RFC 2780: IANA Allocation Guidelines For Values In
+ the Internet Protocol and Related Headers";
+ }
+
+ typedef ipv6-flow-label {
+ type uint32 {
+ range "0..1048575";
+ }
+ description
+ "The flow-label type represents flow identifier or Flow Label
+ in an IPv6 packet header that may be used to discriminate
+ traffic flows.
+
+ In the value set and its semantics, this type is equivalent
+ to the IPv6FlowLabel textual convention of the SMIv2.";
+ reference
+ "RFC 3595: Textual Conventions for IPv6 Flow Label
+ RFC 2460: Internet Protocol, Version 6 (IPv6) Specification";
+ }
+
+ typedef port-number {
+ type uint16 {
+ range "0..65535";
+ }
+ description
+ "The port-number type represents a 16-bit port number of an
+ Internet transport layer protocol such as UDP, TCP, DCCP, or
+ SCTP. Port numbers are assigned by IANA. A current list of
+ all assignments is available from <http://www.iana.org/>.
+
+ Note that the port number value zero is reserved by IANA. In
+ situations where the value zero does not make sense, it can
+ be excluded by subtyping the port-number type.
+
+ In the value set and its semantics, this type is equivalent
+ to the InetPortNumber textual convention of the SMIv2.";
+ reference
+ "RFC 768: User Datagram Protocol
+ RFC 793: Transmission Control Protocol
+ RFC 4960: Stream Control Transmission Protocol
+ RFC 4340: Datagram Congestion Control Protocol (DCCP)
+ RFC 4001: Textual Conventions for Internet Network Addresses";
+ }
+
+ /*** collection of autonomous system related types ***/
+
+ typedef as-number {
+ type uint32;
+ description
+ "The as-number type represents autonomous system numbers
+ which identify an Autonomous System (AS). An AS is a set
+ of routers under a single technical administration, using
+ an interior gateway protocol and common metrics to route
+ packets within the AS, and using an exterior gateway
+ protocol to route packets to other ASs'. IANA maintains
+ the AS number space and has delegated large parts to the
+ regional registries.
+
+ Autonomous system numbers were originally limited to 16
+ bits. BGP extensions have enlarged the autonomous system
+ number space to 32 bits. This type therefore uses an uint32
+ base type without a range restriction in order to support
+ a larger autonomous system number space.
+
+ In the value set and its semantics, this type is equivalent
+ to the InetAutonomousSystemNumber textual convention of
+ the SMIv2.";
+ reference
+ "RFC 1930: Guidelines for creation, selection, and registration
+ of an Autonomous System (AS)
+ RFC 4271: A Border Gateway Protocol 4 (BGP-4)
+ RFC 4893: BGP Support for Four-octet AS Number Space
+ RFC 4001: Textual Conventions for Internet Network Addresses";
+ }
+
+ /*** collection of IP address and hostname related types ***/
+
+ typedef ip-address {
+ type union {
+ type inet:ipv4-address;
+ type inet:ipv6-address;
+ }
+ description
+ "The ip-address type represents an IP address and is IP
+ version neutral. The format of the textual representations
+ implies the IP version.";
+ }
+
+ typedef ipv4-address {
+ type string {
+ pattern
+ '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+ + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+ + '(%[\p{N}\p{L}]+)?';
+ }
+ description
+ "The ipv4-address type represents an IPv4 address in
+ dotted-quad notation. The IPv4 address may include a zone
+ index, separated by a % sign.
+
+ The zone index is used to disambiguate identical address
+ values. For link-local addresses, the zone index will
+ typically be the interface index number or the name of an
+ interface. If the zone index is not present, the default
+ zone of the device will be used.
+
+ The canonical format for the zone index is the numerical
+ format";
+ }
+
+ typedef ipv6-address {
+ type string {
+ pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+ + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+ + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+ + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+ + '(%[\p{N}\p{L}]+)?';
+ pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+ + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+ + '(%.+)?';
+ }
+ description
+ "The ipv6-address type represents an IPv6 address in full,
+ mixed, shortened, and shortened-mixed notation. The IPv6
+ address may include a zone index, separated by a % sign.
+
+ The zone index is used to disambiguate identical address
+ values. For link-local addresses, the zone index will
+ typically be the interface index number or the name of an
+ interface. If the zone index is not present, the default
+ zone of the device will be used.
+
+ The canonical format of IPv6 addresses uses the compressed
+ format described in RFC 4291, Section 2.2, item 2 with the
+ following additional rules: the :: substitution must be
+ applied to the longest sequence of all-zero 16-bit chunks
+ in an IPv6 address. If there is a tie, the first sequence
+ of all-zero 16-bit chunks is replaced by ::. Single
+ all-zero 16-bit chunks are not compressed. The canonical
+ format uses lowercase characters and leading zeros are
+ not allowed. The canonical format for the zone index is
+ the numerical format as described in RFC 4007, Section
+ 11.2.";
+ reference
+ "RFC 4291: IP Version 6 Addressing Architecture
+ RFC 4007: IPv6 Scoped Address Architecture
+ RFC 5952: A Recommendation for IPv6 Address Text Representation";
+ }
+
+ typedef ip-prefix {
+ type union {
+ type inet:ipv4-prefix;
+ type inet:ipv6-prefix;
+ }
+ description
+ "The ip-prefix type represents an IP prefix and is IP
+ version neutral. The format of the textual representations
+ implies the IP version.";
+ }
+
+ typedef ipv4-prefix {
+ type string {
+ pattern
+ '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+ + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+ + '/(([0-9])|([1-2][0-9])|(3[0-2]))';
+ }
+ description
+ "The ipv4-prefix type represents an IPv4 address prefix.
+ The prefix length is given by the number following the
+ slash character and must be less than or equal to 32.
+
+ A prefix length value of n corresponds to an IP address
+ mask that has n contiguous 1-bits from the most
+ significant bit (MSB) and all other bits set to 0.
+
+ The canonical format of an IPv4 prefix has all bits of
+ the IPv4 address set to zero that are not part of the
+ IPv4 prefix.";
+ }
+
+ typedef ipv6-prefix {
+ type string {
+ pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+ + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+ + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+ + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+ + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))';
+ pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+ + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+ + '(/.+)';
+ }
+ description
+ "The ipv6-prefix type represents an IPv6 address prefix.
+ The prefix length is given by the number following the
+ slash character and must be less than or equal 128.
+
+ A prefix length value of n corresponds to an IP address
+ mask that has n contiguous 1-bits from the most
+ significant bit (MSB) and all other bits set to 0.
+
+ The IPv6 address should have all bits that do not belong
+ to the prefix set to zero.
+
+ The canonical format of an IPv6 prefix has all bits of
+ the IPv6 address set to zero that are not part of the
+ IPv6 prefix. Furthermore, IPv6 address is represented
+ in the compressed format described in RFC 4291, Section
+ 2.2, item 2 with the following additional rules: the ::
+ substitution must be applied to the longest sequence of
+ all-zero 16-bit chunks in an IPv6 address. If there is
+ a tie, the first sequence of all-zero 16-bit chunks is
+ replaced by ::. Single all-zero 16-bit chunks are not
+ compressed. The canonical format uses lowercase
+ characters and leading zeros are not allowed.";
+ reference
+ "RFC 4291: IP Version 6 Addressing Architecture";
+ }
+
+ /*** collection of domain name and URI types ***/
+
+ typedef domain-name {
+ type string {
+ pattern '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*'
+ + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)'
+ + '|\.';
+ length "1..253";
+ }
+ description
+ "The domain-name type represents a DNS domain name. The
+ name SHOULD be fully qualified whenever possible.
+
+ Internet domain names are only loosely specified. Section
+ 3.5 of RFC 1034 recommends a syntax (modified in Section
+ 2.1 of RFC 1123). The pattern above is intended to allow
+ for current practice in domain name use, and some possible
+ future expansion. It is designed to hold various types of
+ domain names, including names used for A or AAAA records
+ (host names) and other records, such as SRV records. Note
+ that Internet host names have a stricter syntax (described
+ in RFC 952) than the DNS recommendations in RFCs 1034 and
+ 1123, and that systems that want to store host names in
+ schema nodes using the domain-name type are recommended to
+ adhere to this stricter standard to ensure interoperability.
+
+ The encoding of DNS names in the DNS protocol is limited
+ to 255 characters. Since the encoding consists of labels
+ prefixed by a length byte and there is a trailing NULL
+ byte, only 253 characters can appear in the textual dotted
+ notation.
+
+ The description clause of schema nodes using the domain-name
+ type MUST describe when and how these names are resolved to
+ IP addresses. Note that the resolution of a domain-name value
+ may require to query multiple DNS records (e.g., A for IPv4
+ and AAAA for IPv6). The order of the resolution process and
+ which DNS record takes precedence can either be defined
+ explicitly or it may depend on the configuration of the
+ resolver.
+
+ Domain-name values use the US-ASCII encoding. Their canonical
+ format uses lowercase US-ASCII characters. Internationalized
+ domain names MUST be encoded in punycode as described in RFC
+ 3492.";
+ reference
+ "RFC 952: DoD Internet Host Table Specification
+ RFC 1034: Domain Names - Concepts and Facilities
+ RFC 1123: Requirements for Internet Hosts -- Application
+ and Support
+ RFC 2782: A DNS RR for specifying the location of services
+ (DNS SRV)
+ RFC 3492: Punycode: A Bootstring encoding of Unicode for
+ Internationalized Domain Names in Applications
+ (IDNA)
+ RFC 5891: Internationalizing Domain Names in Applications
+ (IDNA): Protocol";
+ }
+
+ typedef host {
+ type union {
+ type inet:ip-address;
+ type inet:domain-name;
+ }
+ description
+ "The host type represents either an IP address or a DNS
+ domain name.";
+ }
+
+ typedef uri {
+ type string;
+ description
+ "The uri type represents a Uniform Resource Identifier
+ (URI) as defined by STD 66.
+
+ Objects using the uri type MUST be in US-ASCII encoding,
+ and MUST be normalized as described by RFC 3986 Sections
+ 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary
+ percent-encoding is removed, and all case-insensitive
+ characters are set to lowercase except for hexadecimal
+ digits, which are normalized to uppercase as described in
+ Section 6.2.2.1.
+
+ The purpose of this normalization is to help provide
+ unique URIs. Note that this normalization is not
+ sufficient to provide uniqueness. Two URIs that are
+ textually distinct after this normalization may still be
+ equivalent.
+
+ Objects using the uri type may restrict the schemes that
+ they permit. For example, 'data:' and 'urn:' schemes
+ might not be appropriate.
+
+ A zero-length URI is not a valid URI. This can be used to
+ express 'URI absent' where required.
+
+ In the value set and its semantics, this type is equivalent
+ to the Uri SMIv2 textual convention defined in RFC 5017.";
+ reference
+ "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax
+ RFC 3305: Report from the Joint W3C/IETF URI Planning Interest
+ Group: Uniform Resource Identifiers (URIs), URLs,
+ and Uniform Resource Names (URNs): Clarifications
+ and Recommendations
+ RFC 5017: MIB Textual Conventions for Uniform Resource
+ Identifiers (URIs)";
+ }
+
+ }
--- /dev/null
+module ietf-interfaces {
+
+ namespace "urn:ietf:params:xml:ns:yang:ietf-interfaces";
+ prefix if;
+
+ import ietf-yang-types {
+ prefix yang;
+ }
+ import iana-if-type {
+ prefix ianaift;
+ }
+
+ organization
+ "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netmod/>
+ WG List: <mailto:netmod@ietf.org>
+
+ WG Chair: David Kessens
+ <mailto:david.kessens@nsn.com>
+
+ WG Chair: Juergen Schoenwaelder
+ <mailto:j.schoenwaelder@jacobs-university.de>
+
+ Editor: Martin Bjorklund
+ <mailto:mbj@tail-f.com>";
+
+ description
+ "This module contains a collection of YANG definitions for
+ managing network interfaces.
+
+ Copyright (c) 2012 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, is permitted pursuant to, and subject
+ to the license terms contained in, the Simplified BSD License
+ set forth in Section 4.c of the IETF Trust's Legal Provisions
+ Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC XXXX; see
+ the RFC itself for full legal notices.";
+
+ // RFC Ed.: replace XXXX with actual RFC number and remove this
+ // note.
+
+ // RFC Ed.: update the date below with the date of RFC publication
+ // and remove this note.
+ revision 2012-11-15 {
+ description
+ "Initial revision.";
+ reference
+ "RFC XXXX: A YANG Data Model for Interface Management";
+ }
+
+ /* Typedefs */
+
+ typedef interface-ref {
+ type leafref {
+ path "/if:interfaces/if:interface/if:name";
+ }
+ description
+ "This type is used by data models that need to reference
+ interfaces.";
+ }
+
+ /* Features */
+
+ feature arbitrary-names {
+ description
+ "This feature indicates that the server allows interfaces to
+ be named arbitrarily.";
+ }
+
+ feature if-mib {
+ description
+ "This feature indicates that the server implements IF-MIB.";
+ reference
+ "RFC 2863: The Interfaces Group MIB";
+ }
+
+ /* Data nodes */
+
+ container interfaces {
+ description
+ "Interface parameters.";
+
+ list interface {
+ key "name";
+ unique "type location";
+
+ description
+ "The list of interfaces on the device.";
+
+ leaf name {
+ type string;
+ description
+ "The name of the interface.
+
+ A device MAY restrict the allowed values for this leaf,
+ possibly depending on the type and location.
+
+ If the device allows arbitrarily named interfaces, the
+ feature 'arbitrary-names' is advertised.
+
+ This leaf MAY be mapped to ifName by an implementation.
+ Such an implementation MAY restrict the allowed values for
+ this leaf so that it matches the restrictions of ifName.
+ If a NETCONF server that implements this restriction is
+ sent a value that doesn't match the restriction, it MUST
+ reply with an rpc-error with the error-tag
+ 'invalid-value'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifName";
+ }
+
+ leaf description {
+ type string;
+ description
+ "A textual description of the interface.
+
+ This leaf MAY be mapped to ifAlias by an implementation.
+ Such an implementation MAY restrict the allowed values for
+ this leaf so that it matches the restrictions of ifAlias.
+ If a NETCONF server that implements this restriction is
+ sent a value that doesn't match the restriction, it MUST
+ reply with an rpc-error with the error-tag
+ 'invalid-value'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifAlias";
+ }
+
+ leaf type {
+ type ianaift:iana-if-type;
+ mandatory true;
+ description
+ "The type of the interface.
+
+ When an interface entry is created, a server MAY
+ initialize the type leaf with a valid value, e.g., if it
+ is possible to derive the type from the name of the
+ interface.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifType";
+ }
+
+ leaf location {
+ type string;
+ description
+ "The device-specific location of the interface of a
+ particular type. The format of the location string
+ depends on the interface type and the device.
+
+ If the interface's type represents a physical interface,
+ this leaf MUST be set.
+
+ When an interface entry is created, a server MAY
+ initialize the location leaf with a valid value, e.g., if
+ it is possible to derive the location from the name of
+ the interface.";
+ }
+
+ leaf enabled {
+ type boolean;
+ default "true";
+ description
+ "The desired state of the interface.
+
+ This leaf contains the configured, desired state of the
+ interface. Systems that implement the IF-MIB use the
+ value of this leaf to set IF-MIB.ifAdminStatus to 'up' or
+ 'down' after an ifEntry has been initialized, as described
+ in RFC 2863.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifAdminStatus";
+ }
+
+ leaf oper-status {
+ type enumeration {
+ enum up {
+ value 1;
+ description
+ "Ready to pass packets.";
+ }
+ enum down {
+ value 2;
+ description
+ "The interface does not pass any packets.";
+ }
+ enum testing {
+ value 3;
+ description
+ "In some test mode. No operational packets can
+ be passed.";
+ }
+ enum unknown {
+ value 4;
+ description
+ "Status cannot be determined for some reason.";
+ }
+ enum dormant {
+ value 5;
+ description
+ "Waiting for some external event.";
+ }
+ enum not-present {
+ value 6;
+ description
+ "Some component is missing.";
+ }
+ enum lower-layer-down {
+ value 7;
+ description
+ "Down due to state of lower-layer interface(s).";
+ }
+ }
+ config false;
+ description
+ "The current operational state of the interface.
+
+ If 'enabled' is 'false' then 'oper-status'
+ should be 'down'. If 'enabled' is changed to 'true'
+ then 'oper-status' should change to 'up' if the interface
+ is ready to transmit and receive network traffic; it
+ should change to 'dormant' if the interface is waiting for
+ external actions (such as a serial line waiting for an
+ incoming connection); it should remain in the 'down' state
+ if and only if there is a fault that prevents it from
+ going to the 'up' state; it should remain in the
+ 'not-present' state if the interface has missing
+ (typically, hardware) components.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifOperStatus";
+ }
+
+ leaf last-change {
+ type yang:date-and-time;
+ config false;
+ description
+ "The time the interface entered its current operational
+ state. If the current state was entered prior to the
+ last re-initialization of the local network management
+ subsystem, then this node is not present.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifLastChange";
+ }
+
+ leaf if-index {
+ if-feature if-mib;
+ type int32 {
+ range "1..2147483647";
+ }
+ config false;
+ description
+ "The ifIndex value for the ifEntry represented by this
+ interface.
+
+ Media-specific modules must specify how the type is
+ mapped to entries in the ifTable.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifIndex";
+ }
+
+ leaf link-up-down-trap-enable {
+ if-feature if-mib;
+ type enumeration {
+ enum enabled {
+ value 1;
+ }
+ enum disabled {
+ value 2;
+ }
+ }
+ description
+ "Indicates whether linkUp/linkDown SNMP notifications
+ should be generated for this interface.
+ If this node is not configured, the value 'enabled' is
+ operationally used by the server for interfaces which do
+ not operate on top of any other interface (i.e., there are
+ no 'lower-layer-if' entries), and 'disabled' otherwise.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifLinkUpDownTrapEnable";
+ }
+
+ leaf phys-address {
+ type yang:phys-address;
+ config false;
+ description
+ "The interface's address at its protocol sub-layer. For
+ example, for an 802.x interface, this object normally
+ contains a MAC address. The interface's media-specific
+ modules must define the bit and byte ordering and the
+ format of the value of this object. For interfaces that do
+ not have such an address (e.g., a serial line), this node
+ is not present.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifPhysAddress";
+ }
+
+ leaf-list higher-layer-if {
+ type interface-ref;
+ config false;
+ description
+ "A list of references to interfaces layered on top of this
+ interface.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifStackTable";
+ }
+
+ leaf-list lower-layer-if {
+ type interface-ref;
+ config false;
+ description
+ "A list of references to interfaces layered underneath this
+ interface.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifStackTable";
+ }
+
+ leaf speed {
+ type yang:gauge64;
+ units "bits / second";
+ config false;
+ description
+ "An estimate of the interface's current bandwidth in bits
+ per second. For interfaces which do not vary in
+ bandwidth or for those where no accurate estimation can
+ be made, this node should contain the nominal bandwidth.
+ For interfaces that have no concept of bandwidth, this
+ node is not present.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifSpeed, ifHighSpeed";
+ }
+
+ container statistics {
+ config false;
+ description
+ "A collection of interface-related statistics objects.";
+
+ leaf discontinuity-time {
+ type yang:date-and-time;
+ description
+ "The time on the most recent occasion at which any one or
+ more of this interface's counters suffered a
+ discontinuity. If no such discontinuities have occurred
+ since the last re-initialization of the local management
+ subsystem, then this node contains the time the local
+ management subsystem re-initialized itself.";
+ }
+
+ leaf in-octets {
+ type yang:counter64;
+ description
+ "The total number of octets received on the interface,
+ including framing characters.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifHCInOctets";
+ }
+ leaf in-unicast-pkts {
+ type yang:counter64;
+ description
+ "The number of packets, delivered by this sub-layer to a
+ higher (sub-)layer, which were not addressed to a
+ multicast or broadcast address at this sub-layer.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts";
+ }
+ leaf in-broadcast-pkts {
+ type yang:counter64;
+ description
+ "The number of packets, delivered by this sub-layer to a
+ higher (sub-)layer, which were addressed to a broadcast
+ address at this sub-layer.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifHCInBroadcastPkts";
+ }
+ leaf in-multicast-pkts {
+ type yang:counter64;
+ description
+ "The number of packets, delivered by this sub-layer to a
+ higher (sub-)layer, which were addressed to a multicast
+ address at this sub-layer. For a MAC layer protocol,
+ this includes both Group and Functional addresses.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifHCInMulticastPkts";
+ }
+ leaf in-discards {
+ type yang:counter32;
+ description
+ "The number of inbound packets which were chosen to be
+ discarded even though no errors had been detected to
+ prevent their being deliverable to a higher-layer
+ protocol. One possible reason for discarding such a
+ packet could be to free up buffer space.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifInDiscards";
+ }
+ leaf in-errors {
+ type yang:counter32;
+ description
+ "For packet-oriented interfaces, the number of inbound
+ packets that contained errors preventing them from being
+ deliverable to a higher-layer protocol. For character-
+ oriented or fixed-length interfaces, the number of
+ inbound transmission units that contained errors
+ preventing them from being deliverable to a higher-layer
+ protocol.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifInErrors";
+ }
+ leaf in-unknown-protos {
+ type yang:counter32;
+ description
+ "For packet-oriented interfaces, the number of packets
+ received via the interface which were discarded because
+ of an unknown or unsupported protocol. For
+ character-oriented or fixed-length interfaces that
+ support protocol multiplexing the number of transmission
+ units received via the interface which were discarded
+ because of an unknown or unsupported protocol. For any
+ interface that does not support protocol multiplexing,
+ this counter is not present.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos";
+ }
+
+ leaf out-octets {
+ type yang:counter64;
+ description
+ "The total number of octets transmitted out of the
+ interface, including framing characters.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifHCOutOctets";
+ }
+ leaf out-unicast-pkts {
+ type yang:counter64;
+ description
+ "The total number of packets that higher-level protocols
+ requested be transmitted, and which were not addressed
+ to a multicast or broadcast address at this sub-layer,
+ including those that were discarded or not sent.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts";
+ }
+ leaf out-broadcast-pkts {
+ type yang:counter64;
+ description
+ "The total number of packets that higher-level protocols
+ requested be transmitted, and which were addressed to a
+ broadcast address at this sub-layer, including those
+ that were discarded or not sent.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifHCOutBroadcastPkts";
+ }
+ leaf out-multicast-pkts {
+ type yang:counter64;
+ description
+ "The total number of packets that higher-level protocols
+ requested be transmitted, and which were addressed to a
+ multicast address at this sub-layer, including those
+ that were discarded or not sent. For a MAC layer
+ protocol, this includes both Group and Functional
+ addresses.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifHCOutMulticastPkts";
+ }
+ leaf out-discards {
+ type yang:counter32;
+ description
+ "The number of outbound packets which were chosen to be
+ discarded even though no errors had been detected to
+ prevent their being transmitted. One possible reason
+ for discarding such a packet could be to free up buffer
+ space.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifOutDiscards";
+ }
+ leaf out-errors {
+ type yang:counter32;
+ description
+ "For packet-oriented interfaces, the number of outbound
+ packets that could not be transmitted because of errors.
+ For character-oriented or fixed-length interfaces, the
+ number of outbound transmission units that could not be
+ transmitted because of errors.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifOutErrors";
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+ module ietf-yang-types {
+
+ namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types";
+ prefix "yang";
+
+ organization
+ "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netmod/>
+ WG List: <mailto:netmod@ietf.org>
+
+ WG Chair: David Partain
+ <mailto:david.partain@ericsson.com>
+
+ WG Chair: David Kessens
+ <mailto:david.kessens@nsn.com>
+
+ Editor: Juergen Schoenwaelder
+ <mailto:j.schoenwaelder@jacobs-university.de>";
+
+ description
+ "This module contains a collection of generally useful derived
+ YANG data types.
+
+ Copyright (c) 2010 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, is permitted pursuant to, and subject to the license
+ terms contained in, the Simplified BSD License set forth in Section
+ 4.c of the IETF Trust's Legal Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC 6021; see
+ the RFC itself for full legal notices.";
+
+ revision 2010-09-24 {
+ description
+ "Initial revision.";
+ reference
+ "RFC 6021: Common YANG Data Types";
+ }
+
+ /*** collection of counter and gauge types ***/
+
+ typedef counter32 {
+ type uint32;
+ description
+ "The counter32 type represents a non-negative integer
+ that monotonically increases until it reaches a
+ maximum value of 2^32-1 (4294967295 decimal), when it
+ wraps around and starts increasing again from zero.
+
+ Counters have no defined 'initial' value, and thus, a
+ single value of a counter has (in general) no information
+ content. Discontinuities in the monotonically increasing
+ value normally occur at re-initialization of the
+ management system, and at other times as specified in the
+ description of a schema node using this type. If such
+ other times can occur, for example, the creation of
+ a schema node of type counter32 at times other than
+ re-initialization, then a corresponding schema node
+ should be defined, with an appropriate type, to indicate
+ the last discontinuity.
+
+ The counter32 type should not be used for configuration
+ schema nodes. A default statement SHOULD NOT be used in
+ combination with the type counter32.
+
+ In the value set and its semantics, this type is equivalent
+ to the Counter32 type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ typedef zero-based-counter32 {
+ type yang:counter32;
+ default "0";
+ description
+ "The zero-based-counter32 type represents a counter32
+ that has the defined 'initial' value zero.
+
+ A schema node of this type will be set to zero (0) on creation
+ and will thereafter increase monotonically until it reaches
+ a maximum value of 2^32-1 (4294967295 decimal), when it
+ wraps around and starts increasing again from zero.
+
+ Provided that an application discovers a new schema node
+ of this type within the minimum time to wrap, it can use the
+ 'initial' value as a delta. It is important for a management
+ station to be aware of this minimum time and the actual time
+ between polls, and to discard data if the actual time is too
+ long or there is no defined minimum time.
+
+ In the value set and its semantics, this type is equivalent
+ to the ZeroBasedCounter32 textual convention of the SMIv2.";
+ reference
+ "RFC 4502: Remote Network Monitoring Management Information
+ Base Version 2";
+ }
+
+ typedef counter64 {
+ type uint64;
+ description
+ "The counter64 type represents a non-negative integer
+ that monotonically increases until it reaches a
+ maximum value of 2^64-1 (18446744073709551615 decimal),
+ when it wraps around and starts increasing again from zero.
+
+ Counters have no defined 'initial' value, and thus, a
+ single value of a counter has (in general) no information
+ content. Discontinuities in the monotonically increasing
+ value normally occur at re-initialization of the
+ management system, and at other times as specified in the
+ description of a schema node using this type. If such
+ other times can occur, for example, the creation of
+ a schema node of type counter64 at times other than
+ re-initialization, then a corresponding schema node
+ should be defined, with an appropriate type, to indicate
+ the last discontinuity.
+
+ The counter64 type should not be used for configuration
+ schema nodes. A default statement SHOULD NOT be used in
+ combination with the type counter64.
+
+ In the value set and its semantics, this type is equivalent
+ to the Counter64 type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ typedef zero-based-counter64 {
+ type yang:counter64;
+ default "0";
+ description
+ "The zero-based-counter64 type represents a counter64 that
+ has the defined 'initial' value zero.
+
+ A schema node of this type will be set to zero (0) on creation
+ and will thereafter increase monotonically until it reaches
+ a maximum value of 2^64-1 (18446744073709551615 decimal),
+ when it wraps around and starts increasing again from zero.
+
+ Provided that an application discovers a new schema node
+ of this type within the minimum time to wrap, it can use the
+ 'initial' value as a delta. It is important for a management
+ station to be aware of this minimum time and the actual time
+ between polls, and to discard data if the actual time is too
+ long or there is no defined minimum time.
+
+ In the value set and its semantics, this type is equivalent
+ to the ZeroBasedCounter64 textual convention of the SMIv2.";
+ reference
+ "RFC 2856: Textual Conventions for Additional High Capacity
+ Data Types";
+ }
+
+ typedef gauge32 {
+ type uint32;
+ description
+ "The gauge32 type represents a non-negative integer, which
+ may increase or decrease, but shall never exceed a maximum
+ value, nor fall below a minimum value. The maximum value
+ cannot be greater than 2^32-1 (4294967295 decimal), and
+ the minimum value cannot be smaller than 0. The value of
+ a gauge32 has its maximum value whenever the information
+ being modeled is greater than or equal to its maximum
+ value, and has its minimum value whenever the information
+ being modeled is smaller than or equal to its minimum value.
+ If the information being modeled subsequently decreases
+ below (increases above) the maximum (minimum) value, the
+ gauge32 also decreases (increases).
+
+ In the value set and its semantics, this type is equivalent
+ to the Gauge32 type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ typedef gauge64 {
+ type uint64;
+ description
+ "The gauge64 type represents a non-negative integer, which
+ may increase or decrease, but shall never exceed a maximum
+ value, nor fall below a minimum value. The maximum value
+ cannot be greater than 2^64-1 (18446744073709551615), and
+ the minimum value cannot be smaller than 0. The value of
+ a gauge64 has its maximum value whenever the information
+ being modeled is greater than or equal to its maximum
+ value, and has its minimum value whenever the information
+ being modeled is smaller than or equal to its minimum value.
+ If the information being modeled subsequently decreases
+ below (increases above) the maximum (minimum) value, the
+ gauge64 also decreases (increases).
+
+ In the value set and its semantics, this type is equivalent
+ to the CounterBasedGauge64 SMIv2 textual convention defined
+ in RFC 2856";
+ reference
+ "RFC 2856: Textual Conventions for Additional High Capacity
+ Data Types";
+ }
+
+ /*** collection of identifier related types ***/
+
+ typedef object-identifier {
+ type string {
+ pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))'
+ + '(\.(0|([1-9]\d*)))*';
+ }
+ description
+ "The object-identifier type represents administratively
+ assigned names in a registration-hierarchical-name tree.
+
+ Values of this type are denoted as a sequence of numerical
+ non-negative sub-identifier values. Each sub-identifier
+ value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers
+ are separated by single dots and without any intermediate
+ whitespace.
+
+ The ASN.1 standard restricts the value space of the first
+ sub-identifier to 0, 1, or 2. Furthermore, the value space
+ of the second sub-identifier is restricted to the range
+ 0 to 39 if the first sub-identifier is 0 or 1. Finally,
+ the ASN.1 standard requires that an object identifier
+ has always at least two sub-identifiers. The pattern
+ captures these restrictions.
+
+ Although the number of sub-identifiers is not limited,
+ module designers should realize that there may be
+ implementations that stick with the SMIv2 limit of 128
+ sub-identifiers.
+
+ This type is a superset of the SMIv2 OBJECT IDENTIFIER type
+ since it is not restricted to 128 sub-identifiers. Hence,
+ this type SHOULD NOT be used to represent the SMIv2 OBJECT
+ IDENTIFIER type, the object-identifier-128 type SHOULD be
+ used instead.";
+ reference
+ "ISO9834-1: Information technology -- Open Systems
+ Interconnection -- Procedures for the operation of OSI
+ Registration Authorities: General procedures and top
+ arcs of the ASN.1 Object Identifier tree";
+ }
+
+
+
+
+ typedef object-identifier-128 {
+ type object-identifier {
+ pattern '\d*(\.\d*){1,127}';
+ }
+ description
+ "This type represents object-identifiers restricted to 128
+ sub-identifiers.
+
+ In the value set and its semantics, this type is equivalent
+ to the OBJECT IDENTIFIER type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ /*** collection of date and time related types ***/
+
+ typedef date-and-time {
+ type string {
+ pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?'
+ + '(Z|[\+\-]\d{2}:\d{2})';
+ }
+ description
+ "The date-and-time type is a profile of the ISO 8601
+ standard for representation of dates and times using the
+ Gregorian calendar. The profile is defined by the
+ date-time production in Section 5.6 of RFC 3339.
+
+ The date-and-time type is compatible with the dateTime XML
+ schema type with the following notable exceptions:
+
+ (a) The date-and-time type does not allow negative years.
+
+ (b) The date-and-time time-offset -00:00 indicates an unknown
+ time zone (see RFC 3339) while -00:00 and +00:00 and Z all
+ represent the same time zone in dateTime.
+
+ (c) The canonical format (see below) of date-and-time values
+ differs from the canonical format used by the dateTime XML
+ schema type, which requires all times to be in UTC using the
+ time-offset 'Z'.
+
+ This type is not equivalent to the DateAndTime textual
+ convention of the SMIv2 since RFC 3339 uses a different
+ separator between full-date and full-time and provides
+ higher resolution of time-secfrac.
+
+ The canonical format for date-and-time values with a known time
+ zone uses a numeric time zone offset that is calculated using
+ the device's configured known offset to UTC time. A change of
+ the device's offset to UTC time will cause date-and-time values
+ to change accordingly. Such changes might happen periodically
+ in case a server follows automatically daylight saving time
+ (DST) time zone offset changes. The canonical format for
+ date-and-time values with an unknown time zone (usually referring
+ to the notion of local time) uses the time-offset -00:00.";
+ reference
+ "RFC 3339: Date and Time on the Internet: Timestamps
+ RFC 2579: Textual Conventions for SMIv2
+ XSD-TYPES: XML Schema Part 2: Datatypes Second Edition";
+ }
+
+  typedef timeticks { // hundredths of a second between two epochs, modulo 2^32; verbatim RFC 6021 ietf-yang-types
+    type uint32;
+    description
+      "The timeticks type represents a non-negative integer that
+       represents the time, modulo 2^32 (4294967296 decimal), in
+       hundredths of a second between two epochs. When a schema
+       node is defined that uses this type, the description of
+       the schema node identifies both of the reference epochs.
+
+       In the value set and its semantics, this type is equivalent
+       to the TimeTicks type of the SMIv2.";
+    reference
+      "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+  }
+
+  typedef timestamp { // value of an associated timeticks node at some event; verbatim RFC 6021 ietf-yang-types
+    type yang:timeticks; // "yang" is this module's own prefix (module header is above this hunk)
+    description
+      "The timestamp type represents the value of an associated
+       timeticks schema node at which a specific occurrence happened.
+       The specific occurrence must be defined in the description
+       of any schema node defined using this type. When the specific
+       occurrence occurred prior to the last time the associated
+       timeticks attribute was zero, then the timestamp value is
+       zero. Note that this requires all timestamp values to be
+       reset to zero when the value of the associated timeticks
+       attribute reaches 497+ days and wraps around to zero.
+
+       The associated timeticks schema node must be specified
+       in the description of any schema node using this type.
+
+       In the value set and its semantics, this type is equivalent
+       to the TimeStamp textual convention of the SMIv2.";
+    reference
+      "RFC 2579: Textual Conventions for SMIv2";
+  }
+
+ /*** collection of generic address types ***/
+
+  typedef phys-address { // colon-separated hex octets, possibly empty; verbatim RFC 6021 ietf-yang-types
+    type string {
+      pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; // zero or more octets allowed (unlike mac-address below)
+    }
+    description
+      "Represents media- or physical-level addresses represented
+       as a sequence octets, each octet represented by two hexadecimal
+       numbers. Octets are separated by colons. The canonical
+       representation uses lowercase characters.
+
+       In the value set and its semantics, this type is equivalent
+       to the PhysAddress textual convention of the SMIv2.";
+    reference
+      "RFC 2579: Textual Conventions for SMIv2";
+  }
+
+  typedef mac-address { // exactly six colon-separated hex octets; verbatim RFC 6021 ietf-yang-types
+    type string {
+      pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}';
+    }
+    description
+      "The mac-address type represents an IEEE 802 MAC address.
+       The canonical representation uses lowercase characters.
+
+       In the value set and its semantics, this type is equivalent
+       to the MacAddress textual convention of the SMIv2.";
+    reference
+      "IEEE 802: IEEE Standard for Local and Metropolitan Area
+       Networks: Overview and Architecture
+       RFC 2579: Textual Conventions for SMIv2";
+  }
+
+ /*** collection of XML specific types ***/
+
+  typedef xpath1.0 { // NB: "xpath1.0" is a legal YANG identifier (dots are allowed after the first character)
+    type string; // unconstrained -- XPath validity is not enforced by the type itself
+    description
+      "This type represents an XPATH 1.0 expression.
+
+       When a schema node is defined that uses this type, the
+       description of the schema node MUST specify the XPath
+       context in which the XPath expression is evaluated.";
+    reference
+      "XPATH: XML Path Language (XPath) Version 1.0";
+  }
+
+ }
\ No newline at end of file
--- /dev/null
+module binary-type-test { // test fixture exercising local and imported binary typedefs
+  yang-version 1;
+  namespace "urn:binary:types:model";
+  prefix "btt";
+
+  import binary-types { // companion fixture module declaring the bin:* typedefs
+    prefix "bin";
+    revision-date 2013-06-13;
+  }
+
+  organization "OPEN DAYLIGHT";
+  contact "http://www.opendaylight.org/";
+
+  description
+    "Simple test to test imported binary types and resolving of binary
+     type in leaf statement.";
+
+  revision "2013-06-13" {
+    reference "NO REF";
+  }
+
+  typedef binary-type { // local 128-octet binary; used below as the list key type
+    type binary {
+      length 128;
+    }
+  }
+
+  container container-foo {
+    leaf binary-leaf { // inline (anonymous) restricted binary type
+      type binary {
+        length 128;
+      }
+    }
+
+    list binary-list {
+      key "binary-key";
+
+      leaf binary-key { // key resolved through the module's own prefix
+        type btt:binary-type;
+      }
+
+      leaf imported-simple-binary { // unrestricted binary imported from binary-types
+        type bin:simple-binary;
+      }
+    }
+
+    leaf imported-restrict-binary { // imported binary restricted to 24 octets
+      type bin:restricted-binary;
+    }
+
+    leaf-list binary-leaf-list { // renamed from "binary-list": RFC 6020 sec. 6.2.1 requires unique sibling identifiers, and "binary-list" already names the list above, making the module invalid
+      type binary {
+        length 256;
+      }
+    }
+  }
+}
\ No newline at end of file
--- /dev/null
+module binary-types { // stand-alone fixture; no yang-version statement, so YANG 1.0 (RFC 6020) semantics apply
+
+  namespace "urn:binary:types";
+  prefix "bin";
+
+  organization "OPEN DAYLIGHT";
+
+  contact "http://www.opendaylight.org/";
+
+  description "Stand alone binary types declaration file for testing
+               purposes only.";
+
+  revision 2013-06-13 {
+    description
+      "Initial revision.";
+    reference
+      "NO REFERENCE";
+  }
+
+  /*** collection of protocol field related types ***/
+
+  typedef simple-binary { // unrestricted binary, re-exported for import tests
+    type binary;
+  }
+
+  typedef restricted-binary { // binary restricted to exactly 24 octets
+    type binary {
+      length 24;
+    }
+  }
+
+  typedef composite-binary { // union of the two typedefs above, referenced via the module's own prefix
+    type union {
+      type bin:simple-binary;
+      type bin:restricted-binary;
+    }
+  }
+}
--- /dev/null
+module abstract-topology { // fixture: nodes, links, interface leafrefs into ietf-interfaces
+  yang-version 1;
+  namespace "urn:model:abstract:topology";
+  prefix "tp";
+
+  import ietf-inet-types {
+    prefix "inet";
+    revision-date 2010-09-24;
+  }
+
+  import ietf-interfaces { // not part of this patch; assumed available on the test path -- verify
+    prefix "if";
+    revision-date 2012-11-15;
+  }
+
+  organization "OPEN DAYLIGHT";
+  contact "http://www.opendaylight.org/";
+
+  description
+    "This module contains the definitions of elements that creates network
+     topology i.e. definition of network nodes and links. This module is not designed to be used solely for network representation. This module SHOULD be used as base module in defining the network topology.";
+
+  revision "2013-02-08" { // most recent revision; a second abstract-topology module with the same name/namespace appears later in this patch -- presumably a parallel fixture, confirm they live in different test directories
+    reference "~~~ WILL BE DEFINED LATER";
+  }
+
+  revision "2013-01-01" {
+    reference "~~~ WILL BE DEFINED LATER";
+  }
+
+  typedef node-id-ref { // leafref into this module's own node list (absolute path, tp: prefix)
+    type leafref {
+      path "/tp:topology/tp:network-nodes/tp:network-node/tp:node-id";
+    }
+    description "This type is used for leafs that reference network node instance.";
+  }
+
+  typedef link-id-ref { // leafref into this module's own link list
+    type leafref {
+      path "/tp:topology/tp:network-links/tp:network-link/tp:link-id";
+    }
+    description "This type is used for leafs that reference network link instance.";
+  }
+
+  typedef interface-id-ref { // leafref into the interfaces list below; no description, unlike the two typedefs above
+    type leafref {
+      path "/tp:topology/tp:interfaces/tp:interface/tp:interface-id";
+    }
+  }
+
+  container topology {
+    description "This is the model of abstract topology which contains only Network Nodes and Network Links. Each topology MUST be identified by unique topology-id for reason that the store could contain many topologies.";
+
+    leaf topology-id {
+      type inet:uri;
+      description "It is presumed that datastore will contain many topologies. To distinguish between topologies it is vital to have
+                   UNIQUE topology identifier.";
+    }
+
+    container network-nodes {
+      list network-node {
+        key "node-id";
+
+        leaf node-id {
+          type inet:uri;
+          description "The Topology identifier of network-node.";
+        }
+
+        container attributes { // extension point for augmentations
+          description "Aditional attributes that can Network Node contains."; // typo: "Aditional" -> "Additional"; wording garbled -- fix the string in a follow-up patch
+        }
+        description "The list of network nodes defined for topology.";
+      }
+    }
+
+    container interfaces { // mirrors selected leafs of ietf-interfaces via leafrefs
+      list interface {
+        key "interface-id";
+
+        leaf interface-id {
+          type leafref {
+            path "/if:interfaces/if:interface/if:name";
+          }
+        }
+
+        leaf-list higher-layer-if {
+          type leafref {
+            path "/if:interfaces/if:interface/if:higher-layer-if";
+          }
+        }
+
+        leaf oper-status {
+          type leafref {
+            path "/if:interfaces/if:interface/if:oper-status";
+          }
+        }
+
+        leaf link-up-down-trap-enable {
+          type leafref {
+            path "/if:interfaces/if:interface/if:link-up-down-trap-enable";
+          }
+        }
+      }
+    }
+
+    container network-links {
+      list network-link {
+        key "link-id";
+
+        leaf link-id {
+          type inet:uri;
+          description ""; // empty description -- fill in or drop the statement
+        }
+
+        container source-node {
+          leaf id {
+            type node-id-ref;
+            description "Source node identifier.";
+          }
+        }
+
+        container destination-node {
+          leaf id {
+            type node-id-ref;
+            description "Destination node identifier.";
+          }
+        }
+
+        container tunnels {
+          list tunnel {
+            key "tunnel-id";
+
+            leaf tunnel-id {
+              type leafref { // three levels up = enclosing network-link, i.e. this resolves to its link-id -- a key that leafrefs its own ancestor's key; presumably intentional for the test, confirm
+                path "../../../link-id";
+              }
+            }
+          }
+        }
+
+        leaf interface {
+          type interface-id-ref;
+        }
+
+        container attributes {
+          description "Aditional attributes that can Network Link contains."; // typo: "Aditional" -> "Additional"
+        }
+        description "The Network Link which is defined by Local (Source) and Remote (Destination) Network Nodes. Every link MUST be defined either by identifier and
+                     his local and remote Network Nodes (In real applications it is common that many links are originated from one node and end up in same remote node). To ensure that we would always know to distinguish between links, every link SHOULD have identifier.";
+      }
+    }
+  }
+}
\ No newline at end of file
--- /dev/null
+module ietf-inet-types { // verbatim copy of the RFC 6021 standard module -- keep byte-identical to the published RFC text
+
+  namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types";
+  prefix "inet";
+
+  organization
+    "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+  contact
+    "WG Web: <http://tools.ietf.org/wg/netmod/>
+     WG List: <mailto:netmod@ietf.org>
+
+     WG Chair: David Partain
+     <mailto:david.partain@ericsson.com>
+
+     WG Chair: David Kessens
+     <mailto:david.kessens@nsn.com>
+
+     Editor: Juergen Schoenwaelder
+     <mailto:j.schoenwaelder@jacobs-university.de>";
+
+  description
+    "This module contains a collection of generally useful derived
+     YANG data types for Internet addresses and related things.
+
+     Copyright (c) 2010 IETF Trust and the persons identified as
+     authors of the code. All rights reserved.
+
+     Redistribution and use in source and binary forms, with or without
+     modification, is permitted pursuant to, and subject to the license
+     terms contained in, the Simplified BSD License set forth in Section
+     4.c of the IETF Trust's Legal Provisions Relating to IETF Documents
+     (http://trustee.ietf.org/license-info).
+
+     This version of this YANG module is part of RFC 6021; see
+     the RFC itself for full legal notices.";
+
+  revision 2010-09-24 {
+    description
+      "Initial revision.";
+    reference
+      "RFC 6021: Common YANG Data Types";
+  }
+
+  /*** collection of protocol field related types ***/
+
+  typedef ip-version { // enum of IP protocol versions (unknown/ipv4/ipv6)
+    type enumeration {
+      enum unknown {
+        value "0";
+        description
+          "An unknown or unspecified version of the Internet protocol.";
+      }
+      enum ipv4 {
+        value "1";
+        description
+          "The IPv4 protocol as defined in RFC 791.";
+      }
+      enum ipv6 {
+        value "2";
+        description
+          "The IPv6 protocol as defined in RFC 2460.";
+      }
+    }
+    description
+      "This value represents the version of the IP protocol.
+
+       In the value set and its semantics, this type is equivalent
+       to the InetVersion textual convention of the SMIv2.";
+    reference
+      "RFC 791: Internet Protocol
+       RFC 2460: Internet Protocol, Version 6 (IPv6) Specification
+       RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+  typedef dscp { // 6-bit Differentiated Services Code Point
+    type uint8 {
+      range "0..63";
+    }
+    description
+      "The dscp type represents a Differentiated Services Code-Point
+       that may be used for marking packets in a traffic stream.
+
+       In the value set and its semantics, this type is equivalent
+       to the Dscp textual convention of the SMIv2.";
+    reference
+      "RFC 3289: Management Information Base for the Differentiated
+       Services Architecture
+       RFC 2474: Definition of the Differentiated Services Field
+       (DS Field) in the IPv4 and IPv6 Headers
+       RFC 2780: IANA Allocation Guidelines For Values In
+       the Internet Protocol and Related Headers";
+  }
+
+  typedef ipv6-flow-label { // 20-bit IPv6 Flow Label
+    type uint32 {
+      range "0..1048575";
+    }
+    description
+      "The flow-label type represents flow identifier or Flow Label
+       in an IPv6 packet header that may be used to discriminate
+       traffic flows.
+
+       In the value set and its semantics, this type is equivalent
+       to the IPv6FlowLabel textual convention of the SMIv2.";
+    reference
+      "RFC 3595: Textual Conventions for IPv6 Flow Label
+       RFC 2460: Internet Protocol, Version 6 (IPv6) Specification";
+  }
+
+  typedef port-number { // 16-bit transport-layer port; 0 is IANA-reserved
+    type uint16 {
+      range "0..65535";
+    }
+    description
+      "The port-number type represents a 16-bit port number of an
+       Internet transport layer protocol such as UDP, TCP, DCCP, or
+       SCTP. Port numbers are assigned by IANA. A current list of
+       all assignments is available from <http://www.iana.org/>.
+
+       Note that the port number value zero is reserved by IANA. In
+       situations where the value zero does not make sense, it can
+       be excluded by subtyping the port-number type.
+
+       In the value set and its semantics, this type is equivalent
+       to the InetPortNumber textual convention of the SMIv2.";
+    reference
+      "RFC 768: User Datagram Protocol
+       RFC 793: Transmission Control Protocol
+       RFC 4960: Stream Control Transmission Protocol
+       RFC 4340: Datagram Congestion Control Protocol (DCCP)
+       RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+  /*** collection of autonomous system related types ***/
+
+  typedef as-number { // 32-bit AS number (4-octet AS space, RFC 4893)
+    type uint32;
+    description
+      "The as-number type represents autonomous system numbers
+       which identify an Autonomous System (AS). An AS is a set
+       of routers under a single technical administration, using
+       an interior gateway protocol and common metrics to route
+       packets within the AS, and using an exterior gateway
+       protocol to route packets to other ASs'. IANA maintains
+       the AS number space and has delegated large parts to the
+       regional registries.
+
+       Autonomous system numbers were originally limited to 16
+       bits. BGP extensions have enlarged the autonomous system
+       number space to 32 bits. This type therefore uses an uint32
+       base type without a range restriction in order to support
+       a larger autonomous system number space.
+
+       In the value set and its semantics, this type is equivalent
+       to the InetAutonomousSystemNumber textual convention of
+       the SMIv2.";
+    reference
+      "RFC 1930: Guidelines for creation, selection, and registration
+       of an Autonomous System (AS)
+       RFC 4271: A Border Gateway Protocol 4 (BGP-4)
+       RFC 4893: BGP Support for Four-octet AS Number Space
+       RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+  /*** collection of IP address and hostname related types ***/
+
+  typedef ip-address { // version-neutral union; member order matters for union resolution
+    type union {
+      type inet:ipv4-address;
+      type inet:ipv6-address;
+    }
+    description
+      "The ip-address type represents an IP address and is IP
+       version neutral. The format of the textual representations
+       implies the IP version.";
+  }
+
+  typedef ipv4-address { // dotted quad with optional %zone suffix
+    type string {
+      pattern
+        '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+      + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+      + '(%[\p{N}\p{L}]+)?';
+    }
+    description
+      "The ipv4-address type represents an IPv4 address in
+       dotted-quad notation. The IPv4 address may include a zone
+       index, separated by a % sign.
+
+       The zone index is used to disambiguate identical address
+       values. For link-local addresses, the zone index will
+       typically be the interface index number or the name of an
+       interface. If the zone index is not present, the default
+       zone of the device will be used.
+
+       The canonical format for the zone index is the numerical
+       format";
+  }
+
+  typedef ipv6-address { // two patterns: first checks hex-chunk syntax, second enforces overall ':'-structure (both must match)
+    type string {
+      pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+            + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+            + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+            + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+            + '(%[\p{N}\p{L}]+)?';
+      pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+            + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+            + '(%.+)?';
+    }
+    description
+      "The ipv6-address type represents an IPv6 address in full,
+       mixed, shortened, and shortened-mixed notation. The IPv6
+       address may include a zone index, separated by a % sign.
+
+       The zone index is used to disambiguate identical address
+       values. For link-local addresses, the zone index will
+       typically be the interface index number or the name of an
+       interface. If the zone index is not present, the default
+       zone of the device will be used.
+
+       The canonical format of IPv6 addresses uses the compressed
+       format described in RFC 4291, Section 2.2, item 2 with the
+       following additional rules: the :: substitution must be
+       applied to the longest sequence of all-zero 16-bit chunks
+       in an IPv6 address. If there is a tie, the first sequence
+       of all-zero 16-bit chunks is replaced by ::. Single
+       all-zero 16-bit chunks are not compressed. The canonical
+       format uses lowercase characters and leading zeros are
+       not allowed. The canonical format for the zone index is
+       the numerical format as described in RFC 4007, Section
+       11.2.";
+    reference
+      "RFC 4291: IP Version 6 Addressing Architecture
+       RFC 4007: IPv6 Scoped Address Architecture
+       RFC 5952: A Recommendation for IPv6 Address Text Representation";
+  }
+
+  typedef ip-prefix { // version-neutral prefix union
+    type union {
+      type inet:ipv4-prefix;
+      type inet:ipv6-prefix;
+    }
+    description
+      "The ip-prefix type represents an IP prefix and is IP
+       version neutral. The format of the textual representations
+       implies the IP version.";
+  }
+
+  typedef ipv4-prefix { // dotted quad followed by /0..32
+    type string {
+      pattern
+        '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+      + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+      + '/(([0-9])|([1-2][0-9])|(3[0-2]))';
+    }
+    description
+      "The ipv4-prefix type represents an IPv4 address prefix.
+       The prefix length is given by the number following the
+       slash character and must be less than or equal to 32.
+
+       A prefix length value of n corresponds to an IP address
+       mask that has n contiguous 1-bits from the most
+       significant bit (MSB) and all other bits set to 0.
+
+       The canonical format of an IPv4 prefix has all bits of
+       the IPv4 address set to zero that are not part of the
+       IPv4 prefix.";
+  }
+
+  typedef ipv6-prefix { // same two-pattern scheme as ipv6-address, with a mandatory /0..128 suffix
+    type string {
+      pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+            + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+            + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+            + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+            + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))';
+      pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+            + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+            + '(/.+)';
+    }
+    description
+      "The ipv6-prefix type represents an IPv6 address prefix.
+       The prefix length is given by the number following the
+       slash character and must be less than or equal 128.
+
+       A prefix length value of n corresponds to an IP address
+       mask that has n contiguous 1-bits from the most
+       significant bit (MSB) and all other bits set to 0.
+
+       The IPv6 address should have all bits that do not belong
+       to the prefix set to zero.
+
+       The canonical format of an IPv6 prefix has all bits of
+       the IPv6 address set to zero that are not part of the
+       IPv6 prefix. Furthermore, IPv6 address is represented
+       in the compressed format described in RFC 4291, Section
+       2.2, item 2 with the following additional rules: the ::
+       substitution must be applied to the longest sequence of
+       all-zero 16-bit chunks in an IPv6 address. If there is
+       a tie, the first sequence of all-zero 16-bit chunks is
+       replaced by ::. Single all-zero 16-bit chunks are not
+       compressed. The canonical format uses lowercase
+       characters and leading zeros are not allowed.";
+    reference
+      "RFC 4291: IP Version 6 Addressing Architecture";
+  }
+
+  /*** collection of domain name and URI types ***/
+
+  typedef domain-name { // DNS name, 1..253 chars ("explicitely" below is the RFC's own spelling -- left verbatim)
+    type string {
+      pattern '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*'
+            + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)'
+            + '|\.';
+      length "1..253";
+    }
+    description
+      "The domain-name type represents a DNS domain name. The
+       name SHOULD be fully qualified whenever possible.
+
+       Internet domain names are only loosely specified. Section
+       3.5 of RFC 1034 recommends a syntax (modified in Section
+       2.1 of RFC 1123). The pattern above is intended to allow
+       for current practice in domain name use, and some possible
+       future expansion. It is designed to hold various types of
+       domain names, including names used for A or AAAA records
+       (host names) and other records, such as SRV records. Note
+       that Internet host names have a stricter syntax (described
+       in RFC 952) than the DNS recommendations in RFCs 1034 and
+       1123, and that systems that want to store host names in
+       schema nodes using the domain-name type are recommended to
+       adhere to this stricter standard to ensure interoperability.
+
+       The encoding of DNS names in the DNS protocol is limited
+       to 255 characters. Since the encoding consists of labels
+       prefixed by a length bytes and there is a trailing NULL
+       byte, only 253 characters can appear in the textual dotted
+       notation.
+
+       The description clause of schema nodes using the domain-name
+       type MUST describe when and how these names are resolved to
+       IP addresses. Note that the resolution of a domain-name value
+       may require to query multiple DNS records (e.g., A for IPv4
+       and AAAA for IPv6). The order of the resolution process and
+       which DNS record takes precedence can either be defined
+       explicitely or it may depend on the configuration of the
+       resolver.
+
+       Domain-name values use the US-ASCII encoding. Their canonical
+       format uses lowercase US-ASCII characters. Internationalized
+       domain names MUST be encoded in punycode as described in RFC
+       3492";
+    reference
+      "RFC 952: DoD Internet Host Table Specification
+       RFC 1034: Domain Names - Concepts and Facilities
+       RFC 1123: Requirements for Internet Hosts -- Application
+       and Support
+       RFC 2782: A DNS RR for specifying the location of services
+       (DNS SRV)
+       RFC 3492: Punycode: A Bootstring encoding of Unicode for
+       Internationalized Domain Names in Applications
+       (IDNA)
+       RFC 5891: Internationalizing Domain Names in Applications
+       (IDNA): Protocol";
+  }
+
+  typedef host { // IP address or DNS name
+    type union {
+      type inet:ip-address;
+      type inet:domain-name;
+    }
+    description
+      "The host type represents either an IP address or a DNS
+       domain name.";
+  }
+
+  typedef uri { // STD 66 URI; normalization rules live in the description, not the type
+    type string;
+    description
+      "The uri type represents a Uniform Resource Identifier
+       (URI) as defined by STD 66.
+
+       Objects using the uri type MUST be in US-ASCII encoding,
+       and MUST be normalized as described by RFC 3986 Sections
+       6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary
+       percent-encoding is removed, and all case-insensitive
+       characters are set to lowercase except for hexadecimal
+       digits, which are normalized to uppercase as described in
+       Section 6.2.2.1.
+
+       The purpose of this normalization is to help provide
+       unique URIs. Note that this normalization is not
+       sufficient to provide uniqueness. Two URIs that are
+       textually distinct after this normalization may still be
+       equivalent.
+
+       Objects using the uri type may restrict the schemes that
+       they permit. For example, 'data:' and 'urn:' schemes
+       might not be appropriate.
+
+       A zero-length URI is not a valid URI. This can be used to
+       express 'URI absent' where required.
+
+       In the value set and its semantics, this type is equivalent
+       to the Uri SMIv2 textual convention defined in RFC 5017.";
+    reference
+      "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax
+       RFC 3305: Report from the Joint W3C/IETF URI Planning Interest
+       Group: Uniform Resource Identifiers (URIs), URLs,
+       and Uniform Resource Names (URNs): Clarifications
+       and Recommendations
+       RFC 5017: MIB Textual Conventions for Uniform Resource
+       Identifiers (URIs)";
+  }
+
+}
\ No newline at end of file
--- /dev/null
+module abstract-topology { // second fixture with the same name/namespace/revisions as the earlier abstract-topology but a different body -- presumably parallel test directories, confirm they are never loaded together
+  yang-version 1;
+  namespace "urn:model:abstract:topology";
+  prefix "tp";
+
+  import ietf-inet-types {
+    prefix "inet";
+    revision-date 2010-09-24;
+  }
+
+  organization "OPEN DAYLIGHT";
+  contact "http://www.opendaylight.org/";
+
+  description
+    "This module contains the definitions of elements that creates network
+     topology i.e. definition of network nodes and links. This module is not designed to be used solely for network representation. This module SHOULD be used as base module in defining the network topology.";
+
+  revision "2013-02-08" {
+    reference "~~~ WILL BE DEFINED LATER";
+  }
+
+  revision "2013-01-01" {
+    reference "~~~ WILL BE DEFINED LATER";
+  }
+
+  typedef union-enum { // int32 or the literal "unbounded"; exercises union-with-enumeration resolution
+    type union {
+      type int32;
+      type enumeration {
+        enum "unbounded";
+      }
+    }
+  }
+
+  typedef link-address { // IP address or free-form string; exercises union of imported and built-in types
+    type union {
+      type inet:ip-address;
+      type string;
+    }
+  }
+
+  typedef node-id-ref { // leafref into this module's own node list
+    type leafref {
+      path "/tp:topology/tp:network-nodes/tp:network-node/tp:node-id";
+    }
+    description "This type is used for leafs that reference network node instance.";
+  }
+
+  typedef link-id-ref { // leafref into this module's own link list
+    type leafref {
+      path "/tp:topology/tp:network-links/tp:network-link/tp:link-id";
+    }
+    description "This type is used for leafs that reference network link instance.";
+  }
+
+  container topology {
+    description "This is the model of abstract topology which contains only Network Nodes and Network Links. Each topology MUST be identified by unique topology-id for reason that the store could contain many topologies.";
+
+    leaf topology-id {
+      type inet:uri;
+      description "It is presumed that datastore will contain many topologies. To distinguish between topologies it is vital to have
+                   UNIQUE topology identifier.";
+    }
+
+    container network-nodes {
+      list network-node {
+        key "node-id";
+
+        leaf node-id {
+          type inet:uri;
+          description "The Topology identifier of network-node.";
+        }
+
+        leaf bounding { // exercises the union-enum typedef above
+          type union-enum;
+        }
+
+        container attributes { // extension point for augmentations
+          description "Aditional attributes that can Network Node contains."; // typo: "Aditional" -> "Additional"; wording garbled -- fix the string in a follow-up patch
+        }
+        description "The list of network nodes defined for topology.";
+      }
+    }
+
+    container network-links {
+      list network-link {
+        key "link-id";
+
+        leaf link-id {
+          type inet:uri;
+          description ""; // empty description -- fill in or drop the statement
+        }
+
+        leaf link-address { // exercises the link-address union typedef
+          type link-address;
+        }
+
+        container source-node {
+          leaf id {
+            type node-id-ref;
+            description "Source node identifier.";
+          }
+        }
+
+        container destination-node {
+          leaf id {
+            type node-id-ref;
+            description "Destination node identifier.";
+          }
+        }
+
+        container attributes {
+          description "Aditional attributes that can Network Link contains."; // typo: "Aditional" -> "Additional"
+        }
+        description "The Network Link which is defined by Local (Source) and Remote (Destination) Network Nodes. Every link MUST be defined either by identifier and
+                     his local and remote Network Nodes (In real applications it is common that many links are originated from one node and end up in same remote node). To ensure that we would always know to distinguish between links, every link SHOULD have identifier.";
+      }
+    }
+  }
+}
\ No newline at end of file
--- /dev/null
+ module ietf-inet-types {
+
+ namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types";
+ prefix "inet";
+
+ organization
+ "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netmod/>
+ WG List: <mailto:netmod@ietf.org>
+
+ WG Chair: David Partain
+ <mailto:david.partain@ericsson.com>
+
+ WG Chair: David Kessens
+ <mailto:david.kessens@nsn.com>
+
+ Editor: Juergen Schoenwaelder
+ <mailto:j.schoenwaelder@jacobs-university.de>";
+
+ description
+ "This module contains a collection of generally useful derived
+ YANG data types for Internet addresses and related things.
+
+ Copyright (c) 2010 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, is permitted pursuant to, and subject to the license
+ terms contained in, the Simplified BSD License set forth in Section
+ 4.c of the IETF Trust's Legal Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC 6021; see
+ the RFC itself for full legal notices.";
+
+ revision 2010-09-24 {
+ description
+ "Initial revision.";
+ reference
+ "RFC 6021: Common YANG Data Types";
+ }
+
+ /*** collection of protocol field related types ***/
+
+  typedef ip-version { // verbatim RFC 6021; duplicate copy of the earlier ietf-inet-types fixture in this patch
+    type enumeration {
+      enum unknown {
+        value "0";
+        description
+          "An unknown or unspecified version of the Internet protocol.";
+      }
+      enum ipv4 {
+        value "1";
+        description
+          "The IPv4 protocol as defined in RFC 791.";
+      }
+      enum ipv6 {
+        value "2";
+        description
+          "The IPv6 protocol as defined in RFC 2460.";
+      }
+    }
+    description
+      "This value represents the version of the IP protocol.
+
+       In the value set and its semantics, this type is equivalent
+       to the InetVersion textual convention of the SMIv2.";
+    reference
+      "RFC 791: Internet Protocol
+       RFC 2460: Internet Protocol, Version 6 (IPv6) Specification
+       RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+  typedef dscp { // 6-bit DiffServ code point; verbatim RFC 6021
+    type uint8 {
+      range "0..63";
+    }
+    description
+      "The dscp type represents a Differentiated Services Code-Point
+       that may be used for marking packets in a traffic stream.
+
+       In the value set and its semantics, this type is equivalent
+       to the Dscp textual convention of the SMIv2.";
+    reference
+      "RFC 3289: Management Information Base for the Differentiated
+       Services Architecture
+       RFC 2474: Definition of the Differentiated Services Field
+       (DS Field) in the IPv4 and IPv6 Headers
+       RFC 2780: IANA Allocation Guidelines For Values In
+       the Internet Protocol and Related Headers";
+  }
+
+  typedef ipv6-flow-label { // 20-bit IPv6 Flow Label; verbatim RFC 6021
+    type uint32 {
+      range "0..1048575";
+    }
+    description
+      "The flow-label type represents flow identifier or Flow Label
+       in an IPv6 packet header that may be used to discriminate
+       traffic flows.
+
+       In the value set and its semantics, this type is equivalent
+       to the IPv6FlowLabel textual convention of the SMIv2.";
+    reference
+      "RFC 3595: Textual Conventions for IPv6 Flow Label
+       RFC 2460: Internet Protocol, Version 6 (IPv6) Specification";
+  }
+
+  typedef port-number { // 16-bit transport port, 0 reserved; verbatim RFC 6021
+    type uint16 {
+      range "0..65535";
+    }
+    description
+      "The port-number type represents a 16-bit port number of an
+       Internet transport layer protocol such as UDP, TCP, DCCP, or
+       SCTP. Port numbers are assigned by IANA. A current list of
+       all assignments is available from <http://www.iana.org/>.
+
+       Note that the port number value zero is reserved by IANA. In
+       situations where the value zero does not make sense, it can
+       be excluded by subtyping the port-number type.
+
+       In the value set and its semantics, this type is equivalent
+       to the InetPortNumber textual convention of the SMIv2.";
+    reference
+      "RFC 768: User Datagram Protocol
+       RFC 793: Transmission Control Protocol
+       RFC 4960: Stream Control Transmission Protocol
+       RFC 4340: Datagram Congestion Control Protocol (DCCP)
+       RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+ /*** collection of autonomous system related types ***/
+
+  typedef as-number { // 32-bit AS number; verbatim RFC 6021
+    type uint32;
+    description
+      "The as-number type represents autonomous system numbers
+       which identify an Autonomous System (AS). An AS is a set
+       of routers under a single technical administration, using
+       an interior gateway protocol and common metrics to route
+       packets within the AS, and using an exterior gateway
+       protocol to route packets to other ASs'. IANA maintains
+       the AS number space and has delegated large parts to the
+       regional registries.
+
+       Autonomous system numbers were originally limited to 16
+       bits. BGP extensions have enlarged the autonomous system
+       number space to 32 bits. This type therefore uses an uint32
+       base type without a range restriction in order to support
+       a larger autonomous system number space.
+
+       In the value set and its semantics, this type is equivalent
+       to the InetAutonomousSystemNumber textual convention of
+       the SMIv2.";
+    reference
+      "RFC 1930: Guidelines for creation, selection, and registration
+       of an Autonomous System (AS)
+       RFC 4271: A Border Gateway Protocol 4 (BGP-4)
+       RFC 4893: BGP Support for Four-octet AS Number Space
+       RFC 4001: Textual Conventions for Internet Network Addresses";
+  }
+
+ /*** collection of IP address and hostname related types ***/
+
+  typedef ip-address { // version-neutral union of v4/v6 addresses; verbatim RFC 6021
+    type union {
+      type inet:ipv4-address;
+      type inet:ipv6-address;
+    }
+    description
+      "The ip-address type represents an IP address and is IP
+       version neutral. The format of the textual representations
+       implies the IP version.";
+  }
+
+  typedef ipv4-address { // dotted quad with optional %zone suffix; verbatim RFC 6021
+    type string {
+      pattern
+        '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+      + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+      + '(%[\p{N}\p{L}]+)?';
+    }
+    description
+      "The ipv4-address type represents an IPv4 address in
+       dotted-quad notation. The IPv4 address may include a zone
+       index, separated by a % sign.
+
+       The zone index is used to disambiguate identical address
+       values. For link-local addresses, the zone index will
+       typically be the interface index number or the name of an
+       interface. If the zone index is not present, the default
+       zone of the device will be used.
+
+       The canonical format for the zone index is the numerical
+       format";
+  }
+
+ typedef ipv6-address {
+ type string {
+ pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+ + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+ + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+ + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+ + '(%[\p{N}\p{L}]+)?';
+ pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+ + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+ + '(%.+)?';
+ }
+ description
+ "The ipv6-address type represents an IPv6 address in full,
+ mixed, shortened, and shortened-mixed notation. The IPv6
+ address may include a zone index, separated by a % sign.
+
+ The zone index is used to disambiguate identical address
+ values. For link-local addresses, the zone index will
+ typically be the interface index number or the name of an
+ interface. If the zone index is not present, the default
+ zone of the device will be used.
+
+ The canonical format of IPv6 addresses uses the compressed
+ format described in RFC 4291, Section 2.2, item 2 with the
+ following additional rules: the :: substitution must be
+ applied to the longest sequence of all-zero 16-bit chunks
+ in an IPv6 address. If there is a tie, the first sequence
+ of all-zero 16-bit chunks is replaced by ::. Single
+ all-zero 16-bit chunks are not compressed. The canonical
+ format uses lowercase characters and leading zeros are
+ not allowed. The canonical format for the zone index is
+ the numerical format as described in RFC 4007, Section
+ 11.2.";
+ reference
+ "RFC 4291: IP Version 6 Addressing Architecture
+ RFC 4007: IPv6 Scoped Address Architecture
+ RFC 5952: A Recommendation for IPv6 Address Text Representation";
+ }
+
+ typedef ip-prefix {
+ type union {
+ type inet:ipv4-prefix;
+ type inet:ipv6-prefix;
+ }
+ description
+ "The ip-prefix type represents an IP prefix and is IP
+ version neutral. The format of the textual representations
+ implies the IP version.";
+ }
+
+ typedef ipv4-prefix {
+ type string {
+ pattern
+ '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+ + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+ + '/(([0-9])|([1-2][0-9])|(3[0-2]))';
+ }
+ description
+ "The ipv4-prefix type represents an IPv4 address prefix.
+ The prefix length is given by the number following the
+ slash character and must be less than or equal to 32.
+
+ A prefix length value of n corresponds to an IP address
+ mask that has n contiguous 1-bits from the most
+ significant bit (MSB) and all other bits set to 0.
+
+ The canonical format of an IPv4 prefix has all bits of
+ the IPv4 address set to zero that are not part of the
+ IPv4 prefix.";
+ }
+
+ typedef ipv6-prefix {
+ type string {
+ pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+ + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+ + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+ + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+ + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))';
+ pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+ + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+ + '(/.+)';
+ }
+ description
+ "The ipv6-prefix type represents an IPv6 address prefix.
+ The prefix length is given by the number following the
+ slash character and must be less than or equal to 128.
+
+ A prefix length value of n corresponds to an IP address
+ mask that has n contiguous 1-bits from the most
+ significant bit (MSB) and all other bits set to 0.
+
+ The IPv6 address should have all bits that do not belong
+ to the prefix set to zero.
+
+ The canonical format of an IPv6 prefix has all bits of
+ the IPv6 address set to zero that are not part of the
+ IPv6 prefix. Furthermore, IPv6 address is represented
+ in the compressed format described in RFC 4291, Section
+ 2.2, item 2 with the following additional rules: the ::
+ substitution must be applied to the longest sequence of
+ all-zero 16-bit chunks in an IPv6 address. If there is
+ a tie, the first sequence of all-zero 16-bit chunks is
+ replaced by ::. Single all-zero 16-bit chunks are not
+ compressed. The canonical format uses lowercase
+ characters and leading zeros are not allowed.";
+ reference
+ "RFC 4291: IP Version 6 Addressing Architecture";
+ }
+
+ /*** collection of domain name and URI types ***/
+
+ typedef domain-name {
+ type string {
+ pattern '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*'
+ + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)'
+ + '|\.';
+ length "1..253";
+ }
+ description
+ "The domain-name type represents a DNS domain name. The
+ name SHOULD be fully qualified whenever possible.
+
+ Internet domain names are only loosely specified. Section
+ 3.5 of RFC 1034 recommends a syntax (modified in Section
+ 2.1 of RFC 1123). The pattern above is intended to allow
+ for current practice in domain name use, and some possible
+ future expansion. It is designed to hold various types of
+ domain names, including names used for A or AAAA records
+ (host names) and other records, such as SRV records. Note
+ that Internet host names have a stricter syntax (described
+ in RFC 952) than the DNS recommendations in RFCs 1034 and
+ 1123, and that systems that want to store host names in
+ schema nodes using the domain-name type are recommended to
+ adhere to this stricter standard to ensure interoperability.
+
+ The encoding of DNS names in the DNS protocol is limited
+ to 255 characters. Since the encoding consists of labels
+ prefixed by a length byte and there is a trailing NULL
+ byte, only 253 characters can appear in the textual dotted
+ notation.
+
+ The description clause of schema nodes using the domain-name
+ type MUST describe when and how these names are resolved to
+ IP addresses. Note that the resolution of a domain-name value
+ may require to query multiple DNS records (e.g., A for IPv4
+ and AAAA for IPv6). The order of the resolution process and
+ which DNS record takes precedence can either be defined
+ explicitly or it may depend on the configuration of the
+ resolver.
+
+ Domain-name values use the US-ASCII encoding. Their canonical
+ format uses lowercase US-ASCII characters. Internationalized
+ domain names MUST be encoded in punycode as described in RFC
+ 3492";
+ reference
+ "RFC 952: DoD Internet Host Table Specification
+ RFC 1034: Domain Names - Concepts and Facilities
+ RFC 1123: Requirements for Internet Hosts -- Application
+ and Support
+ RFC 2782: A DNS RR for specifying the location of services
+ (DNS SRV)
+ RFC 3492: Punycode: A Bootstring encoding of Unicode for
+ Internationalized Domain Names in Applications
+ (IDNA)
+ RFC 5891: Internationalizing Domain Names in Applications
+ (IDNA): Protocol";
+ }
+
+ typedef host {
+ type union {
+ type inet:ip-address;
+ type inet:domain-name;
+ }
+ description
+ "The host type represents either an IP address or a DNS
+ domain name.";
+ }
+
+ typedef uri {
+ type string;
+ description
+ "The uri type represents a Uniform Resource Identifier
+ (URI) as defined by STD 66.
+
+ Objects using the uri type MUST be in US-ASCII encoding,
+ and MUST be normalized as described by RFC 3986 Sections
+ 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary
+ percent-encoding is removed, and all case-insensitive
+ characters are set to lowercase except for hexadecimal
+ digits, which are normalized to uppercase as described in
+ Section 6.2.2.1.
+
+ The purpose of this normalization is to help provide
+ unique URIs. Note that this normalization is not
+ sufficient to provide uniqueness. Two URIs that are
+ textually distinct after this normalization may still be
+ equivalent.
+
+ Objects using the uri type may restrict the schemes that
+ they permit. For example, 'data:' and 'urn:' schemes
+ might not be appropriate.
+
+ A zero-length URI is not a valid URI. This can be used to
+ express 'URI absent' where required.
+
+ In the value set and its semantics, this type is equivalent
+ to the Uri SMIv2 textual convention defined in RFC 5017.";
+ reference
+ "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax
+ RFC 3305: Report from the Joint W3C/IETF URI Planning Interest
+ Group: Uniform Resource Identifiers (URIs), URLs,
+ and Uniform Resource Names (URNs): Clarifications
+ and Recommendations
+ RFC 5017: MIB Textual Conventions for Uniform Resource
+ Identifiers (URIs)";
+ }
+
+ }
\ No newline at end of file
@Deprecated\r
Type javaTypeForYangType(String type);\r
\r
+ /**\r
+ * Resolution of a yang Type Definition to its java counterpart.\r
+ * If the Type Definition contains one of yang primitive types the method\r
+ * will return java.lang. counterpart. (For example if yang type is int32\r
+ * the java counterpart is java.lang.Integer). In case that Type\r
+ * Definition contains extended type defined via yang typedef statement\r
+ * the method SHOULD return Generated Type or Generated Transfer Object\r
+ * if that Type is correctly referenced to resolved imported yang module.\r
+ * The method will return <code>null</code> value in situations that\r
+ * TypeDefinition can't be resolved (either due to missing yang import or\r
+ * incorrectly specified type).\r
+ *\r
+ *\r
+ * @param type Type Definition to resolve from\r
+ * @return Resolved Type\r
+ */\r
Type javaTypeForSchemaDefinitionType(final TypeDefinition<?> type);\r
}\r
\r
@Override\r
public String toString() {\r
-\r
+ if (packageName.isEmpty()) {\r
+ return "Type (" + name + ")";\r
+ }\r
return "Type (" + packageName + "." + name + ")";\r
}\r
\r
import org.opendaylight.controller.binding.generator.util.generated.type.builder.GeneratedTOBuilderImpl;
import org.opendaylight.controller.sal.binding.model.api.type.builder.GeneratedTOBuilder;
import org.opendaylight.controller.yang.common.QName;
-import org.opendaylight.controller.yang.model.api.*;
+import org.opendaylight.controller.yang.model.api.Module;
+import org.opendaylight.controller.yang.model.api.SchemaNode;
+import org.opendaylight.controller.yang.model.api.SchemaPath;
+import org.opendaylight.controller.yang.model.api.TypeDefinition;
import java.util.*;
-public class BindingGeneratorUtil {
+public final class BindingGeneratorUtil {
private static final String[] SET_VALUES = new String[]{"abstract",
"assert", "boolean", "break", "byte", "case", "catch", "char",
"super", "switch", "synchronized", "this", "throw", "throws",
"transient", "true", "try", "void", "volatile", "while"};
- private BindingGeneratorUtil() {}
+ private BindingGeneratorUtil() {
+ }
public static final Set<String> JAVA_RESERVED_WORDS = new HashSet<String>(
Arrays.asList(SET_VALUES));
public static String packageNameForGeneratedType(
final String basePackageName, final SchemaPath schemaPath) {
+ if (basePackageName == null) {
+ throw new IllegalArgumentException("Base Package Name cannot be " +
+ "NULL!");
+ }
+ if (schemaPath == null) {
+ throw new IllegalArgumentException("Schema Path cannot be NULL!");
+ }
+
final StringBuilder builder = new StringBuilder();
builder.append(basePackageName);
- if ((schemaPath != null) && (schemaPath.getPath() != null)) {
- final List<QName> pathToNode = schemaPath.getPath();
- final int traversalSteps = (pathToNode.size() - 1);
- for (int i = 0; i < traversalSteps; ++i) {
- builder.append(".");
- String nodeLocalName = pathToNode.get(i).getLocalName();
-
- nodeLocalName = nodeLocalName.replace(":", ".");
- nodeLocalName = nodeLocalName.replace("-", ".");
- builder.append(nodeLocalName);
- }
- return validateJavaPackage(builder.toString());
+ final List<QName> pathToNode = schemaPath.getPath();
+ final int traversalSteps = (pathToNode.size() - 1);
+ for (int i = 0; i < traversalSteps; ++i) {
+ builder.append(".");
+ String nodeLocalName = pathToNode.get(i).getLocalName();
+
+ nodeLocalName = nodeLocalName.replace(":", ".");
+ nodeLocalName = nodeLocalName.replace("-", ".");
+ builder.append(nodeLocalName);
}
- return null;
+ return validateJavaPackage(builder.toString());
+ }
+
+ public static String packageNameForTypeDefinition(
+ final String basePackageName, final TypeDefinition<?> typeDefinition) {
+ if (basePackageName == null) {
+ throw new IllegalArgumentException("Base Package Name cannot be " +
+ "NULL!");
+ }
+ if (typeDefinition == null) {
+ throw new IllegalArgumentException("Type Definition reference " +
+ "cannot be NULL!");
+ }
+
+ final StringBuilder builder = new StringBuilder();
+ builder.append(basePackageName);
+ return validateJavaPackage(builder.toString());
}
public static String parseToClassName(String token) {
import org.opendaylight.controller.yang.binding.Augmentation;\r
import org.opendaylight.controller.yang.binding.DataObject;\r
\r
-public class Types {\r
+public final class Types {\r
private static final Type SET_TYPE = typeForClass(Set.class);\r
private static final Type LIST_TYPE = typeForClass(List.class);\r
private static final Type MAP_TYPE = typeForClass(Map.class);\r
- \r
public static final Type DATA_OBJECT = typeForClass(DataObject.class);\r
- \r
- private Types() {\r
- }\r
\r
public static ConcreteType voidType() {\r
return new ConcreteTypeImpl(Void.class.getPackage().getName(),\r
Void.class.getSimpleName());\r
}\r
\r
+ public static final Type primitiveType(final String primitiveType) {\r
+ return new ConcreteTypeImpl("", primitiveType);\r
+ }\r
+\r
+\r
/**\r
* Returns an instance of {@link ConcreteType} describing the class\r
* \r
@Override
public EnumBuilder addEnumeration(String name) {
- final EnumBuilder builder = new EnumerationBuilderImpl(packageName,
+ final String innerPackageName = packageName + "." + this.name;
+ final EnumBuilder builder = new EnumerationBuilderImpl(innerPackageName,
name);
enumerations.add(builder);
return builder;
import java.util.Map;
import org.opendaylight.controller.sal.binding.model.api.CodeGenerator;
+import org.opendaylight.controller.sal.binding.model.api.Enumeration;
import org.opendaylight.controller.sal.binding.model.api.GeneratedProperty;
import org.opendaylight.controller.sal.binding.model.api.GeneratedTransferObject;
import org.opendaylight.controller.sal.binding.model.api.Type;
-public class ClassCodeGenerator implements CodeGenerator {
+public final class ClassCodeGenerator implements CodeGenerator {
private Map<String, LinkedHashMap<String, Integer>> imports;
public Writer generate(Type type) throws IOException {
final Writer writer = new StringWriter();
if (type instanceof GeneratedTransferObject) {
- GeneratedTransferObject genTO = (GeneratedTransferObject) type;
+ GeneratedTransferObject genTO = (GeneratedTransferObject) type;
imports = GeneratorUtil.createImports(genTO);
-
+
final String currentPkg = genTO.getPackageName();
final List<GeneratedProperty> fields = genTO.getProperties();
+ final List<Enumeration> enums = genTO.getEnumDefintions();
writer.write(GeneratorUtil.createPackageDeclaration(currentPkg));
writer.write(NL);
imports));
writer.write(NL);
writer.write(NL);
+
+ if (enums != null) {
+ EnumGenerator enumGenerator = new EnumGenerator();
+ for ( Enumeration e : enums ) {
+ writer.write(enumGenerator.generateInnerEnumeration(e, TAB).toString());
+ writer.write(NL);
+ }
+ }
if (fields != null) {
for (GeneratedProperty field : fields) {
*/
package org.opendaylight.controller.sal.java.api.generator;
-public class Constants {
+final class Constants {
public static final String IFC = "interface";
public static final String CLASS = "class";
public static final String FINAL = "final";
public static final String EXTENDS = "extends";
public static final String IMPLEMENTS = "implements";
+
+ public static final String ENUMERATION_NAME = "value";
+ public static final String ENUMERATION_TYPE = "int";
+
+ private Constants() {}
}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.java.api.generator;
+
+import static org.opendaylight.controller.sal.java.api.generator.Constants.*;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.io.Writer;
+
+import org.opendaylight.controller.sal.binding.model.api.CodeGenerator;
+import org.opendaylight.controller.sal.binding.model.api.Enumeration;
+import org.opendaylight.controller.sal.binding.model.api.Type;
+
+public class EnumGenerator implements CodeGenerator {
+
+ @Override
+ public Writer generate(Type type) throws IOException {
+ final Writer writer = new StringWriter();
+
+ if (type instanceof Enumeration) {
+ Enumeration enums = (Enumeration) type;
+ writer.write(GeneratorUtil.createPackageDeclaration(enums
+ .getPackageName()));
+ writer.write(NL + NL);
+ writer.write(GeneratorUtil.createEnum(enums, ""));
+ }
+
+ return writer;
+ }
+
+ public Writer generateInnerEnumeration(Type type, String indent) throws IOException {
+ final Writer writer = new StringWriter();
+
+ if (type instanceof Enumeration) {
+ Enumeration enums = (Enumeration) type;
+ writer.write(GeneratorUtil.createEnum(enums, indent));
+ }
+
+ return writer;
+ }
+
+}
import java.util.Set;
import org.opendaylight.controller.sal.binding.model.api.CodeGenerator;
+import org.opendaylight.controller.sal.binding.model.api.Enumeration;
import org.opendaylight.controller.sal.binding.model.api.GeneratedTransferObject;
import org.opendaylight.controller.sal.binding.model.api.GeneratedType;
import org.opendaylight.controller.sal.binding.model.api.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class GeneratorJavaFile {
+public final class GeneratorJavaFile {
private static final Logger log = LoggerFactory
.getLogger(GeneratorJavaFile.class);
private final CodeGenerator interfaceGenerator;
private final ClassCodeGenerator classGenerator;
+ private final EnumGenerator enumGenerator;
+
private final Set<GeneratedType> genTypes;
private final Set<GeneratedTransferObject> genTransferObjects;
+ private final Set<Enumeration> enumerations;
public GeneratorJavaFile(final CodeGenerator codeGenerator,
final Set<GeneratedType> types) {
this.interfaceGenerator = codeGenerator;
this.genTypes = types;
this.genTransferObjects = new HashSet<>();
- classGenerator = new ClassCodeGenerator();
+ this.enumerations = new HashSet<>();
+ this.classGenerator = new ClassCodeGenerator();
+ this.enumGenerator = new EnumGenerator();
}
public GeneratorJavaFile(final Set<GeneratedType> types,
- final Set<GeneratedTransferObject> genTransferObjects) {
+ final Set<GeneratedTransferObject> genTransferObjects,
+ final Set<Enumeration> enumerations) {
this.interfaceGenerator = new InterfaceGenerator();
this.classGenerator = new ClassCodeGenerator();
+ this.enumGenerator = new EnumGenerator();
+
this.genTypes = types;
this.genTransferObjects = genTransferObjects;
+ this.enumerations = enumerations;
}
public List<File> generateToFile(final File parentDirectory) throws IOException {
result.add(genFile);
}
}
+
+ for (Enumeration enumeration : enumerations) {
+ final File genFile = generateTypeToJavaFile(parentDirectory,
+ enumeration, enumGenerator);
+
+ if (genFile != null) {
+ result.add(genFile);
+ }
+ }
+
return result;
}
import org.opendaylight.controller.sal.binding.model.api.ParameterizedType;
import org.opendaylight.controller.sal.binding.model.api.Type;
-public class GeneratorUtil {
-
- private GeneratorUtil() {
- }
-
- public static String createIfcDeclaration(final GeneratedType genType,
- final String indent,
- final Map<String, LinkedHashMap<String, Integer>> availableImports) {
- return createFileDeclaration(IFC, genType, indent, availableImports);
- }
-
- public static String createClassDeclaration(
- final GeneratedTransferObject genTransferObject,
- final String indent,
- final Map<String, LinkedHashMap<String, Integer>> availableImports) {
- return createFileDeclaration(CLASS, genTransferObject, indent,
- availableImports);
- }
-
- public static String createPackageDeclaration(final String packageName) {
- return PKG + GAP + packageName + SC;
- }
-
- private static String createFileDeclaration(final String type,
- final GeneratedType genType, final String indent,
- final Map<String, LinkedHashMap<String, Integer>> availableImports) {
- final StringBuilder builder = new StringBuilder();
- final String currentPkg = genType.getPackageName();
-
- createComment(builder, genType.getComment(), indent);
-
- if (!genType.getAnnotations().isEmpty()) {
- final List<AnnotationType> annotations = genType.getAnnotations();
- appendAnnotations(builder, annotations);
- builder.append(NL);
- }
- builder.append(PUBLIC + GAP + type + GAP + genType.getName() + GAP);
-
- if (genType instanceof GeneratedTransferObject) {
- GeneratedTransferObject genTO = (GeneratedTransferObject) genType;
-
- if (genTO.getExtends() != null) {
- builder.append(EXTENDS + GAP);
- builder.append(genTO.getExtends() + GAP);
- }
- }
-
- final List<Type> genImplements = genType.getImplements();
- if (!genImplements.isEmpty()) {
- if (genType instanceof GeneratedTransferObject) {
- builder.append(IMPLEMENTS + GAP);
- } else {
- builder.append(EXTENDS + GAP);
- }
- builder.append(getExplicitType(genImplements.get(0),
- availableImports, currentPkg));
-
- for (int i = 1; i < genImplements.size(); ++i) {
- builder.append(", ");
- builder.append(getExplicitType(genImplements.get(i),
- availableImports, currentPkg));
- }
- }
-
- builder.append(GAP + LCB);
- return builder.toString();
- }
-
- private static StringBuilder appendAnnotations(final StringBuilder builder,
- final List<AnnotationType> annotations) {
- if ((builder != null) && (annotations != null)) {
- for (final AnnotationType annotation : annotations) {
- builder.append("@");
- builder.append(annotation.getPackageName());
- builder.append(".");
- builder.append(annotation.getName());
-
- if (annotation.containsParameters()) {
- builder.append("(");
- final List<AnnotationType.Parameter> parameters = annotation
- .getParameters();
- appendAnnotationParams(builder, parameters);
- builder.append(")");
- }
- }
- }
- return builder;
- }
-
- private static StringBuilder appendAnnotationParams(
- final StringBuilder builder,
- final List<AnnotationType.Parameter> parameters) {
- if (parameters != null) {
- int i = 0;
- for (final AnnotationType.Parameter param : parameters) {
- if (param == null) {
- continue;
- }
- if (i > 0) {
- builder.append(", ");
- }
- final String paramName = param.getName();
- if (param.getValue() != null) {
- builder.append(paramName);
- builder.append(" = ");
- builder.append(param.getValue());
+public final class GeneratorUtil {
+
+ private GeneratorUtil() {
+ }
+
+ public static String createIfcDeclaration(final GeneratedType genType,
+ final String indent,
+ final Map<String, LinkedHashMap<String, Integer>> availableImports) {
+ return createFileDeclaration(IFC, genType, indent, availableImports);
+ }
+
+ public static String createClassDeclaration(
+ final GeneratedTransferObject genTransferObject,
+ final String indent,
+ final Map<String, LinkedHashMap<String, Integer>> availableImports) {
+ return createFileDeclaration(CLASS, genTransferObject, indent,
+ availableImports);
+ }
+
+ public static String createPackageDeclaration(final String packageName) {
+ return PKG + GAP + packageName + SC;
+ }
+
+ private static String createFileDeclaration(final String type,
+ final GeneratedType genType, final String indent,
+ final Map<String, LinkedHashMap<String, Integer>> availableImports) {
+ final StringBuilder builder = new StringBuilder();
+ final String currentPkg = genType.getPackageName();
+
+ createComment(builder, genType.getComment(), indent);
+
+ if (!genType.getAnnotations().isEmpty()) {
+ final List<AnnotationType> annotations = genType.getAnnotations();
+ appendAnnotations(builder, annotations);
+ builder.append(NL);
+ }
+ builder.append(PUBLIC + GAP + type + GAP + genType.getName() + GAP);
+
+ if (genType instanceof GeneratedTransferObject) {
+ GeneratedTransferObject genTO = (GeneratedTransferObject) genType;
+
+ if (genTO.getExtends() != null) {
+ builder.append(EXTENDS + GAP);
+ builder.append(genTO.getExtends() + GAP);
+ }
+ }
+
+ final List<Type> genImplements = genType.getImplements();
+ if (!genImplements.isEmpty()) {
+ if (genType instanceof GeneratedTransferObject) {
+ builder.append(IMPLEMENTS + GAP);
+ } else {
+ builder.append(EXTENDS + GAP);
+ }
+ builder.append(getExplicitType(genImplements.get(0),
+ availableImports, currentPkg));
+
+ for (int i = 1; i < genImplements.size(); ++i) {
+ builder.append(", ");
+ builder.append(getExplicitType(genImplements.get(i),
+ availableImports, currentPkg));
+ }
+ }
+
+ builder.append(GAP + LCB);
+ return builder.toString();
+ }
+
+ private static StringBuilder appendAnnotations(final StringBuilder builder,
+ final List<AnnotationType> annotations) {
+ if ((builder != null) && (annotations != null)) {
+ for (final AnnotationType annotation : annotations) {
+ builder.append("@");
+ builder.append(annotation.getPackageName());
+ builder.append(".");
+ builder.append(annotation.getName());
+
+ if (annotation.containsParameters()) {
+ builder.append("(");
+ final List<AnnotationType.Parameter> parameters = annotation
+ .getParameters();
+ appendAnnotationParams(builder, parameters);
+ builder.append(")");
+ }
+ }
+ }
+ return builder;
+ }
+
+ private static StringBuilder appendAnnotationParams(
+ final StringBuilder builder,
+ final List<AnnotationType.Parameter> parameters) {
+ if (parameters != null) {
+ int i = 0;
+ for (final AnnotationType.Parameter param : parameters) {
+ if (param == null) {
+ continue;
+ }
+ if (i > 0) {
+ builder.append(", ");
+ }
+ final String paramName = param.getName();
+ if (param.getValue() != null) {
+ builder.append(paramName);
+ builder.append(" = ");
+ builder.append(param.getValue());
+ } else {
+ builder.append(paramName);
+ builder.append(" = {");
+ final List<String> values = param.getValues();
+ builder.append(values.get(0));
+ for (int j = 1; j < values.size(); ++j) {
+ builder.append(", ");
+ builder.append(values.get(j));
+ }
+ builder.append("}");
+ }
+ i++;
+ }
+ }
+ return builder;
+ }
+
+ public static String createConstant(final Constant constant,
+ final String indent,
+ final Map<String, LinkedHashMap<String, Integer>> availableImports,
+ final String currentPkg) {
+ final StringBuilder builder = new StringBuilder();
+ builder.append(indent + PUBLIC + GAP + STATIC + GAP + FINAL + GAP);
+ builder.append(getExplicitType(constant.getType(), availableImports,
+ currentPkg) + GAP + constant.getName());
+ builder.append(GAP + "=" + GAP);
+ builder.append(constant.getValue() + SC);
+ return builder.toString();
+ }
+
+ public static String createField(final GeneratedProperty property,
+ final String indent,
+ Map<String, LinkedHashMap<String, Integer>> availableImports,
+ final String currentPkg) {
+ final StringBuilder builder = new StringBuilder();
+ builder.append(indent);
+ if (!property.getAnnotations().isEmpty()) {
+ final List<AnnotationType> annotations = property.getAnnotations();
+ appendAnnotations(builder, annotations);
+ builder.append(NL);
+ }
+ builder.append(indent + PRIVATE + GAP);
+ builder.append(getExplicitType(property.getReturnType(),
+ availableImports, currentPkg) + GAP + property.getName());
+ builder.append(SC);
+ return builder.toString();
+ }
+
+ /**
+ * Create method declaration in interface.
+ *
+ * @param method
+ * @param indent
+ * @return
+ */
+ public static String createMethodDeclaration(final MethodSignature method,
+ final String indent,
+ Map<String, LinkedHashMap<String, Integer>> availableImports,
+ final String currentPkg) {
+ final StringBuilder builder = new StringBuilder();
+
+ if (method == null) {
+ throw new IllegalArgumentException(
+ "Method Signature parameter MUST be specified and cannot be NULL!");
+ }
+
+ final String comment = method.getComment();
+ final String name = method.getName();
+ if (name == null) {
+ throw new IllegalStateException("Method Name cannot be NULL!");
+ }
+
+ final Type type = method.getReturnType();
+ if (type == null) {
+ throw new IllegalStateException(
+ "Method Return type cannot be NULL!");
+ }
+
+ final List<Parameter> parameters = method.getParameters();
+
+ createComment(builder, comment, indent);
+ builder.append(NL);
+ builder.append(indent);
+
+ if (!method.getAnnotations().isEmpty()) {
+ final List<AnnotationType> annotations = method.getAnnotations();
+ appendAnnotations(builder, annotations);
+ builder.append(NL);
+ }
+
+ builder.append(indent
+ + getExplicitType(type, availableImports, currentPkg) + GAP
+ + name);
+ builder.append(LB);
+ for (int i = 0; i < parameters.size(); i++) {
+ Parameter p = parameters.get(i);
+ String separator = COMMA;
+ if (i + 1 == parameters.size()) {
+ separator = "";
+ }
+ builder.append(getExplicitType(p.getType(), availableImports,
+ currentPkg) + GAP + p.getName() + separator);
+ }
+ builder.append(RB);
+ builder.append(SC);
+
+ return builder.toString();
+ }
+
+ public static String createConstructor(
+ GeneratedTransferObject genTransferObject, final String indent,
+ Map<String, LinkedHashMap<String, Integer>> availableImports) {
+ final StringBuilder builder = new StringBuilder();
+
+ final String currentPkg = genTransferObject.getPackageName();
+ final List<GeneratedProperty> properties = genTransferObject
+ .getProperties();
+ final List<GeneratedProperty> ctorParams = new ArrayList<GeneratedProperty>();
+ for (final GeneratedProperty property : properties) {
+ if (property.isReadOnly()) {
+ ctorParams.add(property);
+ }
+ }
+
+ builder.append(indent);
+ builder.append(PUBLIC);
+ builder.append(GAP);
+ builder.append(genTransferObject.getName());
+ builder.append(LB);
+
+ if (!ctorParams.isEmpty()) {
+ builder.append(getExplicitType(ctorParams.get(0).getReturnType(),
+ availableImports, currentPkg));
+ builder.append(" ");
+ builder.append(ctorParams.get(0).getName());
+ for (int i = 1; i < ctorParams.size(); ++i) {
+ final GeneratedProperty param = ctorParams.get(i);
+ builder.append(", ");
+ builder.append(getExplicitType(param.getReturnType(),
+ availableImports, currentPkg));
+ builder.append(GAP);
+ builder.append(param.getName());
+ }
+ }
+ builder.append(RB + GAP + LCB + NL + indent + TAB + "super();" + NL);
+ if (!ctorParams.isEmpty()) {
+ for (final GeneratedProperty property : ctorParams) {
+ builder.append(indent);
+ builder.append(TAB);
+ builder.append("this.");
+ builder.append(property.getName());
+ builder.append(" = ");
+ builder.append(property.getName());
+ builder.append(SC);
+ builder.append(NL);
+ }
+ }
+ builder.append(indent);
+ builder.append(RCB);
+ return builder.toString();
+ }
+
+ public static String createGetter(final GeneratedProperty property,
+ final String indent,
+ Map<String, LinkedHashMap<String, Integer>> availableImports,
+ final String currentPkg) {
+ final StringBuilder builder = new StringBuilder();
+
+ final Type type = property.getReturnType();
+ final String varName = property.getName();
+ final char first = Character.toUpperCase(varName.charAt(0));
+ final String methodName = "get" + first + varName.substring(1);
+
+ builder.append(indent + PUBLIC + GAP
+ + getExplicitType(type, availableImports, currentPkg) + GAP
+ + methodName);
+ builder.append(LB + RB + LCB + NL);
+
+ String currentIndent = indent + TAB;
+
+ builder.append(currentIndent + "return " + varName + SC + NL);
+
+ builder.append(indent + RCB);
+ return builder.toString();
+ }
+
+ /**
+ * Generates Java source for a public void setter of {@code property}:
+ * "set" + capitalized property name, assigning the parameter to the
+ * same-named field via {@code this.}.
+ */
+ public static String createSetter(final GeneratedProperty property,
+ final String indent,
+ Map<String, LinkedHashMap<String, Integer>> availableImports,
+ String currentPkg) {
+ final StringBuilder builder = new StringBuilder();
+
+ final Type type = property.getReturnType();
+ final String varName = property.getName();
+ // Capitalize only the first character to build the mutator name.
+ final char first = Character.toUpperCase(varName.charAt(0));
+ final String methodName = "set" + first + varName.substring(1);
+
+ builder.append(indent + PUBLIC + GAP + "void" + GAP + methodName);
+ builder.append(LB + getExplicitType(type, availableImports, currentPkg)
+ + GAP + varName + RB + LCB + NL);
+ String currentIndent = indent + TAB;
+ builder.append(currentIndent + "this." + varName + " = " + varName + SC
+ + NL);
+ builder.append(indent + RCB);
+ return builder.toString();
+ }
+
+ /**
+ * Generates source for a standard prime-31 {@code hashCode()} over the
+ * given properties; each field contributes 0 when null, otherwise its
+ * own hashCode(). The emitted method carries no @Override annotation.
+ */
+ public static String createHashCode(
+ final List<GeneratedProperty> properties, final String indent) {
+ StringBuilder builder = new StringBuilder();
+ builder.append(indent + "public int hashCode() {" + NL);
+ builder.append(indent + TAB + "final int prime = 31;" + NL);
+ builder.append(indent + TAB + "int result = 1;" + NL);
+
+ for (GeneratedProperty property : properties) {
+ String fieldName = property.getName();
+ builder.append(indent + TAB + "result = prime * result + (("
+ + fieldName + " == null) ? 0 : " + fieldName
+ + ".hashCode());" + NL);
+ }
+
+ builder.append(indent + TAB + "return result;" + NL);
+ builder.append(indent + RCB + NL);
+ return builder.toString();
+ }
+
+ /**
+ * Generates source for a standard {@code equals(Object)}: identity,
+ * null and getClass() checks, then a null-safe field-by-field
+ * comparison over {@code properties}. Pairs with createHashCode so the
+ * generated class obeys the equals/hashCode contract.
+ */
+ public static String createEquals(final GeneratedTransferObject type,
+ final List<GeneratedProperty> properties, final String indent) {
+ StringBuilder builder = new StringBuilder();
+ final String indent1 = indent + TAB;
+ final String indent2 = indent1 + TAB;
+ final String indent3 = indent2 + TAB;
+
+ builder.append(indent + "public boolean equals(Object obj) {" + NL);
+ builder.append(indent1 + "if (this == obj) {" + NL);
+ builder.append(indent2 + "return true;" + NL);
+ builder.append(indent1 + "}" + NL);
+ builder.append(indent1 + "if (obj == null) {" + NL);
+ builder.append(indent2 + "return false;" + NL);
+ builder.append(indent1 + "}" + NL);
+ builder.append(indent1 + "if (getClass() != obj.getClass()) {" + NL);
+ builder.append(indent2 + "return false;" + NL);
+ builder.append(indent1 + "}" + NL);
+
+ // Cast to the concrete generated type so fields can be compared.
+ String typeStr = type.getName();
+ builder.append(indent1 + typeStr + " other = (" + typeStr + ") obj;"
+ + NL);
+
+ for (GeneratedProperty property : properties) {
+ String fieldName = property.getName();
+ builder.append(indent1 + "if (" + fieldName + " == null) {" + NL);
+ builder.append(indent2 + "if (other." + fieldName + " != null) {"
+ + NL);
+ builder.append(indent3 + "return false;" + NL);
+ builder.append(indent2 + "}" + NL);
+ builder.append(indent1 + "} else if (!" + fieldName
+ + ".equals(other." + fieldName + ")) {" + NL);
+ builder.append(indent2 + "return false;" + NL);
+ builder.append(indent1 + "}" + NL);
+ }
+
+ builder.append(indent1 + "return true;" + NL);
+
+ builder.append(indent + RCB + NL);
+ return builder.toString();
+ }
+
+ /**
+ * Generates source for {@code toString()} in the form
+ * "TypeName [a=<a>, b=<b>]". The first/subsequent split exists so only
+ * non-first properties are prefixed with ", " (the removed version of
+ * this method emitted a stray ", " before non-first values).
+ */
+ public static String createToString(final GeneratedTransferObject type,
+ final List<GeneratedProperty> properties, final String indent) {
+ StringBuilder builder = new StringBuilder();
+ builder.append(indent);
+ builder.append("public String toString() {");
+ builder.append(NL);
+ builder.append(indent);
+ builder.append(TAB);
+ builder.append("StringBuilder builder = new StringBuilder();");
+ builder.append(NL);
+ builder.append(indent);
+ builder.append(TAB);
+ builder.append("builder.append(\"");
+ builder.append(type.getName());
+ builder.append(" [");
+
+ boolean first = true;
+ for (GeneratedProperty property : properties) {
+ if (first) {
+ // First property: no leading ", " separator.
+ builder.append(property.getName());
+ builder.append("=\");");
+ builder.append(NL);
+ builder.append(indent);
+ builder.append(TAB);
+ builder.append("builder.append(");
+ builder.append(property.getName());
+ builder.append(");");
+ first = false;
+ } else {
+ builder.append(NL);
+ builder.append(indent);
+ builder.append(TAB);
+ builder.append("builder.append(\", ");
+ builder.append(property.getName());
+ builder.append("=\");");
+ builder.append(NL);
+ builder.append(indent);
+ builder.append(TAB);
+ builder.append("builder.append(");
+ builder.append(property.getName());
+ builder.append(");");
+ }
+ }
+ builder.append(NL);
+ builder.append(indent);
+ builder.append(TAB);
+ builder.append("builder.append(\"]\");");
+ builder.append(NL);
+ builder.append(indent);
+ builder.append(TAB);
+ builder.append("return builder.toString();");
+
+ builder.append(NL);
+ builder.append(indent);
+ builder.append(RCB);
+ builder.append(NL);
+ return builder.toString();
+ }
+
+ /**
+ * Generates a public enum with one constant per enumeration value
+ * (NAME(value)), an int "value" field, and a private constructor
+ * storing it. No getter for "value" is emitted here.
+ *
+ * @throws IllegalArgumentException if enumeration or indent is null
+ */
+ public static String createEnum(final Enumeration enumeration,
+ final String indent) {
+ if (enumeration == null || indent == null)
+ throw new IllegalArgumentException();
+ final StringBuilder builder = new StringBuilder(indent + PUBLIC + GAP
+ + ENUM + GAP + enumeration.getName() + GAP + LCB + NL);
+
+ // Constants are separated by ",\n"; the last one ends with ";".
+ String separator = COMMA + NL;
+ final List<Pair> values = enumeration.getValues();
+
+ for (int i = 0; i < values.size(); i++) {
+ if (i + 1 == values.size()) {
+ separator = SC;
+ }
+ builder.append(indent + TAB + values.get(i).getName() + LB
+ + values.get(i).getValue() + RB + separator);
+ }
+ builder.append(NL);
+ builder.append(NL);
+ final String ENUMERATION_NAME = "value";
+ final String ENUMERATION_TYPE = "int";
+ builder.append(indent + TAB + ENUMERATION_TYPE + GAP + ENUMERATION_NAME
+ + SC);
+ builder.append(NL);
+ builder.append(indent + TAB + PRIVATE + GAP + enumeration.getName()
+ + LB + ENUMERATION_TYPE + GAP + ENUMERATION_NAME + RB + GAP
+ + LCB + NL);
+ builder.append(indent + TAB + TAB + "this." + ENUMERATION_NAME + GAP
+ + "=" + GAP + ENUMERATION_NAME + SC + NL);
+ builder.append(indent + TAB + RCB + NL);
+
+ builder.append(indent + RCB);
+ builder.append(NL);
+ return builder.toString();
+ }
+
+ private static String getExplicitType(final Type type,
+ Map<String, LinkedHashMap<String, Integer>> availableImports,
+ final String currentPkg) {
+ if (type == null) {
+ throw new IllegalArgumentException(
+ "Type parameter MUST be specified and cannot be NULL!");
+ }
+ String packageName = type.getPackageName();
+
+ LinkedHashMap<String, Integer> imports = availableImports.get(type
+ .getName());
+
+ if ((imports != null && packageName
+ .equals(findMaxValue(imports).get(0)))
+ || packageName.equals(currentPkg)) {
+ final StringBuilder builder = new StringBuilder(type.getName());
+ if (type instanceof ParameterizedType) {
+ ParameterizedType pType = (ParameterizedType) type;
+ Type[] pTypes = pType.getActualTypeArguments();
+ builder.append("<");
+ builder.append(getParameters(pTypes, availableImports,
+ currentPkg));
+ builder.append(">");
+ }
+ if (builder.toString().equals("Void")) {
+ return "void";
+ }
+ return builder.toString();
+ } else {
+ final StringBuilder builder = new StringBuilder();
+ if (packageName.startsWith("java.lang")) {
+ builder.append(type.getName());
+ } else {
+ if (!packageName.isEmpty()) {
+ builder.append(packageName + "." + type.getName());
} else {
- builder.append(paramName);
- builder.append(" = {");
- final List<String> values = param.getValues();
- builder.append(values.get(0));
- for (int j = 1; j < values.size(); ++j) {
- builder.append(", ");
- builder.append(values.get(j));
- }
- builder.append("}");
+ builder.append(type.getName());
}
- i++;
- }
- }
- return builder;
- }
-
- public static String createConstant(final Constant constant,
- final String indent,
- final Map<String, LinkedHashMap<String, Integer>> availableImports,
- final String currentPkg) {
- final StringBuilder builder = new StringBuilder();
- builder.append(indent + PUBLIC + GAP + STATIC + GAP + FINAL + GAP);
- builder.append(getExplicitType(constant.getType(), availableImports,
- currentPkg) + GAP + constant.getName());
- builder.append(GAP + "=" + GAP);
- builder.append(constant.getValue() + SC);
- return builder.toString();
- }
-
- public static String createField(final GeneratedProperty property,
- final String indent,
- Map<String, LinkedHashMap<String, Integer>> availableImports,
- final String currentPkg) {
- final StringBuilder builder = new StringBuilder();
- builder.append(indent);
- if (!property.getAnnotations().isEmpty()) {
- final List<AnnotationType> annotations = property.getAnnotations();
- appendAnnotations(builder, annotations);
- builder.append(NL);
- }
- builder.append(indent + PRIVATE + GAP);
- builder.append(getExplicitType(property.getReturnType(),
- availableImports, currentPkg) + GAP + property.getName());
- builder.append(SC);
- return builder.toString();
- }
-
- /**
- * Create method declaration in interface.
- *
- * @param method
- * @param indent
- * @return
- */
- public static String createMethodDeclaration(final MethodSignature method,
- final String indent,
- Map<String, LinkedHashMap<String, Integer>> availableImports,
- final String currentPkg) {
- final StringBuilder builder = new StringBuilder();
-
- if (method == null) {
- throw new IllegalArgumentException(
- "Method Signature parameter MUST be specified and cannot be NULL!");
- }
-
- final String comment = method.getComment();
- final String name = method.getName();
- if (name == null) {
- throw new IllegalStateException("Method Name cannot be NULL!");
- }
-
- final Type type = method.getReturnType();
- if (type == null) {
- throw new IllegalStateException(
- "Method Return type cannot be NULL!");
- }
-
- final List<Parameter> parameters = method.getParameters();
-
- createComment(builder, comment, indent);
- builder.append(NL);
- builder.append(indent);
-
- if (!method.getAnnotations().isEmpty()) {
- final List<AnnotationType> annotations = method.getAnnotations();
- appendAnnotations(builder, annotations);
- builder.append(NL);
- }
-
- builder.append(indent
- + getExplicitType(type, availableImports, currentPkg) + GAP
- + name);
- builder.append(LB);
- for (int i = 0; i < parameters.size(); i++) {
- Parameter p = parameters.get(i);
- String separator = COMMA;
- if (i + 1 == parameters.size()) {
- separator = "";
- }
- builder.append(getExplicitType(p.getType(), availableImports,
- currentPkg) + GAP + p.getName() + separator);
- }
- builder.append(RB);
- builder.append(SC);
-
- return builder.toString();
- }
-
- public static String createConstructor(
- GeneratedTransferObject genTransferObject, final String indent,
- Map<String, LinkedHashMap<String, Integer>> availableImports) {
- final StringBuilder builder = new StringBuilder();
-
- final String currentPkg = genTransferObject.getPackageName();
- final List<GeneratedProperty> properties = genTransferObject
- .getProperties();
- final List<GeneratedProperty> ctorParams = new ArrayList<GeneratedProperty>();
- for (final GeneratedProperty property : properties) {
- if (property.isReadOnly()) {
- ctorParams.add(property);
- }
- }
-
- builder.append(indent);
- builder.append(PUBLIC);
- builder.append(GAP);
- builder.append(genTransferObject.getName());
- builder.append(LB);
-
- if (!ctorParams.isEmpty()) {
- builder.append(getExplicitType(ctorParams.get(0).getReturnType(),
- availableImports, currentPkg));
- builder.append(" ");
- builder.append(ctorParams.get(0).getName());
- for (int i = 1; i < ctorParams.size(); ++i) {
- final GeneratedProperty param = ctorParams.get(i);
- builder.append(", ");
- builder.append(getExplicitType(param.getReturnType(),
- availableImports, currentPkg));
- builder.append(GAP);
- builder.append(param.getName());
- }
- }
- builder.append(RB + GAP + LCB + NL + indent + TAB + "super();" + NL);
- if (!ctorParams.isEmpty()) {
- for (final GeneratedProperty property : ctorParams) {
- builder.append(indent);
- builder.append(TAB);
- builder.append("this.");
- builder.append(property.getName());
- builder.append(" = ");
- builder.append(property.getName());
- builder.append(SC);
- builder.append(NL);
- }
- }
- builder.append(indent);
- builder.append(RCB);
- return builder.toString();
- }
-
- public static String createGetter(final GeneratedProperty property,
- final String indent,
- Map<String, LinkedHashMap<String, Integer>> availableImports,
- final String currentPkg) {
- final StringBuilder builder = new StringBuilder();
-
- final Type type = property.getReturnType();
- final String varName = property.getName();
- final char first = Character.toUpperCase(varName.charAt(0));
- final String methodName = "get" + first + varName.substring(1);
-
- builder.append(indent + PUBLIC + GAP
- + getExplicitType(type, availableImports, currentPkg) + GAP
- + methodName);
- builder.append(LB + RB + LCB + NL);
-
- String currentIndent = indent + TAB;
-
- builder.append(currentIndent + "return " + varName + SC + NL);
-
- builder.append(indent + RCB);
- return builder.toString();
- }
-
- public static String createSetter(final GeneratedProperty property,
- final String indent,
- Map<String, LinkedHashMap<String, Integer>> availableImports,
- String currentPkg) {
- final StringBuilder builder = new StringBuilder();
-
- final Type type = property.getReturnType();
- final String varName = property.getName();
- final char first = Character.toUpperCase(varName.charAt(0));
- final String methodName = "set" + first + varName.substring(1);
-
- builder.append(indent + PUBLIC + GAP + "void" + GAP + methodName);
- builder.append(LB + getExplicitType(type, availableImports, currentPkg)
- + GAP + varName + RB + LCB + NL);
- String currentIndent = indent + TAB;
- builder.append(currentIndent + "this." + varName + " = " + varName + SC
- + NL);
- builder.append(indent + RCB);
- return builder.toString();
- }
-
- public static String createHashCode(
- final List<GeneratedProperty> properties, final String indent) {
- StringBuilder builder = new StringBuilder();
- builder.append(indent + "public int hashCode() {" + NL);
- builder.append(indent + TAB + "final int prime = 31;" + NL);
- builder.append(indent + TAB + "int result = 1;" + NL);
-
- for (GeneratedProperty property : properties) {
- String fieldName = property.getName();
- builder.append(indent + TAB + "result = prime * result + (("
- + fieldName + " == null) ? 0 : " + fieldName
- + ".hashCode());" + NL);
- }
-
- builder.append(indent + TAB + "return result;" + NL);
- builder.append(indent + RCB + NL);
- return builder.toString();
- }
-
- public static String createEquals(final GeneratedTransferObject type,
- final List<GeneratedProperty> properties, final String indent) {
- StringBuilder builder = new StringBuilder();
- final String indent1 = indent + TAB;
- final String indent2 = indent1 + TAB;
- final String indent3 = indent2 + TAB;
-
- builder.append(indent + "public boolean equals(Object obj) {" + NL);
- builder.append(indent1 + "if (this == obj) {" + NL);
- builder.append(indent2 + "return true;" + NL);
- builder.append(indent1 + "}" + NL);
- builder.append(indent1 + "if (obj == null) {" + NL);
- builder.append(indent2 + "return false;" + NL);
- builder.append(indent1 + "}" + NL);
- builder.append(indent1 + "if (getClass() != obj.getClass()) {" + NL);
- builder.append(indent2 + "return false;" + NL);
- builder.append(indent1 + "}" + NL);
-
- String typeStr = type.getName();
- builder.append(indent1 + typeStr + " other = (" + typeStr + ") obj;"
- + NL);
-
- for (GeneratedProperty property : properties) {
- String fieldName = property.getName();
- builder.append(indent1 + "if (" + fieldName + " == null) {" + NL);
- builder.append(indent2 + "if (other." + fieldName + " != null) {"
- + NL);
- builder.append(indent3 + "return false;" + NL);
- builder.append(indent2 + "}" + NL);
- builder.append(indent1 + "} else if (!" + fieldName
- + ".equals(other." + fieldName + ")) {" + NL);
- builder.append(indent2 + "return false;" + NL);
- builder.append(indent1 + "}" + NL);
- }
-
- builder.append(indent1 + "return true;" + NL);
-
- builder.append(indent + RCB + NL);
- return builder.toString();
- }
-
- public static String createToString(final GeneratedTransferObject type,
- final List<GeneratedProperty> properties, final String indent) {
- StringBuilder builder = new StringBuilder();
- builder.append(indent);
- builder.append("public String toString() {");
- builder.append(NL);
- builder.append(indent);
- builder.append(TAB);
- builder.append("StringBuilder builder = new StringBuilder();");
- builder.append(NL);
- builder.append(indent);
- builder.append(TAB);
- builder.append("builder.append(\"");
- builder.append(type.getName());
- builder.append(" [");
-
- boolean first = true;
- for (GeneratedProperty property : properties) {
- if (first) {
- builder.append(property.getName());
- builder.append("=\");");
- builder.append(NL);
- builder.append(indent);
- builder.append(TAB);
- builder.append("builder.append(");
- builder.append(property.getName());
- builder.append(");");
- first = false;
- } else {
- builder.append(NL);
- builder.append(indent);
- builder.append(TAB);
- builder.append("builder.append(\", ");
- builder.append(property.getName());
- builder.append("=\");");
- builder.append(NL);
- builder.append(indent);
- builder.append(TAB);
- builder.append("builder.append(\", ");
- builder.append(property.getName());
- builder.append(");");
- }
- }
- builder.append(NL);
- builder.append(indent);
- builder.append(TAB);
- builder.append("builder.append(\"]\");");
- builder.append(NL);
- builder.append(indent);
- builder.append(TAB);
- builder.append("return builder.toString();");
-
- builder.append(NL);
- builder.append(indent);
- builder.append(RCB);
- builder.append(NL);
- return builder.toString();
- }
-
- public static String createEnum(final Enumeration enumeration,
- final String indent) {
- final StringBuilder builder = new StringBuilder(indent + ENUM + GAP
- + enumeration.getName() + GAP + LCB + NL);
-
- String separator = COMMA;
- final List<Pair> values = enumeration.getValues();
- builder.append(indent + TAB);
- for (int i = 0; i < values.size(); i++) {
- if (i + 1 == values.size()) {
- separator = SC;
- }
- builder.append(values.get(i).getName() + separator);
- }
- builder.append(NL);
- builder.append(indent + RCB);
- return builder.toString();
- }
-
- private static String getExplicitType(final Type type,
- Map<String, LinkedHashMap<String, Integer>> availableImports,
- final String currentPkg) {
- if (type == null) {
- throw new IllegalArgumentException(
- "Type parameter MUST be specified and cannot be NULL!");
- }
- String packageName = type.getPackageName();
-
- LinkedHashMap<String, Integer> imports = availableImports.get(type
- .getName());
-
- if ((imports != null && packageName
- .equals(findMaxValue(imports).get(0)))
- || packageName.equals(currentPkg)) {
- final StringBuilder builder = new StringBuilder(type.getName());
- if (type instanceof ParameterizedType) {
- ParameterizedType pType = (ParameterizedType) type;
- Type[] pTypes = pType.getActualTypeArguments();
- builder.append("<");
- builder.append(getParameters(pTypes, availableImports,
- currentPkg));
- builder.append(">");
- }
- if (builder.toString().equals("Void")) {
- return "void";
- }
- return builder.toString();
- } else {
- final StringBuilder builder = new StringBuilder();
- if (packageName.startsWith("java.lang")) {
- builder.append(type.getName());
- } else {
- builder.append(packageName + "." + type.getName());
- }
- if (type instanceof ParameterizedType) {
- ParameterizedType pType = (ParameterizedType) type;
- Type[] pTypes = pType.getActualTypeArguments();
- builder.append("<");
- builder.append(getParameters(pTypes, availableImports,
- currentPkg));
- builder.append(">");
- }
- if (builder.toString().equals("Void")) {
- return "void";
- }
- return builder.toString();
- }
- }
-
- private static String getParameters(final Type[] pTypes,
- Map<String, LinkedHashMap<String, Integer>> availableImports,
- String currentPkg) {
- final StringBuilder builder = new StringBuilder();
- for (int i = 0; i < pTypes.length; i++) {
- Type t = pTypes[i];
-
- String separator = COMMA;
- if (i + 1 == pTypes.length) {
- separator = "";
- }
- builder.append(getExplicitType(t, availableImports, currentPkg)
- + separator);
- }
- return builder.toString();
- }
-
- private static List<String> findMaxValue(
- LinkedHashMap<String, Integer> imports) {
- final List<String> result = new ArrayList<String>();
-
- int maxValue = 0;
- int currentValue = 0;
- for (Map.Entry<String, Integer> entry : imports.entrySet()) {
- currentValue = entry.getValue();
- if (currentValue > maxValue) {
- result.clear();
- result.add(entry.getKey());
- } else if (currentValue == maxValue) {
- result.add(entry.getKey());
- }
- }
- return result;
- }
-
- private static void createComment(final StringBuilder builder,
- final String comment, final String indent) {
- if (comment != null && comment.length() > 0) {
- builder.append(indent + "/*" + NL);
- builder.append(indent + comment + NL);
- builder.append(indent + "*/" + NL);
- }
- }
-
- public static Map<String, LinkedHashMap<String, Integer>> createImports(
- GeneratedType genType) {
- final Map<String, LinkedHashMap<String, Integer>> imports = new HashMap<String, LinkedHashMap<String, Integer>>();
- final String genTypePkg = genType.getPackageName();
-
- final List<Constant> constants = genType.getConstantDefinitions();
- final List<MethodSignature> methods = genType.getMethodDefinitions();
- List<Type> impl = genType.getImplements();
-
- // IMPLEMENTATIONS
- if (impl != null) {
- for (Type t : impl) {
- addTypeToImports(t, imports, genTypePkg);
- }
- }
-
- // CONSTANTS
- if (constants != null) {
- for (Constant c : constants) {
- Type ct = c.getType();
- addTypeToImports(ct, imports, genTypePkg);
- }
- }
-
- // METHODS
- if (methods != null) {
- for (MethodSignature m : methods) {
- Type ct = m.getReturnType();
- addTypeToImports(ct, imports, genTypePkg);
- for (MethodSignature.Parameter p : m.getParameters()) {
- addTypeToImports(p.getType(), imports, genTypePkg);
- }
- }
- }
-
- // PROPERTIES
- if (genType instanceof GeneratedTransferObject) {
- GeneratedTransferObject genTO = (GeneratedTransferObject) genType;
-
- List<GeneratedProperty> props = genTO.getProperties();
- if (props != null) {
- for (GeneratedProperty prop : props) {
- Type pt = prop.getReturnType();
- addTypeToImports(pt, imports, genTypePkg);
- }
- }
- }
-
- return imports;
- }
-
- private static void addTypeToImports(Type type,
- Map<String, LinkedHashMap<String, Integer>> importedTypes,
- String genTypePkg) {
- String typeName = type.getName();
- String typePkg = type.getPackageName();
- if (typePkg.startsWith("java.lang") || typePkg.equals(genTypePkg)) {
- return;
- }
- LinkedHashMap<String, Integer> packages = importedTypes.get(typeName);
- if (packages == null) {
- packages = new LinkedHashMap<String, Integer>();
- packages.put(typePkg, 1);
- importedTypes.put(typeName, packages);
- } else {
- Integer occurrence = packages.get(typePkg);
- if (occurrence == null) {
- packages.put(typePkg, 1);
- } else {
- occurrence++;
- packages.put(typePkg, occurrence);
- }
- }
-
- if (type instanceof ParameterizedType) {
- ParameterizedType pt = (ParameterizedType) type;
- Type[] params = pt.getActualTypeArguments();
- for (Type param : params) {
- addTypeToImports(param, importedTypes, genTypePkg);
- }
- }
- }
-
- public static List<String> createImportLines(
- Map<String, LinkedHashMap<String, Integer>> imports) {
- List<String> importLines = new ArrayList<String>();
-
- for (Map.Entry<String, LinkedHashMap<String, Integer>> entry : imports
- .entrySet()) {
- String typeName = entry.getKey();
- LinkedHashMap<String, Integer> typePkgMap = entry.getValue();
- String typePkg = typePkgMap.keySet().iterator().next();
- importLines.add("import " + typePkg + "." + typeName + SC);
- }
- return importLines;
- }
+
+ }
+ if (type instanceof ParameterizedType) {
+ ParameterizedType pType = (ParameterizedType) type;
+ Type[] pTypes = pType.getActualTypeArguments();
+ builder.append("<");
+ builder.append(getParameters(pTypes, availableImports,
+ currentPkg));
+ builder.append(">");
+ }
+ if (builder.toString().equals("Void")) {
+ return "void";
+ }
+ return builder.toString();
+ }
+ }
+
+ /**
+ * Renders the actual type arguments as a comma-separated list using
+ * getExplicitType for each (no trailing separator after the last).
+ */
+ private static String getParameters(final Type[] pTypes,
+ Map<String, LinkedHashMap<String, Integer>> availableImports,
+ String currentPkg) {
+ final StringBuilder builder = new StringBuilder();
+ for (int i = 0; i < pTypes.length; i++) {
+ Type t = pTypes[i];
+
+ String separator = COMMA;
+ if (i + 1 == pTypes.length) {
+ separator = "";
+ }
+ builder.append(getExplicitType(t, availableImports, currentPkg)
+ + separator);
+ }
+ return builder.toString();
+ }
+
+ /**
+ * Returns the key(s) of {@code imports} that carry the highest Integer
+ * value, in map iteration order; all keys tied at that maximum are
+ * returned.
+ */
+ private static List<String> findMaxValue(
+ LinkedHashMap<String, Integer> imports) {
+ final List<String> result = new ArrayList<String>();
+
+ int maxValue = 0;
+ int currentValue = 0;
+ for (Map.Entry<String, Integer> entry : imports.entrySet()) {
+ currentValue = entry.getValue();
+ if (currentValue > maxValue) {
+ // FIX: record the new maximum. Previously maxValue was never
+ // updated, so every later positive count compared against 0,
+ // re-cleared the list, and ties were computed incorrectly.
+ maxValue = currentValue;
+ result.clear();
+ result.add(entry.getKey());
+ } else if (currentValue == maxValue) {
+ result.add(entry.getKey());
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Appends {@code comment} wrapped in a block comment to the builder;
+ * does nothing when the comment is null or empty. The comment text is
+ * emitted as-is, so embedded "*\/" sequences would break the output —
+ * presumably callers never pass those; verify.
+ */
+ private static void createComment(final StringBuilder builder,
+ final String comment, final String indent) {
+ if (comment != null && comment.length() > 0) {
+ builder.append(indent + "/*" + NL);
+ builder.append(indent + comment + NL);
+ builder.append(indent + "*/" + NL);
+ }
+ }
+
+ /**
+ * Collects import candidates for {@code genType}: every type referenced
+ * by its implemented interfaces, constants, method return types and
+ * parameters, and (for transfer objects) property return types. The
+ * result maps simple type name -> (package -> occurrence count), so
+ * colliding simple names from different packages can be disambiguated.
+ */
+ public static Map<String, LinkedHashMap<String, Integer>> createImports(
+ GeneratedType genType) {
+ final Map<String, LinkedHashMap<String, Integer>> imports = new HashMap<String, LinkedHashMap<String, Integer>>();
+ final String genTypePkg = genType.getPackageName();
+
+ final List<Constant> constants = genType.getConstantDefinitions();
+ final List<MethodSignature> methods = genType.getMethodDefinitions();
+ List<Type> impl = genType.getImplements();
+
+ // IMPLEMENTATIONS
+ if (impl != null) {
+ for (Type t : impl) {
+ addTypeToImports(t, imports, genTypePkg);
+ }
+ }
+
+ // CONSTANTS
+ if (constants != null) {
+ for (Constant c : constants) {
+ Type ct = c.getType();
+ addTypeToImports(ct, imports, genTypePkg);
+ }
+ }
+
+ // METHODS
+ if (methods != null) {
+ for (MethodSignature m : methods) {
+ Type ct = m.getReturnType();
+ addTypeToImports(ct, imports, genTypePkg);
+ for (MethodSignature.Parameter p : m.getParameters()) {
+ addTypeToImports(p.getType(), imports, genTypePkg);
+ }
+ }
+ }
+
+ // PROPERTIES
+ if (genType instanceof GeneratedTransferObject) {
+ GeneratedTransferObject genTO = (GeneratedTransferObject) genType;
+
+ List<GeneratedProperty> props = genTO.getProperties();
+ if (props != null) {
+ for (GeneratedProperty prop : props) {
+ Type pt = prop.getReturnType();
+ addTypeToImports(pt, imports, genTypePkg);
+ }
+ }
+ }
+
+ return imports;
+ }
+
+ /**
+ * Records one occurrence of {@code type}'s package under its simple
+ * name in {@code importedTypes}, then recurses into any actual type
+ * arguments. Skips java.lang types, types in the generated type's own
+ * package, and types with an empty package (no import needed).
+ */
+ private static void addTypeToImports(Type type,
+ Map<String, LinkedHashMap<String, Integer>> importedTypes,
+ String genTypePkg) {
+ String typeName = type.getName();
+ String typePkg = type.getPackageName();
+ if (typePkg.startsWith("java.lang") || typePkg.equals(genTypePkg) ||
+ typePkg.isEmpty()) {
+ return;
+ }
+ LinkedHashMap<String, Integer> packages = importedTypes.get(typeName);
+ if (packages == null) {
+ // First sighting of this simple name: start its package counter.
+ packages = new LinkedHashMap<String, Integer>();
+ packages.put(typePkg, 1);
+ importedTypes.put(typeName, packages);
+ } else {
+ Integer occurrence = packages.get(typePkg);
+ if (occurrence == null) {
+ packages.put(typePkg, 1);
+ } else {
+ occurrence++;
+ packages.put(typePkg, occurrence);
+ }
+ }
+
+ // Generic arguments (e.g. List<Foo>) must be imported as well.
+ if (type instanceof ParameterizedType) {
+ ParameterizedType pt = (ParameterizedType) type;
+ Type[] params = pt.getActualTypeArguments();
+ for (Type param : params) {
+ addTypeToImports(param, importedTypes, genTypePkg);
+ }
+ }
+ }
+
+ /**
+ * Renders one "import pkg.Name;" line per simple type name.
+ * NOTE(review): this takes the FIRST package in insertion order, while
+ * getExplicitType resolves collisions via findMaxValue (highest count)
+ * — confirm these two choices agree for colliding simple names.
+ */
+ public static List<String> createImportLines(
+ Map<String, LinkedHashMap<String, Integer>> imports) {
+ List<String> importLines = new ArrayList<String>();
+
+ for (Map.Entry<String, LinkedHashMap<String, Integer>> entry : imports
+ .entrySet()) {
+ String typeName = entry.getKey();
+ LinkedHashMap<String, Integer> typePkgMap = entry.getValue();
+ String typePkg = typePkgMap.keySet().iterator().next();
+ importLines.add("import " + typePkg + "." + typeName + SC);
+ }
+ return importLines;
+ }
}
import org.opendaylight.controller.sal.binding.model.api.MethodSignature;
import org.opendaylight.controller.sal.binding.model.api.Type;
-public class InterfaceGenerator implements CodeGenerator {
-
- private Map<String, LinkedHashMap<String, Integer>> imports;
-
- public Writer generate(Type type) throws IOException {
- Writer writer = new StringWriter();
- if (type instanceof GeneratedType
- && !(type instanceof GeneratedTransferObject)) {
- GeneratedType genType = (GeneratedType) type;
- imports = GeneratorUtil.createImports(genType);
-
- final String currentPkg = genType.getPackageName();
- final List<Constant> constants = genType.getConstantDefinitions();
- final List<MethodSignature> methods = genType
- .getMethodDefinitions();
- final List<Enumeration> enums = genType.getEnumDefintions();
-
- writer.write(GeneratorUtil.createPackageDeclaration(genType
- .getPackageName()));
- writer.write(NL);
-
- List<String> importLines = GeneratorUtil.createImportLines(imports);
- for (String line : importLines) {
- writer.write(line + NL);
- }
- writer.write(NL);
-
- writer.write(GeneratorUtil.createIfcDeclaration(genType, "",
- imports));
- writer.write(NL);
-
- if (constants != null) {
- for (Constant c : constants) {
- writer.write(GeneratorUtil.createConstant(c, TAB, imports,
- currentPkg) + NL);
- }
- writer.write(NL);
- }
-
- if (methods != null) {
- for (MethodSignature m : methods) {
- writer.write(GeneratorUtil.createMethodDeclaration(m, TAB,
- imports, currentPkg) + NL);
- }
- writer.write(NL);
- }
-
- if (enums != null) {
- for (Enumeration e : enums) {
- writer.write(GeneratorUtil.createEnum(e, TAB) + NL);
- }
- writer.write(NL);
- }
-
- writer.write(RCB);
- }
- return writer;
- }
+public final class InterfaceGenerator implements CodeGenerator {
+
+ private Map<String, LinkedHashMap<String, Integer>> imports;
+
+ /**
+ * Renders each enumeration as an inner enum via EnumGenerator,
+ * concatenating one NL-terminated snippet per enum; returns "" for a
+ * null list. NOTE(review): an IOException from the generator is only
+ * printed and the enum silently skipped — confirm this best-effort
+ * behavior is intended.
+ */
+ private String generateEnums(List<Enumeration> enums) {
+ String result = "";
+ if (enums != null) {
+ EnumGenerator enumGenerator = new EnumGenerator();
+ for (Enumeration en : enums) {
+ try {
+ result = result
+ + (enumGenerator.generateInnerEnumeration(en, TAB).toString() + NL);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Renders one constant declaration per entry (via
+ * GeneratorUtil.createConstant), each followed by NL, then a trailing
+ * blank line; returns "" when {@code constants} is null.
+ */
+ private String generateConstants(List<Constant> constants, String pkgName) {
+ String result = "";
+ if (constants != null) {
+ for (Constant c : constants) {
+ result = result
+ + GeneratorUtil
+ .createConstant(c, TAB, imports, pkgName) + NL;
+ }
+ // FIX: String.concat returns a new String and does not modify
+ // the receiver; the previous "result.concat(NL);" discarded its
+ // result, so the trailing blank line was never appended.
+ result = result + NL;
+ }
+ return result;
+ }
+
+ /**
+ * Renders one abstract method declaration per signature (via
+ * GeneratorUtil.createMethodDeclaration), each followed by NL, plus a
+ * trailing blank line; returns "" when {@code methods} is null.
+ */
+ public String generateMethods(List<MethodSignature> methods, String pkgName) {
+ String result = "";
+
+ if (methods != null) {
+ for (MethodSignature m : methods) {
+ result = result
+ + GeneratorUtil.createMethodDeclaration(m, TAB,
+ imports, pkgName) + NL;
+ }
+ result = result + NL;
+ }
+ return result;
+ }
+
+ /**
+ * Emits complete interface source for {@code type} into a StringWriter:
+ * package declaration, import lines, interface declaration, then inner
+ * enums, constants and method declarations, closed by RCB. Types that
+ * are not a plain GeneratedType (or are transfer objects) yield an
+ * empty writer.
+ */
+ public Writer generate(Type type) throws IOException {
+ Writer writer = new StringWriter();
+ if (type instanceof GeneratedType
+ && !(type instanceof GeneratedTransferObject)) {
+ GeneratedType genType = (GeneratedType) type;
+ // Side effect: populates the instance-level import map used by
+ // the generate* helpers below.
+ imports = GeneratorUtil.createImports(genType);
+
+ final String currentPkg = genType.getPackageName();
+ final List<Constant> constants = genType.getConstantDefinitions();
+ final List<MethodSignature> methods = genType
+ .getMethodDefinitions();
+ final List<Enumeration> enums = genType.getEnumDefintions();
+
+ writer.write(GeneratorUtil.createPackageDeclaration(genType
+ .getPackageName()));
+ writer.write(NL);
+
+ List<String> importLines = GeneratorUtil.createImportLines(imports);
+ for (String line : importLines) {
+ writer.write(line + NL);
+ }
+ writer.write(NL);
+ writer.write(GeneratorUtil.createIfcDeclaration(genType, "",
+ imports));
+ writer.write(NL);
+
+ writer.write(generateEnums(enums));
+ writer.write(generateConstants(constants, currentPkg));
+ writer.write(generateMethods(methods, currentPkg));
+
+ writer.write(RCB);
+ }
+ return writer;
+ }
}
e.printStackTrace();
}
}
+
+ @Test
+ public void toStringTest() {
+ // Build a transfer object with two toString properties and check
+ // that ClassCodeGenerator produces a non-null writer for it.
+ final GeneratedTOBuilder toBuilder = new GeneratedTOBuilderImpl(
+ "simple.pack", "DefCtor");
+
+ GeneratedPropertyBuilder propBuilder = toBuilder.addProperty("foo");
+ propBuilder.addReturnType(Types.typeForClass(String.class));
+ propBuilder.setReadOnly(false);
+ toBuilder.addToStringProperty(propBuilder);
+
+ propBuilder = toBuilder.addProperty("bar");
+ propBuilder.addReturnType(Types.typeForClass(Integer.class));
+ propBuilder.setReadOnly(false);
+ toBuilder.addToStringProperty(propBuilder);
+ final GeneratedTransferObject genTO = toBuilder.toInstance();
+ final ClassCodeGenerator clsGen = new ClassCodeGenerator();
+ try {
+ final Writer writer = clsGen.generate(genTO);
+ assertNotNull(writer);
+ } catch (IOException e) {
+ // NOTE(review): generation failure is only printed, not failed
+ // — consider fail(e.getMessage()) so errors surface.
+ e.printStackTrace();
+ }
+ }
}
import org.opendaylight.controller.binding.generator.util.generated.type.builder.GeneratedTypeBuilderImpl;
import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
import org.opendaylight.controller.sal.binding.generator.impl.BindingGeneratorImpl;
+import org.opendaylight.controller.sal.binding.model.api.Enumeration;
import org.opendaylight.controller.sal.binding.model.api.GeneratedTransferObject;
import org.opendaylight.controller.sal.binding.model.api.GeneratedType;
import org.opendaylight.controller.sal.binding.model.api.Type;
final List<Type> types = bindingGenerator.generateTypes(context);
final Set<GeneratedType> typesToGenerate = new HashSet<GeneratedType>();
final Set<GeneratedTransferObject> tosToGenerate = new HashSet<GeneratedTransferObject>();
+ final Set<Enumeration> enumerationToGenerate = new HashSet<Enumeration>();
for (Type type : types) {
if (type instanceof GeneratedType
&& !(type instanceof GeneratedTransferObject)) {
if (type instanceof GeneratedTransferObject) {
tosToGenerate.add((GeneratedTransferObject) type);
}
+ if (type instanceof Enumeration) {
+ enumerationToGenerate.add((Enumeration) type);
+ }
}
final GeneratorJavaFile generator = new GeneratorJavaFile(
- typesToGenerate, tosToGenerate);
+ typesToGenerate, tosToGenerate, enumerationToGenerate);
generator.generateToFile(new File(GENERATOR_OUTPUT_PATH));
JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
import java.util.Set;
import org.apache.maven.plugin.logging.Log;
+import org.apache.maven.project.MavenProject;
import org.opendaylight.controller.sal.binding.generator.api.BindingGenerator;
import org.opendaylight.controller.sal.binding.generator.impl.BindingGeneratorImpl;
import org.opendaylight.controller.sal.binding.model.api.GeneratedTransferObject;
+import org.opendaylight.controller.sal.binding.model.api.Enumeration;
import org.opendaylight.controller.sal.binding.model.api.GeneratedType;
import org.opendaylight.controller.sal.binding.model.api.Type;
import org.opendaylight.controller.sal.java.api.generator.GeneratorJavaFile;
import org.opendaylight.controller.yang.model.api.SchemaContext;
import org.opendaylight.controller.yang2sources.spi.CodeGenerator;
-public class CodeGeneratorImpl implements CodeGenerator {
+public final class CodeGeneratorImpl implements CodeGenerator {
- @Override
- public Collection<File> generateSources(SchemaContext context,
- File outputBaseDir, Set<Module> yangModules, File projectBaseDir)
- throws IOException {
+ @Override
+ public Collection<File> generateSources(SchemaContext context,
+ File outputBaseDir, Set<Module> yangModules) throws IOException {
- final BindingGenerator bindingGenerator = new BindingGeneratorImpl();
- final List<Type> types = bindingGenerator.generateTypes(context);
- final Set<GeneratedType> typesToGenerate = new HashSet<GeneratedType>();
- final Set<GeneratedTransferObject> tosToGenerate = new HashSet<GeneratedTransferObject>();
- for (Type type : types) {
- if (type instanceof GeneratedTransferObject) {
- tosToGenerate.add((GeneratedTransferObject) type);
- } else if (type instanceof GeneratedType) {
- typesToGenerate.add((GeneratedType) type);
- }
+ final BindingGenerator bindingGenerator = new BindingGeneratorImpl();
+ final List<Type> types = bindingGenerator.generateTypes(context);
+ final Set<GeneratedType> typesToGenerate = new HashSet<>();
+ final Set<GeneratedTransferObject> tosToGenerate = new HashSet<>();
+ final Set<Enumeration> enumsToGenerate = new HashSet<>();
+ for (Type type : types) {
+ if (type instanceof GeneratedTransferObject) {
+ tosToGenerate.add((GeneratedTransferObject) type);
+ } else if (type instanceof GeneratedType) {
+ typesToGenerate.add((GeneratedType) type);
+ } else if (type instanceof Enumeration) {
+ enumsToGenerate.add((Enumeration) type);
+ }
}
final GeneratorJavaFile generator = new GeneratorJavaFile(
- typesToGenerate, tosToGenerate);
+ typesToGenerate, tosToGenerate, enumsToGenerate);
- return generator.generateToFile(outputBaseDir.getPath().startsWith(
- projectBaseDir.getPath()) ? outputBaseDir : new File(
- projectBaseDir, outputBaseDir.getPath()));
+ return generator.generateToFile(outputBaseDir);
}
@Override
// no additional config utilized
}
+ @Override
+ public void setResourceBaseDir(File resourceBaseDir) {
+ // no resource processing necessary
+ }
+
+ @Override
+ public void setMavenProject(MavenProject project) {
+ // no additional information needed
+ }
+
}
import static org.junit.matchers.JUnitMatchers.*;
import java.io.File;
+import java.net.URL;
import org.apache.maven.it.VerificationException;
import org.apache.maven.it.Verifier;
@Test
public void testAdditionalConfiguration() throws VerificationException {
Verifier v = setUp("AdditionalConfig/", false);
- v.verifyTextInLog("[INFO] yang-to-sources: Additional configuration picked up for : org.opendaylight.controller.yang2sources.spi.CodeGeneratorTestImpl: {nm1=abcd=a.b.c.d, nm2=abcd2=a.b.c.d.2}");
- v.verifyTextInLog("[INFO] yang-to-sources: Additional configuration picked up for : org.opendaylight.controller.yang2sources.spi.CodeGeneratorTestImpl: {c1=config}");
+ v.verifyTextInLog("[DEBUG] yang-to-sources: Additional configuration picked up for : org.opendaylight.controller.yang2sources.spi.CodeGeneratorTestImpl: {nm1=abcd=a.b.c.d, nm2=abcd2=a.b.c.d.2}");
+ v.verifyTextInLog("[DEBUG] yang-to-sources: Additional configuration picked up for : org.opendaylight.controller.yang2sources.spi.CodeGeneratorTestImpl: {c1=config}");
+ v.verifyTextInLog(File.separator
+ + "files marked as resources: META-INF" + File.separator
+ + "yang");
+ v.verifyTextInLog("target"
+ + File.separator
+ + "generated-resources marked as resources for generator: org.opendaylight.controller.yang2sources.spi.CodeGeneratorTestImpl");
}
@Test
static Verifier setUp(String project, boolean ignoreF)
throws VerificationException {
- Verifier verifier = new Verifier(new File("src/test/resources/"
- + project).getAbsolutePath());
+ final URL path = YangToSourcesPluginTestIT.class.getResource("/"
+ + project + "pom.xml");
+ File parent = new File(path.getPath());
+ Verifier verifier = new Verifier(parent.getParent());
if (ignoreF)
verifier.addCliOption("-fn");
+ verifier.setMavenDebug(true);
verifier.executeGoal("generate-sources");
return verifier;
}
@Test
public void testFindResourceOnCp() throws VerificationException {
- Verifier v1 = new Verifier(
- new File("src/test/resources/GenerateTest1/").getAbsolutePath());
+ Verifier v1 = new Verifier(new File(getClass().getResource(
+ "/GenerateTest1/pom.xml").getPath()).getParent());
v1.executeGoal("clean");
v1.executeGoal("package");
v1.assertFilePresent("target/classes/META-INF/yang/testfile1.yang");
v2.assertFileNotPresent("target/classes/META-INF/yang/testfile2.yang");
v2.assertFileNotPresent("target/classes/META-INF/yang/testfile3.yang");
}
-
}
<nm1>abcd=a.b.c.d</nm1>
<nm2>abcd2=a.b.c.d.2</nm2>
</additionalConfiguration>
+ <resourceBaseDir>/target/resourcesGenerated</resourceBaseDir>
</generator>
<generator>
<codeGeneratorClass>
</outputBaseDir>
</generator>
</codeGenerators>
- <resourceProviders>
- <provider>
- <resourceProviderClass>
- org.opendaylight.controller.yang2sources.spi.ResourceProviderTestImpl
- </resourceProviderClass>
- <outputBaseDir>
- outDir/
- </outputBaseDir>
- </provider>
- </resourceProviders>
</configuration>
</execution>
</executions>
public static final class CodeGeneratorArg extends ConfigArg {
private static final String CODE_GEN_DEFAULT_DIR = "target"
+ File.separator + "generated-sources";
+ private static final String CODE_GEN_DEFAULT_RESOURCE_DIR = "target"
+ + File.separator + "generated-resources";
private String codeGeneratorClass;
+ private File resourceBaseDir = new File(CODE_GEN_DEFAULT_RESOURCE_DIR);
private Map<String, String> additionalConfiguration = Maps.newHashMap();
this.codeGeneratorClass = codeGeneratorClass;
}
+ public CodeGeneratorArg(String codeGeneratorClass,
+ String outputBaseDir, String resourceBaseDir) {
+ super(outputBaseDir);
+ this.codeGeneratorClass = codeGeneratorClass;
+ this.resourceBaseDir = new File(resourceBaseDir);
+ }
+
@Override
public void check() {
Preconditions.checkNotNull(codeGeneratorClass,
return codeGeneratorClass;
}
+ public File getResourceBaseDir(MavenProject project) {
+ if (resourceBaseDir.isAbsolute()) {
+ return resourceBaseDir;
+ } else {
+ return new File(project.getBasedir(), resourceBaseDir.getPath());
+ }
+ }
+
public Map<String, String> getAdditionalConfiguration() {
return additionalConfiguration;
}
*/
package org.opendaylight.controller.yang2sources.plugin;
+import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
+import java.io.FilenameFilter;
+import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Enumeration;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
import org.apache.commons.io.FileUtils;
import org.apache.maven.artifact.Artifact;
+import org.apache.maven.plugin.MojoFailureException;
+import org.apache.maven.plugin.logging.Log;
import org.apache.maven.project.MavenProject;
+import org.opendaylight.controller.yang.model.api.Module;
+import org.opendaylight.controller.yang.model.api.SchemaContext;
-import com.google.common.base.Function;
-import com.google.common.collect.Collections2;
+import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
}
}
- static String[] listFilesAsArrayOfPaths(File rootDir)
- throws FileNotFoundException {
- String[] filesArray = new String[] {};
- Collection<File> yangFiles = listFiles(rootDir);
-
- // If collection is empty, return empty array [] rather then [null]
- // array, that is created by default
- return yangFiles.isEmpty() ? filesArray : Collections2.transform(
- yangFiles, new Function<File, String>() {
-
- @Override
- public String apply(File input) {
- return input.getPath();
- }
- }).toArray(filesArray);
- }
-
private static void toCache(final File rootDir,
final Collection<File> yangFiles) {
cache.put(rootDir, yangFiles);
return String.format("%s %s", logPrefix, innerMessage);
}
- public static List<File> getClassPath(MavenProject project) {
+ static List<File> getClassPath(MavenProject project) {
List<File> dependencies = Lists.newArrayList();
for (Artifact element : project.getArtifacts()) {
File asFile = element.getFile();
: false;
}
- public static boolean acceptedFilter(String name, List<String> filter) {
- for (String f : filter) {
- if (name.endsWith(f)) {
- return true;
+ static <T> T checkNotNull(T obj, String paramName) {
+ return Preconditions.checkNotNull(obj, "Parameter " + paramName
+ + " is null");
+ }
+
+ final static class YangsInZipsResult implements Closeable {
+ final List<InputStream> yangStreams;
+ private final List<Closeable> zipInputStreams;
+
+ private YangsInZipsResult(List<InputStream> yangStreams,
+ List<Closeable> zipInputStreams) {
+ this.yangStreams = yangStreams;
+ this.zipInputStreams = zipInputStreams;
+ }
+
+ @Override
+ public void close() throws IOException {
+ for (InputStream is : yangStreams) {
+ is.close();
+ }
+ for (Closeable is : zipInputStreams) {
+ is.close();
}
}
- return false;
+ }
+
+ static YangsInZipsResult findYangFilesInDependenciesAsStream(Log log,
+ MavenProject project)
+ throws MojoFailureException {
+ List<InputStream> yangsFromDependencies = new ArrayList<>();
+ List<Closeable> zips = new ArrayList<>();
+ try {
+ List<File> filesOnCp = Util.getClassPath(project);
+ log.info(Util.message(
+ "Searching for yang files in following dependencies: %s",
+ YangToSourcesProcessor.LOG_PREFIX, filesOnCp));
+
+ for (File file : filesOnCp) {
+ List<String> foundFilesForReporting = new ArrayList<>();
+ // is it jar file or directory?
+ if (file.isDirectory()) {
+ File yangDir = new File(file,
+ YangToSourcesProcessor.META_INF_YANG_STRING);
+ if (yangDir.exists() && yangDir.isDirectory()) {
+ File[] yangFiles = yangDir
+ .listFiles(new FilenameFilter() {
+ @Override
+ public boolean accept(File dir, String name) {
+ return name.endsWith(".yang")
+ && new File(dir, name).isFile();
+ }
+ });
+ for (File yangFile : yangFiles) {
+ yangsFromDependencies.add(new NamedFileInputStream(
+ yangFile));
+ }
+ }
+
+ } else {
+ ZipFile zip = new ZipFile(file);
+ zips.add(zip);
+
+ Enumeration<? extends ZipEntry> entries = zip.entries();
+ while (entries.hasMoreElements()) {
+ ZipEntry entry = entries.nextElement();
+ String entryName = entry.getName();
+
+ if (entryName
+ .startsWith(YangToSourcesProcessor.META_INF_YANG_STRING_JAR)) {
+ if (entry.isDirectory() == false
+ && entryName.endsWith(".yang")) {
+ foundFilesForReporting.add(entryName);
+ // This will be closed after all strams are
+ // parsed.
+ InputStream entryStream = zip
+ .getInputStream(entry);
+ yangsFromDependencies.add(entryStream);
+ }
+ }
+ }
+ }
+ if (foundFilesForReporting.size() > 0) {
+ log.info(Util.message("Found %d yang files in %s: %s",
+ YangToSourcesProcessor.LOG_PREFIX,
+ foundFilesForReporting.size(), file,
+ foundFilesForReporting));
+ }
+
+ }
+ } catch (Exception e) {
+ throw new MojoFailureException(e.getMessage(), e);
+ }
+ return new YangsInZipsResult(yangsFromDependencies, zips);
+ }
+
+ final static class ContextHolder {
+ private final SchemaContext context;
+ private final Set<Module> yangModules;
+
+ ContextHolder(SchemaContext context, Set<Module> yangModules) {
+ this.context = context;
+ this.yangModules = yangModules;
+ }
+
+ SchemaContext getContext() {
+ return context;
+ }
+
+ Set<Module> getYangModules() {
+ return yangModules;
+ }
}
}
@Parameter(property = "inspectDependencies", required = true, readonly = true)
private boolean inspectDependencies;
+ private YangToSourcesProcessor yangToSourcesProcessor;
+
public YangToSourcesMojo() {
}
@VisibleForTesting
- YangToSourcesMojo(CodeGeneratorArg[] codeGeneratorArgs,
- String yangFilesRootDir) {
- this.codeGenerators = codeGeneratorArgs;
- this.yangFilesRootDir = yangFilesRootDir;
+ YangToSourcesMojo(YangToSourcesProcessor processor) {
+ this.yangToSourcesProcessor = processor;
}
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
+ if (yangToSourcesProcessor == null) {
+ List<CodeGeneratorArg> codeGeneratorArgs = processCodeGenerators(codeGenerators);
+
+ // defaults to ${basedir}/src/main/yang
+ File yangFilesRootFile = processYangFilesRootDir(yangFilesRootDir,
+ project.getBasedir());
+
+ yangToSourcesProcessor = new YangToSourcesProcessor(getLog(),
+ yangFilesRootFile, codeGeneratorArgs, project,
+ inspectDependencies);
+ }
+ yangToSourcesProcessor.execute();
+ }
+
+ private static List<CodeGeneratorArg> processCodeGenerators(
+ CodeGeneratorArg[] codeGenerators) {
List<CodeGeneratorArg> codeGeneratorArgs;
if (codeGenerators == null) {
codeGeneratorArgs = Collections.emptyList();
} else {
codeGeneratorArgs = Arrays.asList(codeGenerators);
}
+ return codeGeneratorArgs;
+ }
- // defaults to ${basedir}/src/main/yang
+ private static File processYangFilesRootDir(String yangFilesRootDir,
+ File baseDir) {
File yangFilesRootFile;
if (yangFilesRootDir == null) {
- yangFilesRootFile = new File(project.getBasedir(), "src"
- + File.separator + "main" + File.separator + "yang");
+ yangFilesRootFile = new File(baseDir, "src" + File.separator
+ + "main" + File.separator + "yang");
} else {
File file = new File(yangFilesRootDir);
if (file.isAbsolute()) {
yangFilesRootFile = file;
} else {
- yangFilesRootFile = new File(project.getBasedir(),
- file.getPath());
+ yangFilesRootFile = new File(baseDir, file.getPath());
}
}
- new YangToSourcesProcessor(getLog(), yangFilesRootFile,
- codeGeneratorArgs, project, inspectDependencies).execute();
+ return yangFilesRootFile;
}
}
import java.io.Closeable;
import java.io.File;
-import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.Enumeration;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
import org.apache.maven.model.Resource;
import org.apache.maven.plugin.MojoExecutionException;
import org.opendaylight.controller.yang.model.api.SchemaContext;
import org.opendaylight.controller.yang.parser.impl.YangParserImpl;
import org.opendaylight.controller.yang2sources.plugin.ConfigArg.CodeGeneratorArg;
-import org.opendaylight.controller.yang2sources.plugin.Util.NamedFileInputStream;
+import org.opendaylight.controller.yang2sources.plugin.Util.ContextHolder;
+import org.opendaylight.controller.yang2sources.plugin.Util.YangsInZipsResult;
import org.opendaylight.controller.yang2sources.spi.CodeGenerator;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
class YangToSourcesProcessor {
- private static final String LOG_PREFIX = "yang-to-sources:";
- private static final String META_INF_YANG_STRING = "META-INF"
- + File.separator + "yang";
- private static final File META_INF_YANG_DIR = new File(META_INF_YANG_STRING);
+ static final String LOG_PREFIX = "yang-to-sources:";
+ static final String META_INF_YANG_STRING = "META-INF" + File.separator
+ + "yang";
+ static final String META_INF_YANG_STRING_JAR = "META-INF" + "/" + "yang";
+ static final File META_INF_YANG_DIR = new File(META_INF_YANG_STRING);
private final Log log;
private final File yangFilesRootDir;
private final List<CodeGeneratorArg> codeGenerators;
private final MavenProject project;
private final boolean inspectDependencies;
+ private YangProvider yangProvider;
+ @VisibleForTesting
YangToSourcesProcessor(Log log, File yangFilesRootDir,
- List<CodeGeneratorArg> codeGenerators, MavenProject project,
- boolean inspectDependencies) {
- this.log = checkNotNull(log, "log");
- this.yangFilesRootDir = checkNotNull(yangFilesRootDir,
+ List<CodeGeneratorArg> codeGenerators, MavenProject project,
+ boolean inspectDependencies, YangProvider yangProvider) {
+ this.log = Util.checkNotNull(log, "log");
+ this.yangFilesRootDir = Util.checkNotNull(yangFilesRootDir,
"yangFilesRootDir");
- this.codeGenerators = Collections.unmodifiableList(checkNotNull(
+ this.codeGenerators = Collections.unmodifiableList(Util.checkNotNull(
codeGenerators, "codeGenerators"));
- this.project = checkNotNull(project, "project");
+ this.project = Util.checkNotNull(project, "project");
this.inspectDependencies = inspectDependencies;
+ this.yangProvider = yangProvider;
}
- private static <T> T checkNotNull(T obj, String paramName) {
- if (obj == null)
- throw new NullPointerException("Parameter '" + paramName
- + "' is null");
- return obj;
+ YangToSourcesProcessor(Log log, File yangFilesRootDir,
+ List<CodeGeneratorArg> codeGenerators, MavenProject project,
+ boolean inspectDependencies) {
+ this(log, yangFilesRootDir, codeGenerators, project,
+ inspectDependencies, new YangProvider());
}
public void execute() throws MojoExecutionException, MojoFailureException {
ContextHolder context = processYang();
generateSources(context);
- addYangsToMETA_INF();
+ yangProvider.addYangsToMETA_INF(log, project, yangFilesRootDir);
}
private ContextHolder processYang() throws MojoExecutionException {
Set<Module> projectYangModules;
try {
if (inspectDependencies) {
- YangsInZipsResult dependentYangResult = findYangFilesInDependenciesAsStream();
+ YangsInZipsResult dependentYangResult = Util
+ .findYangFilesInDependenciesAsStream(log, project);
Closeable dependentYangResult1 = dependentYangResult;
closeables.add(dependentYangResult1);
all.addAll(dependentYangResult.yangStreams);
}
}
- private void addYangsToMETA_INF() throws MojoFailureException {
- Resource res = new Resource();
+ static class YangProvider {
- File targetYangDir = new File(project.getBasedir(), "target"
- + File.separator + "yang");
- res.setDirectory(targetYangDir.getPath());
+ private static final String yangResourceDir = "target" + File.separator
+ + "yang";
- res.setTargetPath(META_INF_YANG_DIR.getPath());
- try {
- FileUtils.copyDirectory(yangFilesRootDir, targetYangDir);
- } catch (IOException e) {
- throw new MojoFailureException(e.getMessage(), e);
+ void addYangsToMETA_INF(Log log, MavenProject project,
+ File yangFilesRootDir) throws MojoFailureException {
+ File targetYangDir = new File(project.getBasedir(), yangResourceDir);
+
+ try {
+ FileUtils.copyDirectory(yangFilesRootDir, targetYangDir);
+ } catch (IOException e) {
+ String message = "Unable to copy yang files into resource folder";
+ log.warn(message, e);
+ throw new MojoFailureException(message, e);
+ }
+
+ setResource(targetYangDir, META_INF_YANG_DIR.getPath(), project);
+
+ log.debug(Util.message(
+ "Yang files from: %s marked as resources: %s", LOG_PREFIX,
+ yangFilesRootDir, META_INF_YANG_DIR.getPath()));
+ }
+
+ private static void setResource(File targetYangDir, String targetPath,
+ MavenProject project) {
+ Resource res = new Resource();
+ res.setDirectory(targetYangDir.getPath());
+ if (targetPath != null)
+ res.setTargetPath(targetPath);
+ project.addResource(res);
}
- project.addResource(res);
}
/**
* Instantiate generator from class and call required method
*/
private void generateSourcesWithOneGenerator(ContextHolder context,
- CodeGeneratorArg codeGeneratorCfg) throws ClassNotFoundException,
+ CodeGeneratorArg codeGeneratorCfg) throws ClassNotFoundException,
InstantiationException, IllegalAccessException, IOException {
codeGeneratorCfg.check();
log.info(Util.message("Sources will be generated to %s", LOG_PREFIX,
outputDir));
- log.info(Util.message("Project root dir is %s", LOG_PREFIX,
+ log.debug(Util.message("Project root dir is %s", LOG_PREFIX,
project.getBasedir()));
- log.info(Util.message(
+ log.debug(Util.message(
"Additional configuration picked up for : %s: %s", LOG_PREFIX,
codeGeneratorCfg.getCodeGeneratorClass(),
codeGeneratorCfg.getAdditionalConfiguration()));
+
project.addCompileSourceRoot(outputDir.getAbsolutePath());
g.setLog(log);
+ g.setMavenProject(project);
g.setAdditionalConfig(codeGeneratorCfg.getAdditionalConfiguration());
- Collection<File> generated = g.generateSources(context.getContext(),
- outputDir, context.getYangModules(), project.getBasedir());
- log.info(Util.message("Sources generated by %s: %s", LOG_PREFIX,
- codeGeneratorCfg.getCodeGeneratorClass(), generated));
- }
-
- private class YangsInZipsResult implements Closeable {
- private final List<InputStream> yangStreams;
- private final List<Closeable> zipInputStreams;
+ File resourceBaseDir = codeGeneratorCfg.getResourceBaseDir(project);
- private YangsInZipsResult(List<InputStream> yangStreams,
- List<Closeable> zipInputStreams) {
- this.yangStreams = yangStreams;
- this.zipInputStreams = zipInputStreams;
- }
-
- @Override
- public void close() throws IOException {
- for (InputStream is : yangStreams) {
- is.close();
- }
- for (Closeable is : zipInputStreams) {
- is.close();
- }
- }
- }
+ YangProvider.setResource(resourceBaseDir, null, project);
+ g.setResourceBaseDir(resourceBaseDir);
+ log.debug(Util.message(
+ "Folder: %s marked as resources for generator: %s", LOG_PREFIX,
+ resourceBaseDir, codeGeneratorCfg.getCodeGeneratorClass()));
- private YangsInZipsResult findYangFilesInDependenciesAsStream()
- throws MojoFailureException {
- List<InputStream> yangsFromDependencies = new ArrayList<>();
- List<Closeable> zips = new ArrayList<>();
- try {
- List<File> filesOnCp = Util.getClassPath(project);
- log.info(Util.message(
- "Searching for yang files in following dependencies: %s",
- LOG_PREFIX, filesOnCp));
-
- for (File file : filesOnCp) {
- List<String> foundFilesForReporting = new ArrayList<>();
- // is it jar file or directory?
- if (file.isDirectory()) {
- File yangDir = new File(file, META_INF_YANG_STRING);
- if (yangDir.exists() && yangDir.isDirectory()) {
- File[] yangFiles = yangDir
- .listFiles(new FilenameFilter() {
- @Override
- public boolean accept(File dir, String name) {
- return name.endsWith(".yang")
- && new File(dir, name).isFile();
- }
- });
- for (File yangFile : yangFiles) {
- yangsFromDependencies.add(new NamedFileInputStream(
- yangFile));
- }
- }
-
- } else {
- ZipFile zip = new ZipFile(file);
- zips.add(zip);
-
- Enumeration<? extends ZipEntry> entries = zip.entries();
- while (entries.hasMoreElements()) {
- ZipEntry entry = entries.nextElement();
- String entryName = entry.getName();
-
- if (entryName.startsWith(META_INF_YANG_STRING)) {
- if (entry.isDirectory() == false
- && entryName.endsWith(".yang")) {
- foundFilesForReporting.add(entryName);
- // This will be closed after all strams are
- // parsed.
- InputStream entryStream = zip
- .getInputStream(entry);
- yangsFromDependencies.add(entryStream);
- }
- }
- }
- }
- if (foundFilesForReporting.size() > 0) {
- log.info(Util.message("Found %d yang files in %s: %s",
- LOG_PREFIX, foundFilesForReporting.size(), file,
- foundFilesForReporting));
- }
+ Collection<File> generated = g.generateSources(context.getContext(),
+ outputDir, context.getYangModules());
- }
- } catch (Exception e) {
- throw new MojoFailureException(e.getMessage(), e);
- }
- return new YangsInZipsResult(yangsFromDependencies, zips);
+ log.info(Util.message("Sources generated by %s: %s", LOG_PREFIX,
+ codeGeneratorCfg.getCodeGeneratorClass(), generated));
}
- private class ContextHolder {
- private final SchemaContext context;
- private final Set<Module> yangModules;
-
- private ContextHolder(SchemaContext context, Set<Module> yangModules) {
- this.context = context;
- this.yangModules = yangModules;
- }
-
- public SchemaContext getContext() {
- return context;
- }
-
- public Set<Module> getYangModules() {
- return yangModules;
- }
- }
}
*/
package org.opendaylight.controller.yang2sources.plugin;
-import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.Is.*;
import static org.junit.Assert.*;
-import static org.mockito.Mockito.doReturn;
+import static org.junit.matchers.JUnitMatchers.*;
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
+import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugin.logging.Log;
import org.apache.maven.project.MavenProject;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.yang.model.api.Module;
import org.opendaylight.controller.yang.model.api.SchemaContext;
import org.opendaylight.controller.yang2sources.plugin.ConfigArg.CodeGeneratorArg;
+import org.opendaylight.controller.yang2sources.plugin.YangToSourcesProcessor.YangProvider;
import org.opendaylight.controller.yang2sources.spi.CodeGenerator;
import com.google.common.collect.Lists;
private MavenProject project;
@Before
- public void setUp() {
+ public void setUp() throws MojoFailureException {
MockitoAnnotations.initMocks(this);
- yang = new File(getClass().getResource("/mock.yang").getFile())
+ yang = new File(getClass().getResource("/yang/mock.yang").getFile())
.getParent();
outDir = new File("/outputDir");
- mojo = new YangToSourcesMojo(
- new CodeGeneratorArg[] { new CodeGeneratorArg(
- GeneratorMock.class.getName(), "outputDir") }, yang);
+ YangProvider mock = mock(YangProvider.class);
+ doNothing().when(mock).addYangsToMETA_INF(any(Log.class),
+ any(MavenProject.class), any(File.class));
+
+ YangToSourcesProcessor processor = new YangToSourcesProcessor(
+ mock(Log.class), new File(yang),
+ Lists.newArrayList(new CodeGeneratorArg(GeneratorMock.class
+ .getName(), "outputDir")), project, false,
+ mock);
+ mojo = new YangToSourcesMojo(processor);
doReturn(new File("")).when(project).getBasedir();
mojo.project = project;
}
- @Ignore
@Test
public void test() throws Exception {
mojo.execute();
assertThat(GeneratorMock.called, is(1));
assertThat(GeneratorMock.outputDir, is(outDir));
+ assertThat(GeneratorMock.project, is(project));
assertNotNull(GeneratorMock.log);
assertTrue(GeneratorMock.additionalCfg.isEmpty());
+ assertThat(GeneratorMock.resourceBaseDir.toString(),
+ containsString("target" + File.separator
+ + "generated-resources"));
}
public static class GeneratorMock implements CodeGenerator {
private static File outputDir;
private static Log log;
private static Map<String, String> additionalCfg;
+ private static File resourceBaseDir;
+ private static MavenProject project;
@Override
public Collection<File> generateSources(SchemaContext context,
- File outputBaseDir, Set<Module> currentModules,
- File projectBaseDir) throws IOException {
+ File outputBaseDir, Set<Module> currentModules)
+ throws IOException {
called++;
outputDir = outputBaseDir;
return Lists.newArrayList();
@Override
public void setLog(Log log) {
- this.log = log;
+ GeneratorMock.log = log;
}
@Override
public void setAdditionalConfig(
Map<String, String> additionalConfiguration) {
- this.additionalCfg = additionalConfiguration;
+ GeneratorMock.additionalCfg = additionalConfiguration;
+ }
+
+ @Override
+ public void setResourceBaseDir(File resourceBaseDir) {
+ GeneratorMock.resourceBaseDir = resourceBaseDir;
+
+ }
+
+ @Override
+ public void setMavenProject(MavenProject project) {
+ GeneratorMock.project = project;
}
}
*/
package org.opendaylight.controller.yang2sources.plugin;
-import org.junit.Test;
+import static org.junit.Assert.*;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.Collection;
-import static org.junit.Assert.assertTrue;
+import org.junit.Test;
public class UtilTest {
@Test
public void testCache() throws FileNotFoundException {
- String yang = new File(getClass().getResource("/mock.yang").getFile())
+ String yang = new File(getClass().getResource("/yang/mock.yang")
+ .getFile())
.getParent();
Collection<File> files = Util.listFiles(new File(yang));
Collection<File> files2 = Util.listFiles(new File(yang));
<artifactId>maven-plugin-api</artifactId>
<version>3.0.5</version>
</dependency>
+ <dependency>
+ <groupId>org.apache.maven</groupId>
+ <artifactId>maven-core</artifactId>
+ <version>3.0.5</version>
+ </dependency>
</dependencies>
<build>
import java.util.Set;
import org.apache.maven.plugin.logging.Log;
+import org.apache.maven.project.MavenProject;
import org.opendaylight.controller.yang.model.api.Module;
import org.opendaylight.controller.yang.model.api.SchemaContext;
* @throws IOException
*/
Collection<File> generateSources(SchemaContext context, File outputBaseDir,
- Set<Module> currentModules, File projectBaseDir)
- throws IOException;
+ Set<Module> currentModules) throws IOException;
+ /**
+ * Utilize maven logging if necessary
+ *
+ * @param log
+ */
void setLog(Log log);
+ /**
+ * Provided map contains all configuration that was set in pom for code
+ * generator in additionalConfiguration tag
+ *
+ * @param additionalConfiguration
+ */
void setAdditionalConfig(Map<String, String> additionalConfiguration);
+
+ /**
+ * Provided folder is marked as resources and its content will be packaged
+ * in resulting jar. Feel free to add necessary resources
+ *
+ * @param resourceBaseDir
+ */
+ void setResourceBaseDir(File resourceBaseDir);
+
+ /**
+ * Provided maven project object. Any additional information about current
+ * maven project can be accessed from it.
+ *
+ * @param resourceBaseDir
+ */
+ void setMavenProject(MavenProject project);
}
import java.util.Set;
import org.apache.maven.plugin.logging.Log;
+import org.apache.maven.project.MavenProject;
import org.opendaylight.controller.yang.model.api.Module;
import org.opendaylight.controller.yang.model.api.SchemaContext;
public class CodeGeneratorTestImpl implements CodeGenerator {
+ private Log log;
+
@Override
public Collection<File> generateSources(SchemaContext context,
- File outputBaseDir, Set<Module> currentModuleBuilders,
- File projectMainDir) {
- // no-op
+ File outputBaseDir, Set<Module> currentModuleBuilders) {
+ if (log != null) {
+ log.debug(getClass().getCanonicalName()
+ + " generateSources:context: " + context);
+ log.debug(getClass().getCanonicalName()
+ + " generateSources:outputBaseDir: " + outputBaseDir);
+ log.debug(getClass().getCanonicalName()
+ + " generateSources:currentModuleBuilders: "
+ + currentModuleBuilders);
+
+ }
return null;
}
@Override
public void setLog(Log log) {
- // no-op
+ this.log = log;
}
@Override
public void setAdditionalConfig(Map<String, String> additionalConfiguration) {
- // no-op
+ if (log != null)
+ log.debug(getClass().getCanonicalName() + " additionalConfig: "
+ + additionalConfiguration);
+ }
+
+
+ @Override
+ public void setResourceBaseDir(File resourceBaseDir) {
+ if (log != null)
+ log.debug(getClass().getCanonicalName() + " resourceBaseDir: "
+ + resourceBaseDir);
+ }
+
+ @Override
+ public void setMavenProject(MavenProject project) {
+ if (log != null)
+ log.debug(getClass().getCanonicalName() + " maven project: "
+ + project);
}
}
-
typedef topology-id {
type string;
}
private final Map<List<String>, GroupingBuilder> addedGroupings = new HashMap<List<String>, GroupingBuilder>();
private final List<AugmentationSchemaBuilder> addedAugments = new ArrayList<AugmentationSchemaBuilder>();
private final Map<List<String>, UsesNodeBuilder> addedUsesNodes = new HashMap<List<String>, UsesNodeBuilder>();
- //private final Map<List<String>, RefineHolder> addedRefines = new HashMap<List<String>, RefineHolder>();
private final Map<List<String>, RpcDefinitionBuilder> addedRpcs = new HashMap<List<String>, RpcDefinitionBuilder>();
private final Set<NotificationBuilder> addedNotifications = new HashSet<NotificationBuilder>();
private final Set<IdentitySchemaNodeBuilder> addedIdentities = new HashSet<IdentitySchemaNodeBuilder>();
actualPath.pop();
}
+ public Builder getActualNode() {
+ if (actualPath.isEmpty()) {
+ return null;
+ } else {
+ return actualPath.get(0);
+ }
+ }
+
public Builder getModuleNode(final List<String> path) {
return childNodes.get(path);
}
return builder;
}
+ @Override
+ public String toString() {
+ return ModuleBuilder.class.getSimpleName() + "[" + name + "]";
+ }
+
private final class ModuleImpl implements Module {
private URI namespace;
private final String name;
if (parent instanceof AugmentationSchemaBuilder) {
nodeBuilder.setAugmenting(true);
}
- ((DataNodeContainerBuilder) parent)
- .addChildNode(nodeBuilder);
+ ((DataNodeContainerBuilder) parent).addChildNode(nodeBuilder);
} else if (parent instanceof ChoiceBuilder) {
((ChoiceBuilder) parent).addChildNode(nodeBuilder);
} else {
private SchemaPath schemaPath;
private final List<UnknownSchemaNodeBuilder> addedUnknownNodes = new ArrayList<UnknownSchemaNodeBuilder>();
+ private List<UnknownSchemaNode> unknownNodes;
private List<RangeConstraint> ranges = Collections.emptyList();
private List<LengthConstraint> lengths = Collections.emptyList();
private List<PatternConstraint> patterns = Collections.emptyList();
typeBuilder.fractionDigits(fractionDigits);
// UNKNOWN NODES
- final List<UnknownSchemaNode> unknownNodes = new ArrayList<UnknownSchemaNode>();
- for (UnknownSchemaNodeBuilder b : addedUnknownNodes) {
- unknownNodes.add(b.build());
+ if (unknownNodes == null) {
+ unknownNodes = new ArrayList<UnknownSchemaNode>();
+ for (UnknownSchemaNodeBuilder b : addedUnknownNodes) {
+ unknownNodes.add(b.build());
+ }
}
typeBuilder.unknownSchemaNodes(unknownNodes);
result = typeBuilder.build();
addedUnknownNodes.add(unknownNode);
}
+ public void setUnknownNodes(List<UnknownSchemaNode> unknownNodes) {
+ this.unknownNodes = unknownNodes;
+ }
+
@Override
public List<RangeConstraint> getRanges() {
return ranges;
import org.opendaylight.controller.yang.model.api.type.BinaryTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.DecimalTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.IntegerTypeDefinition;
-import org.opendaylight.controller.yang.model.api.type.LengthConstraint;
-import org.opendaylight.controller.yang.model.api.type.PatternConstraint;
-import org.opendaylight.controller.yang.model.api.type.RangeConstraint;
import org.opendaylight.controller.yang.model.api.type.StringTypeDefinition;
import org.opendaylight.controller.yang.model.parser.api.YangModelParser;
import org.opendaylight.controller.yang.model.util.ExtendedType;
import org.opendaylight.controller.yang.model.util.IdentityrefType;
import org.opendaylight.controller.yang.model.util.UnknownType;
-import org.opendaylight.controller.yang.model.util.YangTypesConverter;
import org.opendaylight.controller.yang.parser.builder.api.AugmentationSchemaBuilder;
import org.opendaylight.controller.yang.parser.builder.api.AugmentationTargetBuilder;
import org.opendaylight.controller.yang.parser.builder.api.Builder;
final YangLexer lexer = new YangLexer(input);
final CommonTokenStream tokens = new CommonTokenStream(lexer);
final YangParser parser = new YangParser(tokens);
+
result = parser.yang();
} catch (IOException e) {
logger.warn("Exception while reading yang file: " + yangStream, e);
if (!dirtyNodes.isEmpty()) {
for (Map.Entry<List<String>, TypeAwareBuilder> entry : dirtyNodes
.entrySet()) {
-
final TypeAwareBuilder nodeToResolve = entry.getValue();
- // different handling for union types
+
if (nodeToResolve instanceof UnionTypeBuilder) {
- final UnionTypeBuilder union = (UnionTypeBuilder) nodeToResolve;
- final List<TypeDefinition<?>> unionTypes = union.getTypes();
- final List<UnknownType> toRemove = new ArrayList<UnknownType>();
- for (TypeDefinition<?> td : unionTypes) {
- if (td instanceof UnknownType) {
- final UnknownType unknownType = (UnknownType) td;
- final TypeDefinitionBuilder resolvedType = resolveTypeUnion(
- nodeToResolve, unknownType, modules, module);
- union.setTypedef(resolvedType);
- toRemove.add(unknownType);
- }
- }
- unionTypes.removeAll(toRemove);
+ // special handling for union types
+ resolveTypeUnion((UnionTypeBuilder) nodeToResolve, modules,
+ module);
} else if (nodeToResolve.getTypedef() instanceof IdentityrefTypeBuilder) {
- // different handling for identityref types
+ // special handling for identityref types
IdentityrefTypeBuilder idref = (IdentityrefTypeBuilder) nodeToResolve
.getTypedef();
nodeToResolve.setType(new IdentityrefType(findFullQName(
modules, module, idref), idref.getPath()));
} else {
- final TypeDefinitionBuilder resolvedType = resolveType(
- nodeToResolve, modules, module);
- nodeToResolve.setTypedef(resolvedType);
+ resolveType(nodeToResolve, modules, module);
}
}
}
}
- private TypeDefinitionBuilder resolveType(
- final TypeAwareBuilder nodeToResolve,
- final Map<String, TreeMap<Date, ModuleBuilder>> modules,
- final ModuleBuilder builder) {
- final TypeConstraints constraints = new TypeConstraints();
-
- final TypeDefinitionBuilder targetTypeBuilder = getTypeDefinitionBuilderFromDirtyNode(
- nodeToResolve, modules, builder);
- final TypeConstraints tConstraints = findConstraints(nodeToResolve,
- constraints, modules, builder);
- targetTypeBuilder.setRanges(tConstraints.getRange());
- targetTypeBuilder.setLengths(tConstraints.getLength());
- targetTypeBuilder.setPatterns(tConstraints.getPatterns());
- targetTypeBuilder.setFractionDigits(tConstraints.getFractionDigits());
-
- return targetTypeBuilder;
- }
-
- private TypeDefinitionBuilder resolveTypeUnion(
- final TypeAwareBuilder typeToResolve,
- final UnknownType unknownType,
+ private void resolveType(final TypeAwareBuilder nodeToResolve,
final Map<String, TreeMap<Date, ModuleBuilder>> modules,
final ModuleBuilder builder) {
- final TypeConstraints constraints = new TypeConstraints();
-
- final TypeDefinitionBuilder targetTypeBuilder = getUnionBuilder(
- typeToResolve, unknownType, modules, builder);
- final TypeConstraints tConstraints = findConstraints(typeToResolve,
- constraints, modules, builder);
- targetTypeBuilder.setRanges(tConstraints.getRange());
- targetTypeBuilder.setLengths(tConstraints.getLength());
- targetTypeBuilder.setPatterns(tConstraints.getPatterns());
- targetTypeBuilder.setFractionDigits(tConstraints.getFractionDigits());
-
- return targetTypeBuilder;
- }
-
- private TypeDefinitionBuilder getTypeDefinitionBuilderFromDirtyNode(
- final TypeAwareBuilder nodeToResolve,
- final Map<String, TreeMap<Date, ModuleBuilder>> modules,
- final ModuleBuilder module) {
-
- final UnknownType unknownType = (UnknownType) nodeToResolve.getType();
- final QName unknownTypeQName = unknownType.getQName();
-
- // search for module which contains referenced typedef
+ TypeDefinitionBuilder resolvedType = null;
+ final int line = nodeToResolve.getLine();
+ final TypeDefinition<?> typedefType = nodeToResolve.getType();
+ final QName unknownTypeQName = typedefType.getBaseType().getQName();
final ModuleBuilder dependentModule = findDependentModule(modules,
- module, unknownTypeQName.getPrefix(), nodeToResolve.getLine());
-
- final TypeDefinitionBuilder lookedUpBuilder = findTypeDefinitionBuilder(
- nodeToResolve.getPath(), dependentModule,
- unknownTypeQName.getLocalName(), module.getName(),
- nodeToResolve.getLine());
-
- final TypeDefinitionBuilder lookedUpBuilderCopy = copyTypedefBuilder(
- lookedUpBuilder, nodeToResolve instanceof TypeDefinitionBuilder);
- final TypeDefinitionBuilder resolvedCopy = resolveCopiedBuilder(
- lookedUpBuilderCopy, modules, dependentModule);
- return resolvedCopy;
- }
-
- private TypeDefinitionBuilder getUnionBuilder(
- final TypeAwareBuilder nodeToResolve,
- final UnknownType unknownType,
- final Map<String, TreeMap<Date, ModuleBuilder>> modules,
- final ModuleBuilder module) {
+ builder, unknownTypeQName.getPrefix(), line);
- final TypeDefinition<?> baseTypeToResolve = nodeToResolve.getType();
- if (baseTypeToResolve != null
- && !(baseTypeToResolve instanceof UnknownType)) {
- return (TypeDefinitionBuilder) nodeToResolve;
- }
-
- final QName unknownTypeQName = unknownType.getQName();
- // search for module which contains referenced typedef
- final ModuleBuilder dependentModule = findDependentModule(modules,
- module, unknownTypeQName.getPrefix(), nodeToResolve.getLine());
- final TypeDefinitionBuilder lookedUpBuilder = findTypeDefinitionBuilder(
+ final TypeDefinitionBuilder targetTypeBuilder = findTypeDefinitionBuilder(
nodeToResolve.getPath(), dependentModule,
- unknownTypeQName.getLocalName(), module.getName(),
- nodeToResolve.getLine());
-
- final TypeDefinitionBuilder lookedUpBuilderCopy = copyTypedefBuilder(
- lookedUpBuilder, nodeToResolve instanceof TypeDefinitionBuilder);
- final TypeDefinitionBuilder resolvedCopy = resolveCopiedBuilder(
- lookedUpBuilderCopy, modules, dependentModule);
- return resolvedCopy;
- }
+ unknownTypeQName.getLocalName(), builder.getName(), line);
- private TypeDefinitionBuilder copyTypedefBuilder(
- final TypeDefinitionBuilder old, final boolean seekByTypedefBuilder) {
- if (old instanceof UnionTypeBuilder) {
- final UnionTypeBuilder oldUnion = (UnionTypeBuilder) old;
- final UnionTypeBuilder newUnion = new UnionTypeBuilder(
- old.getLine());
- for (TypeDefinition<?> td : oldUnion.getTypes()) {
- newUnion.setType(td);
- }
- for (TypeDefinitionBuilder tdb : oldUnion.getTypedefs()) {
- newUnion.setTypedef(copyTypedefBuilder(tdb, true));
- }
- newUnion.setPath(old.getPath());
- return newUnion;
- }
-
- final QName oldName = old.getQName();
- final QName newName = new QName(oldName.getNamespace(),
- oldName.getRevision(), oldName.getPrefix(),
- oldName.getLocalName());
- final TypeDefinitionBuilder tdb = new TypeDefinitionBuilderImpl(
- newName, old.getLine());
-
- tdb.setRanges(old.getRanges());
- tdb.setLengths(old.getLengths());
- tdb.setPatterns(old.getPatterns());
- tdb.setFractionDigits(old.getFractionDigits());
- tdb.setPath(old.getPath());
-
- final TypeDefinition<?> oldType = old.getType();
- if (oldType == null) {
- tdb.setTypedef(old.getTypedef());
+ if (typedefType instanceof ExtendedType) {
+ final ExtendedType extType = (ExtendedType) typedefType;
+ final TypeDefinitionBuilder newType = extendedTypeWithNewBaseType(
+ nodeToResolve, targetTypeBuilder, extType, modules, builder);
+ resolvedType = newType;
} else {
- tdb.setType(oldType);
+ resolvedType = targetTypeBuilder;
}
-
- if (!seekByTypedefBuilder) {
- tdb.setDescription(old.getDescription());
- tdb.setReference(old.getReference());
- tdb.setStatus(old.getStatus());
- tdb.setDefaultValue(old.getDefaultValue());
- tdb.setUnits(old.getUnits());
- }
- return tdb;
+ nodeToResolve.setTypedef(resolvedType);
}
- private TypeDefinitionBuilder resolveCopiedBuilder(
- final TypeDefinitionBuilder copy,
+ private void resolveTypeUnion(final UnionTypeBuilder union,
final Map<String, TreeMap<Date, ModuleBuilder>> modules,
final ModuleBuilder builder) {
- if (copy instanceof UnionTypeBuilder) {
- final UnionTypeBuilder union = (UnionTypeBuilder) copy;
- final List<TypeDefinition<?>> unionTypes = union.getTypes();
- final List<UnknownType> toRemove = new ArrayList<UnknownType>();
- for (TypeDefinition<?> td : unionTypes) {
- if (td instanceof UnknownType) {
- final UnknownType unknownType = (UnknownType) td;
- final TypeDefinitionBuilder resolvedType = resolveTypeUnion(
- union, unknownType, modules, builder);
- union.setTypedef(resolvedType);
- toRemove.add(unknownType);
+ final List<TypeDefinition<?>> unionTypes = union.getTypes();
+ final List<TypeDefinition<?>> toRemove = new ArrayList<TypeDefinition<?>>();
+ for (TypeDefinition<?> unionType : unionTypes) {
+ if (unionType instanceof UnknownType) {
+ final UnknownType ut = (UnknownType) unionType;
+ final ModuleBuilder dependentModule = findDependentModule(
+ modules, builder, ut.getQName().getPrefix(),
+ union.getLine());
+ final TypeDefinitionBuilder resolvedType = findTypeDefinitionBuilder(
+ union.getPath(), dependentModule, ut.getQName()
+ .getLocalName(), builder.getName(),
+ union.getLine());
+ union.setTypedef(resolvedType);
+ toRemove.add(ut);
+ } else if (unionType instanceof ExtendedType) {
+ final ExtendedType extType = (ExtendedType) unionType;
+ TypeDefinition<?> extTypeBase = extType.getBaseType();
+ if (extTypeBase instanceof UnknownType) {
+ final UnknownType ut = (UnknownType) extTypeBase;
+ final ModuleBuilder dependentModule = findDependentModule(
+ modules, builder, ut.getQName().getPrefix(),
+ union.getLine());
+ final TypeDefinitionBuilder targetTypeBuilder = findTypeDefinitionBuilder(
+ union.getPath(), dependentModule, ut.getQName()
+ .getLocalName(), builder.getName(),
+ union.getLine());
+
+ final TypeDefinitionBuilder newType = extendedTypeWithNewBaseType(
+ targetTypeBuilder, targetTypeBuilder, extType,
+ modules, builder);
+
+ union.setTypedef(newType);
+ toRemove.add(extType);
}
}
- unionTypes.removeAll(toRemove);
-
- return union;
}
+ unionTypes.removeAll(toRemove);
+ }
- final TypeDefinition<?> base = copy.getType();
- final TypeDefinitionBuilder baseTdb = copy.getTypedef();
- if (base != null && !(base instanceof UnknownType)) {
- return copy;
- } else if (base instanceof UnknownType) {
- final UnknownType unknownType = (UnknownType) base;
- final QName unknownTypeQName = unknownType.getQName();
- final String unknownTypePrefix = unknownTypeQName.getPrefix();
- final ModuleBuilder dependentModule = findDependentModule(modules,
- builder, unknownTypePrefix, copy.getLine());
- final TypeDefinitionBuilder utBuilder = getTypeDefinitionBuilderFromDirtyNode(
- copy, modules, dependentModule);
- copy.setTypedef(utBuilder);
- return copy;
- } else if (base == null && baseTdb != null) {
- // make a copy of baseTypeDef and call again
- final TypeDefinitionBuilder baseTdbCopy = copyTypedefBuilder(
- baseTdb, true);
- final TypeDefinitionBuilder baseTdbCopyResolved = resolveCopiedBuilder(
- baseTdbCopy, modules, builder);
- copy.setTypedef(baseTdbCopyResolved);
- return copy;
- } else {
- throw new YangParseException(copy.getLine(),
- "Failed to resolve type " + copy.getQName().getLocalName());
- }
+ private TypeDefinitionBuilder extendedTypeWithNewBaseType(
+ final TypeAwareBuilder nodeToResolve,
+ final TypeDefinitionBuilder newBaseType,
+ final ExtendedType oldExtendedType,
+ final Map<String, TreeMap<Date, ModuleBuilder>> modules,
+ final ModuleBuilder builder) {
+ final TypeConstraints constraints = findConstraints(nodeToResolve,
+ new TypeConstraints(), modules, builder);
+ final TypeDefinitionBuilderImpl newType = new TypeDefinitionBuilderImpl(
+ oldExtendedType.getQName(), nodeToResolve.getLine());
+ newType.setTypedef(newBaseType);
+ newType.setPath(oldExtendedType.getPath());
+ newType.setDescription(oldExtendedType.getDescription());
+ newType.setReference(oldExtendedType.getReference());
+ newType.setStatus(oldExtendedType.getStatus());
+ newType.setLengths(constraints.getLength());
+ newType.setPatterns(constraints.getPatterns());
+ newType.setRanges(constraints.getRange());
+ newType.setFractionDigits(constraints.getFractionDigits());
+ newType.setUnits(oldExtendedType.getUnits());
+ newType.setDefaultValue(oldExtendedType.getDefaultValue());
+ newType.setUnknownNodes(oldExtendedType.getUnknownSchemaNodes());
+ return newType;
}
private TypeConstraints findConstraints(
final TypeConstraints constraints,
final Map<String, TreeMap<Date, ModuleBuilder>> modules,
final ModuleBuilder builder) {
+
// union type cannot be restricted
if (nodeToResolve instanceof UnionTypeBuilder) {
return constraints;
}
- // if referenced type is UnknownType again, search recursively with
- // current constraints
- final TypeDefinition<?> referencedType = nodeToResolve.getType();
- List<RangeConstraint> ranges = Collections.emptyList();
- List<LengthConstraint> lengths = Collections.emptyList();
- List<PatternConstraint> patterns = Collections.emptyList();
- Integer fractionDigits = null;
- if (referencedType == null) {
- final TypeDefinitionBuilder tdb = nodeToResolve.getTypedef();
- ranges = tdb.getRanges();
- constraints.addRanges(ranges);
- lengths = tdb.getLengths();
- constraints.addLengths(lengths);
- patterns = tdb.getPatterns();
- constraints.addPatterns(patterns);
- fractionDigits = tdb.getFractionDigits();
- constraints.setFractionDigits(fractionDigits);
- return constraints;
- } else if (referencedType instanceof ExtendedType) {
- final ExtendedType ext = (ExtendedType) referencedType;
- ranges = ext.getRanges();
- constraints.addRanges(ranges);
- lengths = ext.getLengths();
- constraints.addLengths(lengths);
- patterns = ext.getPatterns();
- constraints.addPatterns(patterns);
- fractionDigits = ext.getFractionDigits();
- constraints.setFractionDigits(fractionDigits);
- if(YangTypesConverter.isBaseYangType(ext.getBaseType().getQName().getLocalName())) {
- mergeConstraints(ext.getBaseType(), constraints);
- return constraints;
+ if (nodeToResolve instanceof TypeDefinitionBuilder) {
+ TypeDefinitionBuilder typedefToResolve = (TypeDefinitionBuilder) nodeToResolve;
+ constraints.addFractionDigits(typedefToResolve.getFractionDigits());
+ constraints.addLengths(typedefToResolve.getLengths());
+ constraints.addPatterns(typedefToResolve.getPatterns());
+ constraints.addRanges(typedefToResolve.getRanges());
+ }
+
+ TypeDefinition<?> type = nodeToResolve.getType();
+ if (type == null) {
+ return findConstraints(nodeToResolve.getTypedef(), constraints,
+ modules, builder);
+ } else {
+ if (type instanceof UnknownType) {
+ ModuleBuilder dependentModule = findDependentModule(modules,
+ builder, type.getQName().getPrefix(),
+ nodeToResolve.getLine());
+ TypeDefinitionBuilder tdb = findTypeDefinitionBuilder(
+ nodeToResolve.getPath(), dependentModule, type
+ .getQName().getLocalName(), builder.getName(),
+ nodeToResolve.getLine());
+ return findConstraints(tdb, constraints, modules,
+ dependentModule);
+ } else if (type instanceof ExtendedType) {
+ ExtendedType extType = (ExtendedType) type;
+ constraints.addFractionDigits(extType.getFractionDigits());
+ constraints.addLengths(extType.getLengths());
+ constraints.addPatterns(extType.getPatterns());
+ constraints.addRanges(extType.getRanges());
+
+ TypeDefinition<?> base = extType.getBaseType();
+ if (base instanceof UnknownType) {
+ ModuleBuilder dependentModule = findDependentModule(
+ modules, builder, base.getQName().getPrefix(),
+ nodeToResolve.getLine());
+ TypeDefinitionBuilder tdb = findTypeDefinitionBuilder(
+ nodeToResolve.getPath(), dependentModule, base
+ .getQName().getLocalName(),
+ builder.getName(), nodeToResolve.getLine());
+ return findConstraints(tdb, constraints, modules,
+ dependentModule);
+ } else {
+ // it has to be base yang type
+ mergeConstraints(type, constraints);
+ return constraints;
+ }
} else {
- return findConstraints(
- findTypeDefinitionBuilder(nodeToResolve.getPath(), builder,
- ext.getQName().getLocalName(), builder.getName(),
- nodeToResolve.getLine()), constraints, modules,
- builder);
- }
- } else if (referencedType instanceof UnknownType) {
- final UnknownType unknown = (UnknownType) referencedType;
- ranges = unknown.getRangeStatements();
- constraints.addRanges(ranges);
- lengths = unknown.getLengthStatements();
- constraints.addLengths(lengths);
- patterns = unknown.getPatterns();
- constraints.addPatterns(patterns);
- fractionDigits = unknown.getFractionDigits();
- constraints.setFractionDigits(fractionDigits);
-
- String unknownTypePrefix = unknown.getQName().getPrefix();
- if (unknownTypePrefix == null || "".equals(unknownTypePrefix)) {
- unknownTypePrefix = builder.getPrefix();
+ // it is base yang type
+ mergeConstraints(type, constraints);
+ return constraints;
}
- final ModuleBuilder dependentModule = findDependentModule(modules,
- builder, unknown.getQName().getPrefix(),
- nodeToResolve.getLine());
- final TypeDefinitionBuilder utBuilder = findTypeDefinitionBuilder(
- nodeToResolve.getPath(), dependentModule, unknown
- .getQName().getLocalName(), builder.getName(),
- nodeToResolve.getLine());
- return findConstraints(utBuilder, constraints, modules,
- dependentModule);
- } else {
- // HANDLE BASE YANG TYPE
- mergeConstraints(referencedType, constraints);
- return constraints;
}
}
constraints.addRanges(((DecimalTypeDefinition) referencedType)
.getRangeStatements());
constraints
- .setFractionDigits(((DecimalTypeDefinition) referencedType)
+ .addFractionDigits(((DecimalTypeDefinition) referencedType)
.getFractionDigits());
} else if (referencedType instanceof IntegerTypeDefinition) {
constraints.addRanges(((IntegerTypeDefinition) referencedType)
if ("union".equals(typeName)) {
List<String> typePath = new ArrayList<String>(actualPath);
typePath.add(typeName);
-
SchemaPath p = createActualSchemaPath(typePath, namespace,
revision, yangModelPrefix);
UnionTypeBuilder unionBuilder = moduleBuilder.addUnionType(
line);
} else {
type = parseTypeBody(typeName, typeBody, actualPath,
- namespace, revision, yangModelPrefix);
+ namespace, revision, yangModelPrefix,
+ moduleBuilder.getActualNode());
moduleBuilder.setType(type, actualPath);
}
}
} else {
- type = parseUnknownTypeBody(typeQName, typeBody);
+ type = parseUnknownTypeBody(typeQName, typeBody, actualPath,
+ namespace, revision, yangModelPrefix,
+ moduleBuilder.getActualNode(), moduleBuilder);
// mark parent node of this type statement as dirty
moduleBuilder.addDirtyNode(actualPath);
moduleBuilder.setType(type, actualPath);
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
+import java.util.Set;
import org.opendaylight.controller.yang.common.QName;
import org.opendaylight.controller.yang.model.api.ModuleImport;
import org.opendaylight.controller.yang.model.api.type.InstanceIdentifierTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.IntegerTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.LeafrefTypeDefinition;
+import org.opendaylight.controller.yang.model.api.type.LengthConstraint;
+import org.opendaylight.controller.yang.model.api.type.PatternConstraint;
+import org.opendaylight.controller.yang.model.api.type.RangeConstraint;
import org.opendaylight.controller.yang.model.api.type.StringTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.UnionTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.UnsignedIntegerTypeDefinition;
import org.opendaylight.controller.yang.model.util.Decimal64;
import org.opendaylight.controller.yang.model.util.EmptyType;
import org.opendaylight.controller.yang.model.util.EnumerationType;
+import org.opendaylight.controller.yang.model.util.ExtendedType;
import org.opendaylight.controller.yang.model.util.IdentityrefType;
import org.opendaylight.controller.yang.model.util.InstanceIdentifier;
import org.opendaylight.controller.yang.model.util.Int16;
/**
* Get module import referenced by given prefix.
- *
+ *
* @param builder
* module to search
* @param prefix
/**
* Parse uses path.
- *
+ *
* @param usesPath
* as String
* @return SchemaPath from given String
/**
* Add all augment's child nodes to given target.
- *
+ *
* @param augment
* @param target
*/
final DataNodeContainerBuilder target) {
for (DataSchemaNodeBuilder builder : augment.getChildNodes()) {
builder.setAugmenting(true);
- correctAugmentChildPath(augment, target.getPath());
+ correctAugmentChildPath(builder, target.getPath());
target.addChildNode(builder);
}
}
public static void fillAugmentTarget(
- final AugmentationSchemaBuilder augment,
- final ChoiceBuilder target) {
+ final AugmentationSchemaBuilder augment, final ChoiceBuilder target) {
for (DataSchemaNodeBuilder builder : augment.getChildNodes()) {
builder.setAugmenting(true);
- correctAugmentChildPath(augment, target.getPath());
+ correctAugmentChildPath(builder, target.getPath());
target.addChildNode(builder);
}
}
- private static void correctAugmentChildPath(final DataNodeContainerBuilder node,
+ private static void correctAugmentChildPath(
+ final DataSchemaNodeBuilder childNode,
final SchemaPath parentSchemaPath) {
- for (DataSchemaNodeBuilder builder : node.getChildNodes()) {
-
- // add correct path
- List<QName> targetNodePath = new ArrayList<QName>(
- parentSchemaPath.getPath());
- targetNodePath.add(builder.getQName());
- builder.setPath(new SchemaPath(targetNodePath, true));
- if (builder instanceof DataNodeContainerBuilder) {
- DataNodeContainerBuilder cnb = (DataNodeContainerBuilder) builder;
- correctAugmentChildPath(cnb, builder.getPath());
+ // set correct path
+ List<QName> targetNodePath = new ArrayList<QName>(
+ parentSchemaPath.getPath());
+ targetNodePath.add(childNode.getQName());
+ childNode.setPath(new SchemaPath(targetNodePath, true));
+
+ // set correct path for all child nodes
+ if (childNode instanceof DataNodeContainerBuilder) {
+ DataNodeContainerBuilder dataNodeContainer = (DataNodeContainerBuilder) childNode;
+ for (DataSchemaNodeBuilder child : dataNodeContainer
+ .getChildNodes()) {
+ correctAugmentChildPath(child, childNode.getPath());
}
+ }
- // if child can contains type, correct path for this type too
- if (builder instanceof TypeAwareBuilder) {
- TypeAwareBuilder nodeBuilder = (TypeAwareBuilder) builder;
- QName nodeBuilderQName = nodeBuilder.getQName();
- TypeDefinition<?> nodeBuilderType = nodeBuilder.getType();
- if (nodeBuilderType != null) {
- TypeDefinition<?> newType = createCorrectTypeDefinition(
- parentSchemaPath, nodeBuilderQName, nodeBuilderType);
- nodeBuilder.setType(newType);
- } else {
- TypeDefinitionBuilder nodeBuilderTypedef = nodeBuilder
- .getTypedef();
- SchemaPath newSchemaPath = createNewSchemaPath(
- nodeBuilderTypedef.getPath(), nodeBuilderQName,
- nodeBuilderTypedef.getQName());
- nodeBuilderTypedef.setPath(newSchemaPath);
+ // if node can contains type, correct path for this type too
+ if (childNode instanceof TypeAwareBuilder) {
+ TypeAwareBuilder nodeBuilder = (TypeAwareBuilder) childNode;
+ correctTypeAwareNodePath(nodeBuilder, parentSchemaPath);
+ }
+ }
+
+ /**
+ * Repair schema path of node type.
+ *
+ * @param node
+ * node which contains type statement
+ * @param parentSchemaPath
+ * schema path of parent node
+ */
+ private static void correctTypeAwareNodePath(
+ TypeAwareBuilder node, SchemaPath parentSchemaPath) {
+ final QName nodeBuilderQName = node.getQName();
+ final TypeDefinition<?> nodeType = node.getType();
+
+ Integer fd = null;
+ List<LengthConstraint> lengths = null;
+ List<PatternConstraint> patterns = null;
+ List<RangeConstraint> ranges = null;
+
+ if (nodeType != null) {
+ if (nodeType instanceof ExtendedType) {
+ ExtendedType et = (ExtendedType) nodeType;
+ if (nodeType
+ .getQName()
+ .getLocalName()
+ .equals(nodeType.getBaseType().getQName()
+ .getLocalName())) {
+ fd = et.getFractionDigits();
+ lengths = et.getLengths();
+ patterns = et.getPatterns();
+ ranges = et.getRanges();
+ if (!hasConstraints(fd, lengths, patterns, ranges)) {
+ return;
+ }
}
}
+ TypeDefinition<?> newType = createCorrectTypeDefinition(
+ parentSchemaPath, nodeBuilderQName, nodeType);
+ node.setType(newType);
+ } else {
+ TypeDefinitionBuilder nodeBuilderTypedef = node.getTypedef();
+
+ fd = nodeBuilderTypedef.getFractionDigits();
+ lengths = nodeBuilderTypedef.getLengths();
+ patterns = nodeBuilderTypedef.getPatterns();
+ ranges = nodeBuilderTypedef.getRanges();
+
+ String tdbTypeName = nodeBuilderTypedef.getQName().getLocalName();
+ String baseTypeName = null;
+ if (nodeBuilderTypedef.getType() == null) {
+ baseTypeName = nodeBuilderTypedef.getTypedef().getQName()
+ .getLocalName();
+ } else {
+ baseTypeName = nodeBuilderTypedef.getType().getQName()
+ .getLocalName();
+ }
+ if (!(tdbTypeName.equals(baseTypeName))) {
+ return;
+ }
+
+ if (!hasConstraints(fd, lengths, patterns, ranges)) {
+ return;
+ }
+
+ SchemaPath newSchemaPath = createNewSchemaPath(
+ nodeBuilderTypedef.getPath(), nodeBuilderQName,
+ nodeBuilderTypedef.getQName());
+ nodeBuilderTypedef.setPath(newSchemaPath);
+ }
+ }
+
+ /**
+ * Check if there are some constraints.
+ *
+ * @param fd
+ * fraction digits
+ * @param lengths
+ * length constraints
+ * @param patterns
+ * pattern constraints
+ * @param ranges
+ * range constraints
+     * @return true if any of the constraints are present, false otherwise
+ */
+ private static boolean hasConstraints(final Integer fd,
+ final List<LengthConstraint> lengths,
+ final List<PatternConstraint> patterns,
+ final List<RangeConstraint> ranges) {
+ if (fd == null && (lengths == null || lengths.isEmpty())
+ && (patterns == null || patterns.isEmpty())
+ && (ranges == null || ranges.isEmpty())) {
+ return false;
+ } else {
+ return true;
}
}
DecimalTypeDefinition decimalType = (DecimalTypeDefinition) nodeType;
newSchemaPath = createNewSchemaPath(parentSchemaPath,
nodeQName, decimalType.getQName());
- result = new Decimal64(newSchemaPath, decimalType.getFractionDigits());
+ result = new Decimal64(newSchemaPath,
+ decimalType.getFractionDigits());
} else if (nodeType instanceof EmptyTypeDefinition) {
newSchemaPath = createNewSchemaPath(parentSchemaPath,
nodeQName, nodeType.getQName());
newSchemaPath = createNewSchemaPath(parentSchemaPath,
nodeQName, unionType.getQName());
return new UnionType(newSchemaPath, unionType.getTypes());
+ } else if (nodeType instanceof ExtendedType) {
+ ExtendedType extType = (ExtendedType) nodeType;
+ newSchemaPath = createNewSchemaPath(parentSchemaPath,
+ nodeQName, extType.getQName());
+ result = createNewExtendedType(newSchemaPath, extType);
}
}
return result;
}
+ private static TypeDefinition<?> createNewExtendedType(
+ SchemaPath newSchemaPath, ExtendedType oldExtendedType) {
+ QName qname = oldExtendedType.getQName();
+ TypeDefinition<?> baseType = oldExtendedType.getBaseType();
+ String desc = oldExtendedType.getDescription();
+ String ref = oldExtendedType.getReference();
+ ExtendedType.Builder builder = new ExtendedType.Builder(qname,
+ baseType, desc, ref, newSchemaPath);
+ builder.status(oldExtendedType.getStatus());
+ builder.lengths(oldExtendedType.getLengths());
+ builder.patterns(oldExtendedType.getPatterns());
+ builder.ranges(oldExtendedType.getRanges());
+ builder.fractionDigits(oldExtendedType.getFractionDigits());
+ builder.unknownSchemaNodes(oldExtendedType.getUnknownSchemaNodes());
+ return builder.build();
+ }
+
private static TypeDefinition<?> createNewStringType(SchemaPath schemaPath,
QName nodeQName, StringTypeDefinition nodeType) {
List<QName> path = schemaPath.getPath();
* <li>reference</li>
* <li>config</li>
* </ul>
- *
+ *
* These parameters may be refined for any node.
- *
+ *
* @param node
* node to refine
* @param refine
private final List<List<RangeConstraint>> ranges = new ArrayList<List<RangeConstraint>>();
private final List<List<LengthConstraint>> lengths = new ArrayList<List<LengthConstraint>>();
private final List<PatternConstraint> patterns = new ArrayList<PatternConstraint>();
- private Integer fractionDigits;
+ private final List<Integer> fractionDigits = new ArrayList<Integer>();
List<List<RangeConstraint>> getAllRanges() {
return ranges;
}
public List<RangeConstraint> getRange() {
- if(ranges.isEmpty()) {
+ if (ranges.isEmpty()) {
return Collections.emptyList();
}
}
public List<LengthConstraint> getLength() {
- if(lengths.isEmpty()) {
+ if (lengths.isEmpty()) {
return Collections.emptyList();
}
}
+ // Returns the first fraction-digits value collected so far, or null
+ // when none has been recorded yet.
public Integer getFractionDigits() {
- return fractionDigits;
+ if (fractionDigits.isEmpty()) {
+ return null;
+ }
+ return fractionDigits.get(0);
}
- public void setFractionDigits(final Integer fractionDigits) {
- if (this.fractionDigits == null) {
- this.fractionDigits = fractionDigits;
- }
+ // Records a fraction-digits value; every call is appended to the list,
+ // and getFractionDigits() returns the first value added.
+ public void addFractionDigits(final Integer fractionDigits) {
+ this.fractionDigits.add(fractionDigits);
}
}
import org.opendaylight.controller.yang.model.api.Status;
import org.opendaylight.controller.yang.model.api.TypeDefinition;
import org.opendaylight.controller.yang.model.api.UnknownSchemaNode;
+import org.opendaylight.controller.yang.model.api.type.BinaryTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.BitsTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.BitsTypeDefinition.Bit;
import org.opendaylight.controller.yang.model.api.type.EnumTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.EnumTypeDefinition.EnumPair;
+import org.opendaylight.controller.yang.model.api.type.IntegerTypeDefinition;
import org.opendaylight.controller.yang.model.api.type.LengthConstraint;
import org.opendaylight.controller.yang.model.api.type.PatternConstraint;
import org.opendaylight.controller.yang.model.api.type.RangeConstraint;
+import org.opendaylight.controller.yang.model.api.type.StringTypeDefinition;
+import org.opendaylight.controller.yang.model.api.type.UnsignedIntegerTypeDefinition;
import org.opendaylight.controller.yang.model.util.BaseConstraints;
import org.opendaylight.controller.yang.model.util.BaseTypes;
import org.opendaylight.controller.yang.model.util.BinaryType;
import org.opendaylight.controller.yang.model.util.Uint64;
import org.opendaylight.controller.yang.model.util.Uint8;
import org.opendaylight.controller.yang.model.util.UnknownType;
+import org.opendaylight.controller.yang.parser.builder.api.Builder;
import org.opendaylight.controller.yang.parser.builder.api.SchemaNodeBuilder;
+import org.opendaylight.controller.yang.parser.builder.api.TypeDefinitionBuilder;
import org.opendaylight.controller.yang.parser.builder.impl.ConstraintsBuilder;
+import org.opendaylight.controller.yang.parser.builder.impl.ModuleBuilder;
+import org.opendaylight.controller.yang.parser.builder.impl.UnionTypeBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* @return UnknownType object with constraints from parsed type body
*/
public static TypeDefinition<?> parseUnknownTypeBody(QName typedefQName,
- Type_body_stmtsContext ctx) {
+ Type_body_stmtsContext ctx, final List<String> actualPath,
+ final URI namespace, final Date revision, final String prefix,
+ Builder parent, ModuleBuilder moduleBuilder) {
+ String typeName = typedefQName.getLocalName();
+
UnknownType.Builder unknownType = new UnknownType.Builder(typedefQName);
+
if (ctx != null) {
List<RangeConstraint> rangeStatements = getRangeConstraints(ctx);
List<LengthConstraint> lengthStatements = getLengthConstraints(ctx);
List<PatternConstraint> patternStatements = getPatternConstraint(ctx);
Integer fractionDigits = getFractionDigits(ctx);
- unknownType.rangeStatements(rangeStatements);
- unknownType.lengthStatements(lengthStatements);
- unknownType.patterns(patternStatements);
- unknownType.fractionDigits(fractionDigits);
+ if (parent instanceof TypeDefinitionBuilder) {
+ TypeDefinitionBuilder typedef = (TypeDefinitionBuilder) parent;
+ typedef.setRanges(rangeStatements);
+ typedef.setLengths(lengthStatements);
+ typedef.setPatterns(patternStatements);
+ typedef.setFractionDigits(fractionDigits);
+ return unknownType.build();
+ } else {
+ TypeDefinition<?> baseType = unknownType.build();
+ TypeDefinition<?> result = null;
+ QName qname = new QName(namespace, revision, prefix, typeName);
+ ExtendedType.Builder typeBuilder = null;
+
+ SchemaPath schemaPath = createTypeSchemaPath(actualPath,
+ namespace, revision, prefix, typeName, false, false);
+ typeBuilder = new ExtendedType.Builder(qname, baseType, "", "",
+ schemaPath);
+
+ typeBuilder.ranges(rangeStatements);
+ typeBuilder.lengths(lengthStatements);
+ typeBuilder.patterns(patternStatements);
+ typeBuilder.fractionDigits(fractionDigits);
+
+ result = typeBuilder.build();
+
+ return result;
+ }
}
+
return unknownType.build();
}
public static TypeDefinition<?> parseTypeBody(final String typeName,
final Type_body_stmtsContext typeBody,
final List<String> actualPath, final URI namespace,
- final Date revision, final String prefix) {
+ final Date revision, final String prefix, Builder parent) {
TypeDefinition<?> baseType = null;
List<RangeConstraint> rangeStatements = getRangeConstraints(typeBody);
List<EnumTypeDefinition.EnumPair> enumConstants = getEnumConstants(
typeBody, actualPath, namespace, revision, prefix);
+ TypeConstraints constraints = new TypeConstraints();
+ constraints.addFractionDigits(fractionDigits);
+ constraints.addLengths(lengthStatements);
+ constraints.addPatterns(patternStatements);
+ constraints.addRanges(rangeStatements);
+
SchemaPath baseTypePathFinal = createTypeSchemaPath(actualPath,
namespace, revision, prefix, typeName, true, true);
- SchemaPath baseTypePath = createTypeSchemaPath(actualPath,
- namespace, revision, prefix, typeName, true, false);
+ SchemaPath baseTypePath = createTypeSchemaPath(actualPath, namespace,
+ revision, prefix, typeName, true, false);
if ("decimal64".equals(typeName)) {
if (rangeStatements.isEmpty()) {
return new Decimal64(baseTypePathFinal, fractionDigits);
}
- baseType = new Decimal64(baseTypePath, fractionDigits);
+ Decimal64 decimalType = new Decimal64(baseTypePath, fractionDigits);
+ constraints.addRanges(decimalType.getRangeStatements());
+ baseType = decimalType;
} else if (typeName.startsWith("int")) {
+ IntegerTypeDefinition intType = null;
if ("int8".equals(typeName)) {
- baseType = new Int8(baseTypePath);
+ intType = new Int8(baseTypePath);
} else if ("int16".equals(typeName)) {
- baseType = new Int16(baseTypePath);
+ intType = new Int16(baseTypePath);
} else if ("int32".equals(typeName)) {
- baseType = new Int32(baseTypePath);
+ intType = new Int32(baseTypePath);
} else if ("int64".equals(typeName)) {
- baseType = new Int64(baseTypePath);
+ intType = new Int64(baseTypePath);
}
+ constraints.addRanges(intType.getRangeStatements());
+ baseType = intType;
} else if (typeName.startsWith("uint")) {
+ UnsignedIntegerTypeDefinition uintType = null;
if ("uint8".equals(typeName)) {
- baseType = new Uint8(baseTypePath);
+ uintType = new Uint8(baseTypePath);
} else if ("uint16".equals(typeName)) {
- baseType = new Uint16(baseTypePath);
+ uintType = new Uint16(baseTypePath);
} else if ("uint32".equals(typeName)) {
- baseType = new Uint32(baseTypePath);
+ uintType = new Uint32(baseTypePath);
} else if ("uint64".equals(typeName)) {
- baseType = new Uint64(baseTypePath);
+ uintType = new Uint64(baseTypePath);
}
+ constraints.addRanges(uintType.getRangeStatements());
+ baseType = uintType;
} else if ("enumeration".equals(typeName)) {
return new EnumerationType(baseTypePathFinal, enumConstants);
} else if ("string".equals(typeName)) {
- baseType = new StringType(baseTypePath);
+ StringTypeDefinition stringType = new StringType(baseTypePath);
+ constraints.addLengths(stringType.getLengthStatements());
+ baseType = stringType;
} else if ("bits".equals(typeName)) {
return new BitsType(baseTypePathFinal, getBits(typeBody,
actualPath, namespace, revision, prefix));
absolute);
return new Leafref(baseTypePathFinal, xpath);
} else if ("binary".equals(typeName)) {
- baseType = new BinaryType(baseTypePath);
+ BinaryTypeDefinition binaryType = new BinaryType(baseTypePath);
+ constraints.addLengths(binaryType.getLengthConstraints());
+ baseType = binaryType;
} else if ("instance-identifier".equals(typeName)) {
boolean requireInstance = isRequireInstance(typeBody);
baseType = new InstanceIdentifier(baseTypePath, null,
requireInstance);
}
+ if (parent instanceof TypeDefinitionBuilder
+ && !(parent instanceof UnionTypeBuilder)) {
+ TypeDefinitionBuilder typedef = (TypeDefinitionBuilder) parent;
+ typedef.setRanges(constraints.getRange());
+ typedef.setLengths(constraints.getLength());
+ typedef.setPatterns(constraints.getPatterns());
+ typedef.setFractionDigits(constraints.getFractionDigits());
+ return baseType;
+ }
+
TypeDefinition<?> result = null;
QName qname = new QName(namespace, revision, prefix, typeName);
ExtendedType.Builder typeBuilder = null;
typeBuilder = new ExtendedType.Builder(qname, baseType, "", "",
schemaPath);
- typeBuilder.ranges(rangeStatements);
- typeBuilder.lengths(lengthStatements);
- typeBuilder.patterns(patternStatements);
- typeBuilder.fractionDigits(fractionDigits);
+ typeBuilder.ranges(constraints.getRange());
+ typeBuilder.lengths(constraints.getLength());
+ typeBuilder.patterns(constraints.getPatterns());
+ typeBuilder.fractionDigits(constraints.getFractionDigits());
result = typeBuilder.build();
return result;
return parser.parseYangModels(testFiles);
}
- public static Set<Module> loadModules(String... pathToYangFile) throws IOException {
- YangModelParser parser = new YangParserImpl();
- List<InputStream> input = new ArrayList<InputStream>();
- for(String path : pathToYangFile) {
- input.add(TestUtils.class.getResourceAsStream(path));
- }
- Set<Module> modules = new HashSet<Module>(
+ public static Set<Module> loadModules(List<InputStream> input) throws IOException {
+ final YangModelParser parser = new YangParserImpl();
+ final Set<Module> modules = new HashSet<Module>(
parser.parseYangModelsFromStreams(input));
for(InputStream stream : input) {
stream.close();
return modules;
}
- public static Module loadModule(String pathToYangFile) throws IOException {
- YangModelParser parser = new YangParserImpl();
- InputStream stream = TestUtils.class.getResourceAsStream(pathToYangFile);
- List<InputStream> input = Collections.singletonList(stream);
- Set<Module> modules = new HashSet<Module>(
+ public static Module loadModule(final InputStream stream) throws
+ IOException {
+ final YangModelParser parser = new YangParserImpl();
+ final List<InputStream> input = Collections.singletonList(stream);
+ final Set<Module> modules = new HashSet<Module>(
parser.parseYangModelsFromStreams(input));
stream.close();
return modules.iterator().next();
import org.opendaylight.controller.yang.model.api.type.EnumTypeDefinition.EnumPair;
import org.opendaylight.controller.yang.model.api.type.LengthConstraint;
import org.opendaylight.controller.yang.model.api.type.PatternConstraint;
+import org.opendaylight.controller.yang.model.api.type.StringTypeDefinition;
import org.opendaylight.controller.yang.model.util.BitsType;
import org.opendaylight.controller.yang.model.util.EnumerationType;
import org.opendaylight.controller.yang.model.util.ExtendedType;
@Before
public void init() throws FileNotFoundException {
- testedModules = TestUtils.loadModules("src/test/resources/types");
+ testedModules = TestUtils.loadModules(getClass().getResource
+ ("/types").getPath());
}
@Test
List<TypeDefinition<?>> unionTypes = baseType.getTypes();
ExtendedType ipv4 = (ExtendedType) unionTypes.get(0);
- ExtendedType ipv4Base = (ExtendedType) ipv4.getBaseType();
+ assertTrue(ipv4.getBaseType() instanceof StringTypeDefinition);
String expectedPattern = "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}"
+ "([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])"
+ "(%[\\p{N}\\p{L}]+)?";
- assertEquals(expectedPattern, ipv4Base.getPatterns().get(0)
+ assertEquals(expectedPattern, ipv4.getPatterns().get(0)
.getRegularExpression());
+ TypeDefinition<?> ipv4Address = TestUtils.findTypedef(typedefs, "ipv4-address");
+ assertEquals(ipv4Address, ipv4);
+
ExtendedType ipv6 = (ExtendedType) unionTypes.get(1);
- ExtendedType ipv6Base = (ExtendedType) ipv6.getBaseType();
- List<PatternConstraint> ipv6Patterns = ipv6Base.getPatterns();
+ assertTrue(ipv6.getBaseType() instanceof StringTypeDefinition);
+ List<PatternConstraint> ipv6Patterns = ipv6.getPatterns();
expectedPattern = "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}"
+ "((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|"
+ "(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}"
assertEquals(expectedPattern, ipv6Patterns.get(0)
.getRegularExpression());
+ TypeDefinition<?> ipv6Address = TestUtils.findTypedef(typedefs, "ipv6-address");
+ assertEquals(ipv6Address, ipv6);
+
expectedPattern = "(([^:]+:){6}(([^:]+:[^:]+)|(.*\\..*)))|"
+ "((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)" + "(%.+)?";
assertEquals(expectedPattern, ipv6Patterns.get(1)
public void testDomainName() {
Module tested = TestUtils.findModule(testedModules, "ietf-inet-types");
Set<TypeDefinition<?>> typedefs = tested.getTypeDefinitions();
- TypeDefinition<?> type = TestUtils.findTypedef(typedefs, "domain-name");
- ExtendedType baseType = (ExtendedType) type.getBaseType();
- List<PatternConstraint> patterns = baseType.getPatterns();
+ ExtendedType type = (ExtendedType)TestUtils.findTypedef(typedefs, "domain-name");
+ assertTrue(type.getBaseType() instanceof StringTypeDefinition);
+ List<PatternConstraint> patterns = type.getPatterns();
assertEquals(1, patterns.size());
String expectedPattern = "((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*"
+ "([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)"
+ "|\\.";
assertEquals(expectedPattern, patterns.get(0).getRegularExpression());
- List<LengthConstraint> lengths = baseType.getLengths();
+ List<LengthConstraint> lengths = type.getLengths();
assertEquals(1, lengths.size());
- LengthConstraint length = baseType.getLengths().get(0);
+ LengthConstraint length = type.getLengths().get(0);
assertEquals(1L, length.getMin());
assertEquals(253L, length.getMax());
}
LeafSchemaNode leaf = (LeafSchemaNode) tested
.getDataChildByName("inst-id-leaf1");
ExtendedType leafType = (ExtendedType) leaf.getType();
- InstanceIdentifier leafTypeBase = (InstanceIdentifier)leafType.getBaseType();
+ InstanceIdentifier leafTypeBase = (InstanceIdentifier) leafType
+ .getBaseType();
assertFalse(leafTypeBase.requireInstance());
}
ExtendedType testedType = (ExtendedType) TestUtils.findTypedef(
typedefs, "object-identifier-128");
+ List<PatternConstraint> patterns = testedType.getPatterns();
+ assertEquals(1, patterns.size());
+ PatternConstraint pattern = patterns.get(0);
+ assertEquals("\\d*(\\.\\d*){1,127}", pattern.getRegularExpression());
+
QName testedTypeQName = testedType.getQName();
assertEquals(URI.create("urn:ietf:params:xml:ns:yang:ietf-yang-types"),
testedTypeQName.getNamespace());
assertEquals("object-identifier-128", testedTypeQName.getLocalName());
ExtendedType testedTypeBase = (ExtendedType) testedType.getBaseType();
+ patterns = testedTypeBase.getPatterns();
+ assertEquals(1, patterns.size());
- List<PatternConstraint> patterns = testedTypeBase.getPatterns();
- assertEquals(2, patterns.size());
- PatternConstraint pattern1 = patterns.get(0);
- assertEquals("\\d*(\\.\\d*){1,127}", pattern1.getRegularExpression());
- PatternConstraint pattern2 = patterns.get(1);
+ pattern = patterns.get(0);
assertEquals(
"(([0-1](\\.[1-3]?[0-9]))|(2\\.(0|([1-9]\\d*))))(\\.(0|([1-9]\\d*)))*",
- pattern2.getRegularExpression());
+ pattern.getRegularExpression());
QName testedTypeBaseQName = testedTypeBase.getQName();
assertEquals(URI.create("urn:ietf:params:xml:ns:yang:ietf-yang-types"),
import static org.junit.Assert.*;
+import java.io.FileInputStream;
import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
import org.junit.Test;
import org.opendaylight.controller.yang.parser.util.YangParseException;
@Test
public void testInvalidImport() throws IOException {
try {
- TestUtils.loadModule("/negative-scenario/testfile1.yang");
- fail("ValidationException should by thrown");
+ try (InputStream stream = new FileInputStream(getClass().getResource
+ ("/negative-scenario/testfile1.yang").getPath())) {
+ TestUtils.loadModule(stream);
+ fail("ValidationException should by thrown");
+ }
} catch(YangValidationException e) {
assertTrue(e.getMessage().contains("Not existing module imported"));
}
@Test
public void testTypeNotFound() throws IOException {
try {
- TestUtils.loadModule("/negative-scenario/testfile2.yang");
- fail("YangParseException should by thrown");
+ try (InputStream stream = new FileInputStream(getClass().getResource
+ ("/negative-scenario/testfile2.yang").getPath())) {
+ TestUtils.loadModule(stream);
+ fail("YangParseException should by thrown");
+ }
} catch(YangParseException e) {
assertTrue(e.getMessage().contains("Error in module 'test2' on line 24: Referenced type 'int-ext' not found."));
}
@Test
public void testInvalidAugmentTarget() throws IOException {
try {
- TestUtils.loadModules("/negative-scenario/testfile0.yang", "/negative-scenario/testfile3.yang");
- fail("YangParseException should by thrown");
+ final List<InputStream> streams = new ArrayList<>(2);
+ try (InputStream testFile0 = new FileInputStream(getClass().getResource
+ ("/negative-scenario/testfile0.yang").getPath())) {
+ streams.add(testFile0);
+ try (InputStream testFile3 = new FileInputStream(getClass().getResource
+ ("/negative-scenario/testfile3.yang").getPath())) {
+ streams.add(testFile3);
+ assertEquals("Expected loaded files count is 2", 2,
+ streams.size());
+ TestUtils.loadModules(streams);
+ fail("YangParseException should by thrown");
+ }
+ }
} catch(YangParseException e) {
assertTrue(e.getMessage().contains("Failed to resolve augments in module 'test3'."));
}
@Test
public void testInvalidRefine() throws IOException {
try {
- TestUtils.loadModule("/negative-scenario/testfile4.yang");
- fail("YangParseException should by thrown");
+ try (InputStream stream = new FileInputStream(getClass().getResource
+ ("/negative-scenario/testfile4.yang").getPath())) {
+ TestUtils.loadModule(stream);
+ fail("YangParseException should by thrown");
+ }
} catch(YangParseException e) {
assertTrue(e.getMessage().contains("Can not refine 'presence' for 'node'."));
}
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.opendaylight.controller.yang.model.util.UnionType;
public class YangParserTest {
- private final DateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd");
+ private final DateFormat simpleDateFormat = new SimpleDateFormat(
+ "yyyy-MM-dd");
private Set<Module> modules;
@Before
public void init() throws FileNotFoundException {
- modules = TestUtils.loadModules("src/test/resources/model");
+ modules = TestUtils.loadModules(getClass().getResource("/model").getPath());
assertEquals(3, modules.size());
}
// leaf if-name
LeafSchemaNode ifName = (LeafSchemaNode) test
.getDataChildByName("if-name");
- Leafref ifNameType = (Leafref)ifName.getType();
+ Leafref ifNameType = (Leafref) ifName.getType();
QName qname = ifNameType.getQName();
URI baseYangTypeNS = URI.create("urn:ietf:params:xml:ns:yang:1");
assertEquals("leafref", qname.getLocalName());
// leaf name
- LeafSchemaNode name = (LeafSchemaNode) test
- .getDataChildByName("name");
- StringType nameType = (StringType)name.getType();
+ LeafSchemaNode name = (LeafSchemaNode) test.getDataChildByName("name");
+ StringType nameType = (StringType) name.getType();
QName nameQName = nameType.getQName();
assertEquals(baseYangTypeNS, nameQName.getNamespace());
// leaf count
LeafSchemaNode count = (LeafSchemaNode) test
.getDataChildByName("count");
- ExtendedType countType = (ExtendedType)count.getType();
+ ExtendedType countType = (ExtendedType) count.getType();
QName countTypeQName = countType.getQName();
URI expectedNS = URI.create("urn:simple.types.data.demo");
assertEquals("t2", countTypeQName.getPrefix());
assertEquals("int8", countTypeQName.getLocalName());
- Int8 countTypeBase = (Int8)countType.getBaseType();
+ Int8 countTypeBase = (Int8) countType.getBaseType();
QName countTypeBaseQName = countTypeBase.getQName();
assertEquals(baseYangTypeNS, countTypeBaseQName.getNamespace());
}
@Test
- public void testTypedefRangesResolving() {
+ public void testTypedefRangesResolving() throws ParseException {
Module testModule = TestUtils.findModule(modules, "types1");
LeafSchemaNode testleaf = (LeafSchemaNode) testModule
.getDataChildByName("testleaf");
ExtendedType leafType = (ExtendedType) testleaf.getType();
- assertEquals("my-type1", leafType.getQName().getLocalName());
- assertEquals("t2", leafType.getQName().getPrefix());
+ QName leafTypeQName = leafType.getQName();
+ assertEquals("my-type1", leafTypeQName.getLocalName());
+ assertEquals("t1", leafTypeQName.getPrefix());
+ assertEquals(URI.create("urn:simple.container.demo"),
+ leafTypeQName.getNamespace());
+ Date expectedDate = simpleDateFormat.parse("2013-02-27");
+ assertEquals(expectedDate, leafTypeQName.getRevision());
+ assertEquals(1, leafType.getRanges().size());
+
ExtendedType baseType = (ExtendedType) leafType.getBaseType();
- assertEquals("my-base-int32-type", baseType.getQName().getLocalName());
- assertEquals("t2", baseType.getQName().getPrefix());
+ QName baseTypeQName = baseType.getQName();
+ assertEquals("my-type1", baseTypeQName.getLocalName());
+ assertEquals("t2", baseTypeQName.getPrefix());
+ assertEquals(URI.create("urn:simple.types.data.demo"),
+ baseTypeQName.getNamespace());
+ assertEquals(expectedDate, baseTypeQName.getRevision());
+ assertEquals(2, baseType.getRanges().size());
List<RangeConstraint> ranges = leafType.getRanges();
assertEquals(1, ranges.size());
assertEquals("my-string-type-ext", testleafTypeQName.getLocalName());
assertEquals("t2", testleafTypeQName.getPrefix());
- Set<String> expectedRegex = new HashSet<String>();
- expectedRegex.add("[a-k]*");
- expectedRegex.add("[b-u]*");
- expectedRegex.add("[e-z]*");
-
- Set<String> actualRegex = new HashSet<String>();
List<PatternConstraint> patterns = testleafType.getPatterns();
- assertEquals(3, patterns.size());
- for (PatternConstraint pc : patterns) {
- actualRegex.add(pc.getRegularExpression());
- }
- assertEquals(expectedRegex, actualRegex);
+ assertEquals(1, patterns.size());
+ PatternConstraint pattern = patterns.iterator().next();
+ assertEquals("[e-z]*", pattern.getRegularExpression());
- TypeDefinition<?> baseType = testleafType.getBaseType();
+ ExtendedType baseType = (ExtendedType) testleafType.getBaseType();
assertEquals("my-string-type2", baseType.getQName().getLocalName());
- List<LengthConstraint> lengths = testleafType.getLengths();
- assertEquals(1, lengths.size());
+ patterns = baseType.getPatterns();
+ assertEquals(1, patterns.size());
+ pattern = patterns.iterator().next();
+ assertEquals("[b-u]*", pattern.getRegularExpression());
- LengthConstraint length = lengths.get(0);
- assertEquals(5L, length.getMin());
- assertEquals(10L, length.getMax());
+ List<LengthConstraint> lengths = testleafType.getLengths();
+ assertTrue(lengths.isEmpty());
}
@Test
ExtendedType baseType = (ExtendedType) testleafType.getBaseType();
assertEquals("my-base-int32-type", baseType.getQName().getLocalName());
- ExtendedType int32Type = (ExtendedType) baseType.getBaseType();
- Int32 int32TypeBase = (Int32)int32Type.getBaseType();
- QName qname = int32TypeBase.getQName();
- assertEquals(URI.create("urn:ietf:params:xml:ns:yang:1"), qname.getNamespace());
+ Int32 int32Type = (Int32) baseType.getBaseType();
+ QName qname = int32Type.getQName();
+ assertEquals(URI.create("urn:ietf:params:xml:ns:yang:1"),
+ qname.getNamespace());
assertNull(qname.getRevision());
assertEquals("", qname.getPrefix());
assertEquals("int32", qname.getLocalName());
- List<RangeConstraint> ranges = int32Type.getRanges();
+ List<RangeConstraint> ranges = baseType.getRanges();
assertEquals(1, ranges.size());
RangeConstraint range = ranges.get(0);
assertEquals(2L, range.getMin());
ExtendedType type = (ExtendedType) testleaf.getType();
assertEquals(4, (int) type.getFractionDigits());
- Decimal64 baseType = (Decimal64) type.getBaseType();
- assertEquals(6, (int) baseType.getFractionDigits());
+ ExtendedType typeBase = (ExtendedType) type.getBaseType();
+ assertEquals("my-decimal-type", typeBase.getQName().getLocalName());
+ assertNull(typeBase.getFractionDigits());
+
+ Decimal64 decimal = (Decimal64) typeBase.getBaseType();
+ assertEquals(6, (int) decimal.getFractionDigits());
}
@Test
Leafref ifcIdType = (Leafref) ifcId.getType();
SchemaPath ifcIdTypeSchemaPath = ifcIdType.getPath();
List<QName> ifcIdTypePath = ifcIdTypeSchemaPath.getPath();
- QName q0 = new QName(new URI("urn:simple.types.data.demo"),
- simpleDateFormat.parse("2013-02-27"), "data", "interfaces");
- QName q1 = new QName(new URI("urn:simple.types.data.demo"),
- simpleDateFormat.parse("2013-02-27"), "data", "ifEntry");
- QName q2 = new QName(new URI("urn:simple.container.demo.test"),
- simpleDateFormat.parse("2013-02-27"), "data", "augment-holder");
- QName q3 = new QName(new URI("urn:simple.container.demo"),
- simpleDateFormat.parse("2013-02-27"), "data", "interface-id");
+
+ URI types1URI = URI.create("urn:simple.container.demo");
+ URI types2URI = URI.create("urn:simple.types.data.demo");
+ URI types3URI = URI.create("urn:simple.container.demo.test");
+ Date expectedDate = simpleDateFormat.parse("2013-02-27");
+
+ QName q0 = new QName(types2URI,
+ expectedDate, "data", "interfaces");
+ QName q1 = new QName(types2URI,
+ expectedDate, "data", "ifEntry");
+ QName q2 = new QName(types3URI,
+ expectedDate, "data", "augment-holder");
+ QName q3 = new QName(types1URI,
+ expectedDate, "data", "interface-id");
assertEquals(q0, ifcIdTypePath.get(0));
assertEquals(q1, ifcIdTypePath.get(1));
assertEquals(q2, ifcIdTypePath.get(2));
assertEquals(q0, higherLayerTypePath.get(0));
assertEquals(q1, higherLayerTypePath.get(1));
assertEquals(q2, higherLayerTypePath.get(2));
- q3 = new QName(new URI("urn:simple.container.demo"),
- simpleDateFormat.parse("2013-02-27"), "data", "higher-layer-if");
+ q3 = new QName(types1URI,
+ expectedDate, "data", "higher-layer-if");
assertEquals(q3, higherLayerTypePath.get(3));
+
+ LeafSchemaNode myType = (LeafSchemaNode) augment
+ .getDataChildByName("my-type");
+ ExtendedType leafType = (ExtendedType)myType.getType();
+
+ testModule = TestUtils.findModule(modules, "types2");
+ TypeDefinition<?> typedef = TestUtils.findTypedef(testModule.getTypeDefinitions(), "my-type1");
+
+ assertEquals(typedef, leafType);
}
@Test
Set<TypeDefinition<?>> types = test.getTypeDefinitions();
// my-base-int32-type
- ExtendedType int32Typedef = (ExtendedType)TestUtils.findTypedef(types, "my-base-int32-type");
+ ExtendedType int32Typedef = (ExtendedType) TestUtils.findTypedef(types,
+ "my-base-int32-type");
QName int32TypedefQName = int32Typedef.getQName();
URI expectedNS = URI.create("urn:simple.types.data.demo");
assertEquals(int32TypedefQName, typePath.get(0));
// my-base-int32-type/int32
- ExtendedType int32Ext = (ExtendedType)int32Typedef.getBaseType();
- QName int32ExtQName = int32Ext.getQName();
-
- assertEquals(expectedNS, int32ExtQName.getNamespace());
- assertEquals(expectedDate, int32ExtQName.getRevision());
- assertEquals("t2", int32ExtQName.getPrefix());
- assertEquals("int32", int32ExtQName.getLocalName());
-
- SchemaPath int32ExtSchemaPath = int32Ext.getPath();
- List<QName> int32ExtPath = int32ExtSchemaPath.getPath();
- assertEquals(2, int32ExtPath.size());
- assertEquals(int32TypedefQName, int32ExtPath.get(0));
- assertEquals(int32ExtQName, int32ExtPath.get(1));
-
- // my-base-int32-type/int32/int32
- Int32 int32 = (Int32)int32Ext.getBaseType();
+ Int32 int32 = (Int32) int32Typedef.getBaseType();
QName int32QName = int32.getQName();
- assertEquals(URI.create("urn:ietf:params:xml:ns:yang:1"), int32QName.getNamespace());
+ assertEquals(URI.create("urn:ietf:params:xml:ns:yang:1"),
+ int32QName.getNamespace());
assertNull(int32QName.getRevision());
assertEquals("", int32QName.getPrefix());
assertEquals("int32", int32QName.getLocalName());
List<QName> int32Path = int32SchemaPath.getPath();
assertEquals(3, int32Path.size());
assertEquals(int32TypedefQName, int32Path.get(0));
- assertEquals(int32ExtQName, int32Path.get(1));
assertEquals(int32QName, int32Path.get(2));
}
Set<TypeDefinition<?>> types = test.getTypeDefinitions();
// my-base-int32-type
- ExtendedType myDecType = (ExtendedType)TestUtils.findTypedef(types, "my-decimal-type");
+ ExtendedType myDecType = (ExtendedType) TestUtils.findTypedef(types,
+ "my-decimal-type");
QName myDecTypeQName = myDecType.getQName();
URI expectedNS = URI.create("urn:simple.types.data.demo");
assertEquals(myDecTypeQName, typePath.get(0));
// my-base-int32-type/int32
- Decimal64 dec64 = (Decimal64)myDecType.getBaseType();
+ Decimal64 dec64 = (Decimal64) myDecType.getBaseType();
QName dec64QName = dec64.getQName();
- assertEquals(URI.create("urn:ietf:params:xml:ns:yang:1"), dec64QName.getNamespace());
+ assertEquals(URI.create("urn:ietf:params:xml:ns:yang:1"),
+ dec64QName.getNamespace());
assertNull(dec64QName.getRevision());
assertEquals("", dec64QName.getPrefix());
assertEquals("decimal64", dec64QName.getLocalName());
path "/if:interfaces/if:interface/if:higher-layer-if";
}
}
+ leaf my-type {
+ type data:my-type1;
+ }
}
container mycont {
* \r
* \r
*/\r
-public class QName {\r
+public final class QName {\r
protected static final Logger logger = LoggerFactory\r
.getLogger(QName.class);\r
\r
*/\r
TypeDefinition<?> getType();\r
\r
+ String getDefault();\r
+\r
+ String getUnits();\r
+\r
}\r
}
@Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result
- + ((baseType == null) ? 0 : baseType.hashCode());
- result = prime * result
- + ((defaultValue == null) ? 0 : defaultValue.hashCode());
- result = prime * result
- + ((description == null) ? 0 : description.hashCode());
- result = prime
- * result
- + ((unknownSchemaNodes == null) ? 0 : unknownSchemaNodes
- .hashCode());
- result = prime * result + ((path == null) ? 0 : path.hashCode());
- result = prime * result
- + ((reference == null) ? 0 : reference.hashCode());
- result = prime * result + ((status == null) ? 0 : status.hashCode());
- result = prime * result
- + ((typeName == null) ? 0 : typeName.hashCode());
- result = prime * result + ((units == null) ? 0 : units.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
+ public boolean equals(Object o) {
+ if (this == o) {
return true;
}
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- ExtendedType other = (ExtendedType) obj;
- if (baseType == null) {
- if (other.baseType != null) {
- return false;
- }
- } else if (!baseType.equals(other.baseType)) {
- return false;
- }
- if (defaultValue == null) {
- if (other.defaultValue != null) {
- return false;
- }
- } else if (!defaultValue.equals(other.defaultValue)) {
- return false;
- }
- if (description == null) {
- if (other.description != null) {
- return false;
- }
- } else if (!description.equals(other.description)) {
- return false;
- }
- if (unknownSchemaNodes == null) {
- if (other.unknownSchemaNodes != null) {
- return false;
- }
- } else if (!unknownSchemaNodes.equals(other.unknownSchemaNodes)) {
+ if (!(o instanceof ExtendedType)) {
return false;
}
- if (path == null) {
- if (other.path != null) {
- return false;
- }
- } else if (!path.equals(other.path)) {
- return false;
- }
- if (reference == null) {
- if (other.reference != null) {
- return false;
- }
- } else if (!reference.equals(other.reference)) {
- return false;
- }
- if (status != other.status) {
- return false;
- }
- if (typeName == null) {
- if (other.typeName != null) {
- return false;
- }
- } else if (!typeName.equals(other.typeName)) {
+
+ ExtendedType that = (ExtendedType) o;
+ if (path != null ? !path.equals(that.path) : that.path != null) {
return false;
}
- if (units == null) {
- if (other.units != null) {
- return false;
- }
- } else if (!units.equals(other.units)) {
+ if (typeName != null ? !typeName.equals(that.typeName) : that.typeName != null)
return false;
- }
+
return true;
}
+ @Override
+ public int hashCode() {
+ // Hash computed from typeName and path only, matching the fields
+ // compared by the rewritten equals() above in this hunk.
+ int result = typeName != null ? typeName.hashCode() : 0;
+ result = 31 * result + (path != null ? path.hashCode() : 0);
+ return result;
+ }
+
@Override
public String toString() {
- StringBuilder builder2 = new StringBuilder();
- builder2.append("ExtendedType [typeName=");
- builder2.append(typeName);
- builder2.append(", baseType=");
- builder2.append(baseType);
- builder2.append(", path=");
- builder2.append(path);
- builder2.append(", description=");
- builder2.append(description);
- builder2.append(", reference=");
- builder2.append(reference);
- builder2.append(", unknownSchemaNodes=");
- builder2.append(unknownSchemaNodes);
- builder2.append(", status=");
- builder2.append(status);
- builder2.append(", units=");
- builder2.append(units);
- builder2.append(", defaultValue=");
- builder2.append(defaultValue);
- builder2.append("]");
- return builder2.toString();
+ StringBuilder builder = new StringBuilder();
+ builder.append("ExtendedType [typeName=");
+ builder.append(typeName);
+ builder.append(", baseType=");
+ builder.append(baseType);
+ builder.append(", path=");
+ builder.append(path);
+ builder.append(", description=");
+ builder.append(description);
+ builder.append(", reference=");
+ builder.append(reference);
+ builder.append(", unknownSchemaNodes=");
+ builder.append(unknownSchemaNodes);
+ builder.append(", status=");
+ builder.append(status);
+ builder.append(", units=");
+ builder.append(units);
+ builder.append(", defaultValue=");
+ builder.append(defaultValue);
+ builder.append("]");
+ return builder.toString();
}
public List<RangeConstraint> getRanges() {
public static DataSchemaNode findDataSchemaNode(final SchemaContext context, final SchemaPath schemaPath) {
if (schemaPath != null) {
final Module module = resolveModuleFromSchemaPath(context, schemaPath);
- final Queue<QName> prefixedPath = new LinkedList<QName>(schemaPath.getPath());
+ final Queue<QName> prefixedPath = new LinkedList<>(schemaPath.getPath());
if ((module != null) && (prefixedPath != null)) {
return findSchemaNodeForGivenPath(context, module, prefixedPath);
return null;
}
- public static Module resolveModuleFromSchemaPath(final SchemaContext context, final SchemaPath schemaPath) {
+ private static Module resolveModuleFromSchemaPath(final SchemaContext
+ context, final SchemaPath schemaPath) {
if ((schemaPath != null) && (schemaPath.getPath() != null)) {
- List<QName> path = schemaPath.getPath();
- final QName qname = path.get(path.size()-1);
+ final List<QName> path = schemaPath.getPath();
+ if (!path.isEmpty()) {
+ final QName qname = path.get(path.size() - 1);
- if ((qname != null) && (qname.getNamespace() != null)) {
- return context.findModuleByNamespace(qname.getNamespace());
+ if ((qname != null) && (qname.getNamespace() != null)) {
+ return context.findModuleByNamespace(qname.getNamespace());
+ }
}
}
return null;
}
- public static Module resolveModuleFromTypePath(final SchemaContext context, final TypeDefinition<?> type) {
+ public static Module findParentModuleForTypeDefinition(
+ final SchemaContext context, final TypeDefinition<?> type) {
final SchemaPath schemaPath = type.getPath();
if ((schemaPath != null) && (schemaPath.getPath() != null)) {
if(type instanceof ExtendedType) {
List<QName> path = schemaPath.getPath();
- final QName qname = path.get(path.size()-1);
+ final QName qname = path.get(path.size() - 1);
if ((qname != null) && (qname.getNamespace() != null)) {
return context.findModuleByNamespace(qname.getNamespace());
}
} else {
- LinkedList<QName> path = new LinkedList<QName>(schemaPath.getPath());
- path.removeLast();
- final QName qname = path.get(path.size()-1);
+ List<QName> path = schemaPath.getPath();
+ final QName qname = path.get(path.size() - 2);
if ((qname != null) && (qname.getNamespace() != null)) {
return context.findModuleByNamespace(qname.getNamespace());
"The Schema Path MUST contain at least ONE QName which defines namespace and Local name" +
"of path.");
}
- final QName qname = qnamedPath.get(0);
+ final QName qname = qnamedPath.get(qnamedPath.size() - 1);
return context.findModuleByNamespace(qname.getNamespace());
}
private static Queue<QName> xpathToQNamePath(final SchemaContext context, final Module parentModule,
final String xpath) {
- final Queue<QName> path = new LinkedList<QName>();
+ final Queue<QName> path = new LinkedList<>();
if (xpath != null) {
final String[] prefixedPath = xpath.split("/");
private static Queue<QName> resolveRelativeXPath(final SchemaContext context, final Module module,
final RevisionAwareXPath relativeXPath,
final SchemaPath leafrefSchemaPath) {
- final Queue<QName> absolutePath = new LinkedList<QName>();
+ final Queue<QName> absolutePath = new LinkedList<>();
if ((module != null) && (relativeXPath != null) && !relativeXPath.isAbsolute()
&& (leafrefSchemaPath != null)) {
/**
* This class represents a configuration manager for the Load Balancer service.
- * This class is responsible for managing(store/update/delete) the load balancer
+ * This class is responsible for managing (store/update/delete) the load balancer
* configuration that it receives through REST APIs or from any other applications
* present in the controller.
*
*/
public class ConfigManager implements IConfigManager{
-
+
/*
* Logger instance
*/
private static final Logger cmLogger = LoggerFactory.getLogger(ConfigManager.class);
-
+
/*
- * All the available VIPs
+ * All the available VIPs
*/
private HashMap<String,VIP> vips = new HashMap<String,VIP>();
-
+
/*
* All the available Pools
*/
private HashMap<String,Pool> pools = new HashMap<String,Pool>();
-
+
public ConfigManager(){
}
-
+
@Override
public boolean vipExists(String name) {
return this.vips.containsKey(name);
}
-
+
@Override
public boolean vipExists(VIP vip){
if(vip.getName()==null){
}
return true;
}
-
+
@Override
public boolean vipExists(String name,String ip,String protocol,short protocolPort,String poolName){
-
+
VIP vip = new VIP(name,ip,protocol,protocolPort,poolName);
-
+
//Check VIP with the same name
-
+
if(!vips.containsKey(name)){
//Check VIP with the same ip, protocol and protocolPort
if(!vips.containsValue(vip)){
-
+
//if you reach here, means this VIP don't exist
return false;
}
}
-
+
//Yeah, i have it.
- return true;
+ return true;
}
-
+
@Override
public Set<VIP> getAllVIPs(){
return new HashSet<VIP>(this.vips.values());
}
-
+
public VIP getVIPWithPoolName(VIP vip){
cmLogger.info("Search a VIP with name:{}",vip);
for(VIP vipTemp: this.vips.values()){
if(vipTemp.equals(vip)){
-
+
cmLogger.info("Found VIP with pool detail : {}",vipTemp);
return vipTemp;
}
}
-
+
cmLogger.info("VIP with pool detail not found ");
return null;
}
-
+
@Override
public VIP createVIP(String name,String ip,String protocol,short protocolPort,String poolName){
-
+
cmLogger.info("Create VIP with the following details :[ name : "+name
+" ip : "+ip
+" protocol : "+protocol
+" protocol_port : "+protocolPort
+" pool name : "+poolName);
-
+
VIP vip = new VIP(name,ip,protocol,protocolPort,poolName);
-
+
if(poolName !=null && !poolName.isEmpty()){
if(pools.containsKey(poolName)){
pools.get(poolName).addVIP(vip);
}
}
-
+
vip.setStatus(LBConst.STATUS_ACTIVE);
this.vips.put(name, vip);
-
+
cmLogger.info("New VIP created : "+vip.toString());
return vip;
}
-
+
@Override
public String getVIPAttachedPool(String name) {
return this.vips.get(name).getPoolName();
}
-
+
@Override
public VIP updateVIP(String name, String poolName){
-
+
cmLogger.info("Updating VIP : "+name+" pool name to "+poolName);
-
+
if(vips.containsKey(name)){
VIP vip = vips.get(name);
if(vip.getPoolName() == null){
cmLogger.error("VIP with name: "+name+" does not exist");
return null;
}
-
+
@Override
public VIP deleteVIP(String name){
-
+
cmLogger.info("Deleting VIP : "+name);
-
+
VIP vip = vips.get(name);
-
+
String poolName = vip.getPoolName();
-
+
if(poolName != null){
if(pools.containsKey(poolName)){
Pool pool = pools.get(poolName);
pool.removeVIP(vip.getName());
}
}
-
+
cmLogger.info("VIP removed : "+vip.toString());
-
+
vips.remove(vip.getName());
-
+
return vip;
}
-
+
@Override
public boolean memberExists(String name, String poolName) {
if(this.pools.containsKey(poolName)){
}
return false;
}
-
+
@Override
public boolean memberExists(String name, String memberIP,String poolName){
if(!this.pools.containsKey(poolName))
return false;
-
+
return this.pools.get(poolName).poolMemberExists(new PoolMember(name, memberIP, poolName));
}
-
+
@Override
public PoolMember addPoolMember(String name, String memberIP, String poolName){
-
+
PoolMember pm = new PoolMember(name,memberIP,poolName);
-
+
cmLogger.info("Adding pool member : "+pm.toString());
-
+
pools.get(poolName).addMember(pm);
-
+
return pm;
}
-
+
@Override
public PoolMember removePoolMember(String name, String poolName){
-
+
cmLogger.info("Removing pool member : {} from pool {}",name, poolName);
-
+
Pool pool = pools.get(poolName);
-
+
PoolMember pm = pool.getMember(name);
-
+
pool.removeMember(name);
-
+
cmLogger.info("Pool member {} removed from {} ",name,poolName);
-
+
return pm;
}
-
+
@Override
public Set<Pool> getAllPools(){
return new HashSet<Pool>(this.pools.values());
}
-
+
@Override
public boolean poolExists(String name) {
return this.pools.containsKey(name);
}
-
+
@Override
public boolean poolExists(String name, String lbMethod){
-
+
return pools.containsValue(new Pool(name,lbMethod));
}
-
+
@Override
public Pool createPool(String name, String lbMethod){
-
+
Pool newPool = new Pool(name,lbMethod);
-
+
cmLogger.info("New pool created : " + newPool.toString());
-
+
pools.put(name, newPool);
-
+
return newPool;
}
-
+
@Override
public Pool deletePool(String poolName){
-
+
Pool pool = pools.get(poolName);
-
+
for(VIP vip:pool.getAllVip()){
-
+
vip.setPoolName(null);
-
+
}
-
+
cmLogger.info("Pool removed : "+pool.toString());
-
+
pools.remove(poolName);
-
+
return pool;
}
-
+
@Override
public Pool getPool( String poolName){
if(pools.containsKey(poolName)){
}
return null;
}
-
+
@Override
public Set<PoolMember> getAllPoolMembers(String poolName) {
-
+
if(pools.containsKey(poolName)){
return new HashSet<PoolMember>(pools.get(poolName).getAllMembers());
}
return null;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
*
*/
public interface IConfigManager {
-
+
/**
* Return all existing VIPs
* @return Set of VIP's
* if there is no VIP, it will return empty set.
*/
public Set<VIP> getAllVIPs();
-
+
/**
* Check if VIP with the 'name' exists
- * @param name Name of the VIP
- * @return true if exists
- * false else
+ * @param name Name of the VIP
+ * @return true if exists
+ * false else
*/
public boolean vipExists(String name);
/**
- * Check if VIP exists with the details 'VIP'
- * @param vip Search for this VIP
- * @return true if exists
- * false else
+ * Check if VIP exists with the details 'VIP'
+ * @param vip Search for this VIP
+ * @return true if exists
+ * false else
*/
public boolean vipExists(VIP vip);
/**
* Check if VIP with the provided details exists
- * @param name Name of the VIP
- * @param ip IP of the VIP
- * @param protocol IP Protocol of the VIP (TCP/UDP)
- * @param protocolPort Transport port of the VIP (e.g 5550)
- * @param poolName Name of the pool attached with the VIP
- * @return true if exists
- * false else
+ * @param name Name of the VIP
+ * @param ip IP of the VIP
+ * @param protocol IP Protocol of the VIP (TCP/UDP)
+ * @param protocolPort Transport port of the VIP (e.g 5550)
+ * @param poolName Name of the pool attached with the VIP
+ * @return true if exists
+ * false else
*/
public boolean vipExists(String name,String ip,String protocol,short protocolPort,String poolName);
-
+
/**
* Add VIP to the configuration
- * @param name Name of the VIP
- * @param ip IP of the VIP
- * @param protocol IP Protocol of the VIP (TCP/UDP)
- * @param protocolPort Transport port of the VIP
- * @param poolName Name of the pool that VIP will use for load balancing its traffic
- * @return Newly created VIP
+ * @param name Name of the VIP
+ * @param ip IP of the VIP
+ * @param protocol IP Protocol of the VIP (TCP/UDP)
+ * @param protocolPort Transport port of the VIP
+ * @param poolName Name of the pool that VIP will use for load balancing its traffic
+ * @return Newly created VIP
*/
public VIP createVIP(String name,String ip,String protocol,short protocolPort,String poolName);
-
+
/**
* Return pool attached to VIP
* @param name Name of the VIP
- * @return Name of the pool attached to VIP
- * else null
+ * @return Name of the pool attached to VIP
+ * else null
*/
public String getVIPAttachedPool(String name);
/**
* Update pool name of the VIP.
- * @param name Name of the VIP
- * @param poolName Attach this pool to VIP
- * @return Updated VIP If successful
- * null If this VIP is already attached to any existing pool.
- */
+ * @param name Name of the VIP
+ * @param poolName Attach this pool to VIP
+ * @return Updated VIP If successful
+ * null If this VIP is already attached to any existing pool.
+ */
public VIP updateVIP(String name, String poolName);
-
+
/**
* Delete the VIP
- * @param name Delete VIP with this name
- * @return Details of the deleted VIP
+ * @param name Delete VIP with this name
+ * @return Details of the deleted VIP
*/
public VIP deleteVIP(String name);
/**
- * Check if pool member with the 'name' present in the pool with name 'poolName'
- * @param name Name of the pool member
- * @param poolName Name of the pool, you want to search for pool member
- * @return true If exist
- * false else
+ * Check if pool member with the 'name' present in the pool with name 'poolName'
+ * @param name Name of the pool member
+ * @param poolName Name of the pool, you want to search for pool member
+ * @return true If exist
+ * false else
*/
public boolean memberExists(String name, String poolName);
/**
* Check if pool member with name 'name' and IP 'memberIP' exist in the pool 'poolName'
- * @param name Name of the pool member
- * @param memberIP IP of the pool member
- * @param poolName Name of the pool member you want to search
- * @return true If Exist
- * false else
+ * @param name Name of the pool member
+ * @param memberIP IP of the pool member
+ * @param poolName Name of the pool member you want to search
+ * @return true If Exist
+ * false else
*/
public boolean memberExists(String name, String memberIP,String poolName);
-
+
/**
* Return all pool members of the pool 'poolName'
- * @param poolName Name of the pool
- * @return Set of all the pool members if pool with the name present in the configuration
- * null else
- *
+ * @param poolName Name of the pool
+ * @return Set of all the pool members if pool with the name present in the configuration
+ * null else
+ *
*/
public Set<PoolMember> getAllPoolMembers(String poolName);
-
+
/**
* Add new pool member to the configuration
- * @param name Name of the pool
- * @param memberIP IP of the pool
- * @param poolName Attach pool member to this pool
- * @return Newly created pool member
+ * @param name Name of the pool
+ * @param memberIP IP of the pool
+ * @param poolName Attach pool member to this pool
+ * @return Newly created pool member
*/
public PoolMember addPoolMember(String name, String memberIP, String poolName);
-
+
/**
* Remove pool member from the pool
- * @param name Name of the pool member
- * @param poolName Name of the pool
- * @return Details of the removed pool member
+ * @param name Name of the pool member
+ * @param poolName Name of the pool
+ * @return Details of the removed pool member
*/
public PoolMember removePoolMember(String name, String poolName);
/**
* Return all the existing pools
- * @return Set of Pools
+ * @return Set of Pools
*/
public Set<Pool> getAllPools();
-
+
/**
* Return pool with input name
- * @param poolName Name of the pool
- * @return Details of the pool if pool exist
- * null else
+ * @param poolName Name of the pool
+ * @return Details of the pool if pool exist
+ * null else
*/
public Pool getPool(String poolName);
-
+
/**
- * Check if pool exists with the input name
- * @param name Name of the pool
- * @return true If exists
- * false else
+ * Check if pool exists with the input name
+ * @param name Name of the pool
+ * @return true If exists
+ * false else
*/
public boolean poolExists(String name);
/**
* Check if pool exists with the input name and loadbalancing method.
- * @param name Name of the pool
- * @param lbMethod Load balancing method name
- * @return true If exists
- * false else
+ * @param name Name of the pool
+ * @param lbMethod Load balancing method name
+ * @return true If exists
+ * false else
*/
public boolean poolExists(String name, String lbMethod);
-
+
/**
* Create new pool with the provided details
- * @param name Name of the pool
- * @param lbMethod Load balancing method this pool will use
- * @return Details of the newly created pool
+ * @param name Name of the pool
+ * @param lbMethod Load balancing method this pool will use
+ * @return Details of the newly created pool
*/
public Pool createPool(String name, String lbMethod);
-
+
/**
* Delete pool with the provided name
- * @param poolName Name of the pool
- * @return Details of the deleted pool
+ * @param poolName Name of the pool
+ * @return Details of the deleted pool
*/
public Pool deletePool(String poolName);
*
*/
public class LBConst {
-
+
public static final int FORWARD_DIRECTION_LB_FLOW = 0;
-
+
public static final int REVERSE_DIRECTION_LB_FLOW = 1;
-
+
public static final String ROUND_ROBIN_LB_METHOD = "roundrobin";
-
+
public static final String RANDOM_LB_METHOD = "random";
-
+
public static final String STATUS_ACTIVE="active";
-
+
public static final String STATUS_INACTIVE="inactive";
-
+
public static final String STATUS_PENDING="pending";
-
+
public static final String STATUS_ERROR="error";
-
-}
+}
import org.slf4j.LoggerFactory;
/**
- * Class defines utilty methods that will be used by different components
- * of the load balancer service
+ * Class defines utility methods that will be used by different components
+ * of the load balancer service
*
*/
public class LBUtil {
-
+
private static final Logger lbuLogger = LoggerFactory.getLogger(LBUtil.class);
-
+
public LBUtil(){}
-
+
/**
- * Extract the details of the source machine that sent this packet 'inPkt'
- * @param inPkt Packet that is received by the controller
- * @return Details of the source machine in Client object.
+ * Extract the details of the source machine that sent this packet 'inPkt'
+ * @param inPkt Packet that is received by the controller
+ * @return Details of the source machine in Client object.
*/
public Client getClientFromPacket(IPv4 inPkt){
lbuLogger.info("Find client information from packet : {}",inPkt.toString());
-
+
String ip = NetUtils.getInetAddress(inPkt.getSourceAddress()).getHostAddress();
-
+
String protocol = IPProtocols.getProtocolName(inPkt.getProtocol());
-
+
lbuLogger.info("client ip {} and protocl {}",ip,protocol);
-
+
Packet tpFrame= inPkt.getPayload();
-
+
lbuLogger.info("Get protocol layer {}",tpFrame.toString());
-
+
short port = 0;
-
+
if(protocol.equals(IPProtocols.TCP.toString())){
TCP tcpFrame = (TCP)tpFrame;
port = tcpFrame.getSourcePort();
UDP udpFrame = (UDP)tpFrame;
port = udpFrame.getSourcePort();
}
-
+
lbuLogger.info("Found port {}",port);
-
+
Client source = new Client(ip, protocol,port);
-
+
lbuLogger.info("Client information : {}",source.toString());
-
+
return source;
}
-
+
/**
* Extract the details of the destination machine where this packet 'inPkt' need
* to be delivered
* @param inPkt Packet that is received by controller for forwarding
- * @return Details of the destination machine packet in VIP
+ * @return Details of the destination machine packet in VIP
*/
public VIP getVIPFromPacket(IPv4 inPkt){
-
+
lbuLogger.info("Find VIP information from packet : {}",inPkt.toString());
-
+
String ip = NetUtils.getInetAddress(inPkt.getDestinationAddress()).getHostAddress();
-
+
String protocol = IPProtocols.getProtocolName(inPkt.getProtocol());
-
+
Packet tpFrame= inPkt.getPayload();
-
+
short port = 0;
-
+
if(protocol.equals(IPProtocols.TCP.toString())){
TCP tcpFrame = (TCP)tpFrame;
port = tcpFrame.getDestinationPort();
}else{
-
+
UDP udpFrame = (UDP)tpFrame;
port = udpFrame.getDestinationPort();
}
-
+
VIP dest = new VIP(null,ip, protocol,port,null);
-
+
lbuLogger.info("VIP information : {}",dest.toString());
-
+
return dest;
}
}
\ No newline at end of file
* e.g TCP traffic from two different ports from the same host to a given VIP will be considered
* as two different clients by this service. Similarly, traffic using two different protocols
* (TCP, UDP) from the same host will be considered as two different clients.
- *
+ *
*/
public class Client {
-
+
/*
* IP address of the client (source address)
*/
private String ip;
-
+
/*
* Network protocol of the traffic sent by client
*/
private String protocol;
-
+
/*
* Port used to send network traffic (source port)
*/
private short port;
-
+
public Client(String ip, String protocol, short port){
this.ip = ip;
this.protocol = protocol;
this.port = port;
}
-
+
/**
* @return the client IP
*/
public String getIp() {
return ip;
}
-
+
/**
* @param ip the IP to set
*/
public void setIp(String ip) {
this.ip = ip;
}
-
+
/**
* @return the client network protocol
*/
public String getProtocol() {
return protocol;
}
-
+
/**
* @param protocol the protocol to set
*/
public void setProtocol(String protocol) {
this.protocol = protocol;
}
-
+
/**
* @return the client port
*/
public short getPort() {
return port;
}
-
+
/**
* @param port the port to set
*/
public void setPort(short port) {
this.port = port;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
result = prime * result+ ((protocol == null) ? 0 : protocol.hashCode());
return result;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
}
return true;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
/**
* This class represents the pool of hosts among which incoming traffic
- * will be load balanced. Each pool will load balance the traffic among its pool members
- * based on the loadbalancing policy set for the pool.
+ * will be load balanced. Each pool will load balance the traffic among its pool members
+ * based on the loadbalancing policy set for the pool.
* Currently, the pool supports two load balancing policies:
* 1. Round Robin Policy{@link org.opendaylight.controller.samples.loadbalancer.policies.RoundRobinLBPolicy}
* 2. Random Policy {@link org.opendaylight.controller.samples.loadbalancer.policies.RandomLBPolicy}
- *
+ *
* NOTE: After creation of the pool, user can't update (change) its load balancing policy.
* NOTE: Each Pool should have a unique name.
*/
@XmlRootElement(name="pool")
@XmlAccessorType(XmlAccessType.NONE)
public class Pool {
-
+
/*
* Unique name of the pool
*/
@XmlElement
private String name;
-
+
/*
* Associated load balancing policy
*/
@XmlElement(name="lbmethod")
private String lbMethod;
-
+
/*
* Status of the pool (active/inactive)
*/
@XmlElement
private String status;
-
+
/*
* List of all the VIPs using this pool for load balancing their traffic - more than
* one VIP can be mapped to each pool.
*/
@XmlElement
private ArrayList<VIP> vips = new ArrayList<VIP>();
-
+
/*
* List of all the pool members used for load balancing the traffic
*/
@XmlElement
private ArrayList<PoolMember> members = new ArrayList<PoolMember>();
-
+
/*
* Private constructor used for JAXB mapping
*/
@SuppressWarnings("unused")
private Pool() {}
-
+
/**
* Getter/ Setter methods
*/
-
+
public Pool(String name,
String lbMethod) {
this.name = name;
this.lbMethod = lbMethod;
}
-
+
/**
* @return the name
*/
public String getName() {
return name;
}
-
+
/**
* @param name the name to set
*/
public void setName(String name) {
this.name = name;
}
-
+
/**
* @return the lbMethod
*/
public String getLbMethod() {
return lbMethod;
}
-
+
/**
* @param lbMethod the lbMethod to set
*/
public void setLbMethod(String lbMethod) {
this.lbMethod = lbMethod;
}
-
+
/**
* @return the status
*/
public String getStatus() {
return status;
}
-
+
/**
* @param status the status to set
*/
public void setStatus(String status) {
this.status = status;
}
-
+
/**
* @return the vip
*/
public ArrayList<VIP> getAllVip() {
return vips;
}
-
+
/**
* @param vip the vip to set
*/
public void setVips(ArrayList<VIP> vips) {
this.vips = vips;
}
-
+
/**
* @return the members
*/
public ArrayList<PoolMember> getAllMembers() {
return members;
}
-
+
/**
* @param members the members to set
*/
public void setMembers(ArrayList<PoolMember> members) {
this.members = members;
}
-
+
/**
* Add new VIP to the VIP list
* @param vip new VIP to add
public void addVIP(VIP vip){
this.vips.add(vip);
}
-
+
/**
* Remove VIP with given name from the VIP list of the pool
* @param name Name of the VIP
- * @return true If VIP was using this pool and removed
+ * @return true If VIP was using this pool and removed
* false IF VIP is not using this pool
*/
public boolean removeVIP(String name){
}
return false;
}
-
+
/**
* Check if the given pool member is part of this pool
* @param pm Search for this pool member
- * @return true If pool member is attached to this pool
+ * @return true If pool member is attached to this pool
* false else
*/
public boolean poolMemberExists(PoolMember pm){
return this.members.contains(pm);
}
-
+
/**
* Returns the pool member with the given name
* @param name Search for this pool member
- * @return PoolMember If pool member is attached to this pool
+ * @return PoolMember If pool member is attached to this pool
* null else
*/
public PoolMember getMember(String name){
-
+
for(PoolMember pm: this.members){
if(pm.getName().equals(name)){
return pm;
}
return null;
}
-
+
/**
* Add new pool member to the pool
* @param pm Add this new pool
public void addMember(PoolMember pm){
this.members.add(pm);
}
-
+
/**
* Remove pool member from the pool list
* @param name Remove this pool member
- * @return true If pool member was attached to this pool and successfully removed
- * false If pool member is not attached to this pool
+ * @return true If pool member was attached to this pool and successfully removed
+ * false If pool member is not attached to this pool
*/
public boolean removeMember(String name){
for(PoolMember pm: this.members){
}
return false;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
result = prime * result + ((name == null) ? 0 : name.hashCode());
return result;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
}
return true;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
* This class represents the host where load balancing service will
* redirect VIP traffic for load balancing. All these hosts have to
* register with a pool to be a part of traffic load balancing.
- * This entity is referred to as a 'PoolMember'.
- * Load balancer service differentiates each pool member based on its
+ * This entity is referred to as a 'PoolMember'.
+ * Load balancer service differentiates each pool member based on its
* two properties { ip address, attached pool }.
* A host (IP) can be attached to two different pools through creation of two
* different pool member objects.
- *
+ *
* NOTE: Each pool member should have a unique name.
*
*/
@XmlRootElement(name="poolmember")
@XmlAccessorType(XmlAccessType.NONE)
public class PoolMember {
-
+
/*
* Unique name of the pool member
*/
@XmlElement
private String name;
-
+
/*
* IP address of the pool member
*/
@XmlElement
private String ip;
-
+
/*
* Name of the pool this member is attached to.
*/
@XmlElement(name="poolname")
private String poolName;
-
+
/*
* Status (active/inactive)
*/
@XmlElement
private String status;
-
+
/**
* Private constructor used for JAXB mapping
*/
@SuppressWarnings("unused")
private PoolMember() {}
-
+
public PoolMember(String name, String memberIP, String poolName){
this.name = name;
this.ip = memberIP;
this.poolName = poolName;
}
-
+
/**
* @return the name
*/
public String getName() {
return name;
}
-
+
/**
* @param name the name to set
*/
public void setName(String name) {
this.name = name;
}
-
+
/**
* @return the ip
*/
public String getIp() {
return ip;
}
-
+
/**
* @param ip the ip to set
*/
public void setIp(String ip) {
this.ip = ip;
}
-
+
/**
* @return the poolName
*/
public String getPoolName() {
return poolName;
}
-
+
/**
* @param poolName the poolName to set
*/
public void setPoolName(String poolName) {
this.poolName = poolName;
}
-
+
/**
* @return the status
*/
public String getStatus() {
return status;
}
-
+
/**
* @param status the status to set
*/
public void setStatus(String status) {
this.status = status;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
+ ((poolName == null) ? 0 : poolName.hashCode());
return result;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
* 1. IP address of the VIP exposed by the application
* 2. Protocol of the network traffic (TCP/UDP)
* 3. Port to which incoming traffic is destined
- *
- * User is allowed to create mutliple VIPs with the same IP, but all such VIPs (with the same IP)
+ *
+ * User is allowed to create multiple VIPs with the same IP, but all such VIPs (with the same IP)
* should differ at least in the protocol or port or both.
- *
+ *
* NOTE: Each VIP should have a unique name.
*/
@XmlRootElement(name="vip")
@XmlAccessorType(XmlAccessType.NONE)
public class VIP {
-
+
/*
* Unique name of the VIP
*/
@XmlElement
private String name;
-
+
/*
- * Virtual IP address of the VIP
+ * Virtual IP address of the VIP
*/
@XmlElement
private String ip;
-
+
/*
- * Network traffic protocol
+ * Network traffic protocol
*/
@XmlElement
private String protocol;
-
+
/*
* Port where network traffic is destined (destination port)
*/
@XmlElement
private short port;
-
+
/*
* Name of the pool attached to the VIP for load balancing its traffic
*/
@XmlElement(name="poolname")
private String poolName;
-
+
/*
* Status (Active/inactive)
*/
*/
@SuppressWarnings("unused")
private VIP() {}
-
+
public VIP(String name,
String ip,
String protocol,
this.port = port;
this.poolName = poolName;
}
-
+
public String getName() {
return name;
}
-
+
public void setName(String name) {
this.name = name;
}
-
+
public String getIp() {
return ip;
}
-
+
public void setIp(String ip) {
this.ip = ip;
}
-
+
public String getProtocol() {
return protocol;
}
-
+
public void setProtocol(String protocol) {
this.protocol = protocol;
}
-
+
public short getPort() {
return port;
}
-
+
public void setPort(short port) {
this.port = port;
}
-
+
public String getPoolName() {
return poolName;
}
-
+
public void setPoolName(String poolName) {
this.poolName = poolName;
}
-
+
/**
* @return the status
*/
public String getStatus() {
return status;
}
-
+
/**
* @param status the status to set
*/
public void setStatus(String status) {
this.status = status;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#hashCode()
*/
+ ((protocol == null) ? 0 : protocol.hashCode());
return result;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
public boolean equals(Object obj) {
if (this == obj) {
return true;
-
+
}
if (obj == null) {
return false;
if (!(obj instanceof VIP)) {
return false;
}
-
+
VIP other = (VIP) obj;
if (ip == null) {
if (other.ip != null) {
}
return true;
}
-
+
/* (non-Javadoc)
* @see java.lang.Object#toString()
*/
import org.opendaylight.controller.samples.loadbalancer.IConfigManager;
/**
- * Main application activator class for registering the dependencies and
+ * Main application activator class for registering the dependencies and
* initialising the load balancer application.
*
*/
public class Activator extends ComponentActivatorAbstractBase {
-
+
/*
* Logger instance
*/
props.put("salListenerName", "loadbalancer");
c.setInterface(new String[] { IListenDataPacket.class.getName(),
- IConfigManager.class.getName()}, props);
+ IConfigManager.class.getName()}, props);
c.add(createContainerServiceDependency(containerName).setService(
IDataPacketService.class).setCallbacks(
"setDataPacketService", "unsetDataPacketService")
.setRequired(true));
-
+
c.add(createContainerServiceDependency(containerName).setService(
IRouting.class).setCallbacks("setRouting", "unsetRouting")
.setRequired(false));
import org.slf4j.LoggerFactory;
/**
- * This class is the main class that represents the load balancer service.
- * This is a sample load balancer application that balances traffic to backend servers
- * based on the source address and source port on each incoming packet. The service
+ * This class is the main class that represents the load balancer service.
+ * This is a sample load balancer application that balances traffic to backend servers
+ * based on the source address and source port on each incoming packet. The service
* reactively installs OpenFlow rules to direct all packets with a specific source address
- * and source port to one of the appropriate backend servers. The servers may be chosen
- * using a round robin policy or a random policy. This service can be configured via a
+ * and source port to one of the appropriate backend servers. The servers may be chosen
+ * using a round robin policy or a random policy. This service can be configured via a
* REST APIs which are similar to the OpenStack Quantum LBaaS (Load-balancer-as-a-Service)
* v1.0 API proposal (http://wiki.openstack.org/Quantum/LBaaS)
- *
+ *
* To use this service, a virtual IP (or VIP) should be exposed to the clients of this service
* and used as the destination address. A VIP is a entity that comprises of a virtual IP, port
* and protocol (TCP or UDP).
* Assumptions:
* 1. One or more VIPs may be mapped to the same server pool. All VIPs that share the same
* pool must also share the same load balancing policy (random or round robin).
- *
+ *
* 2. Only one server pool can be be assigned to a VIP.
- *
+ *
* 3. All flow rules are installed with an idle timeout of 5 seconds.
- *
+ *
* 4. Packets to a VIP must leave the OpenFlow cluster from the same switch from where
* it entered it.
- *
+ *
* 5. When you delete a VIP or a server pool or a server from a pool, the service does not
* delete the flow rules it has already installed. The flow rules should automatically
- * time out after the idle timeout of 5 seconds.
+ * time out after the idle timeout of 5 seconds.
*
*/
public class LoadBalancerService implements IListenDataPacket, IConfigManager{
-
+
/*
* Logger instance
*/
private static Logger lbsLogger = LoggerFactory.getLogger(LoadBalancerService.class);
-
+
/*
* Single instance of the configuration manager. Application passes this reference to all
* the new policies implemented for load balancing.
*/
private static ConfigManager configManager = new ConfigManager();
-
+
/*
* Round robing policy instance. Need to implement factory patterns to get
* policy instance.
*/
private static RoundRobinLBPolicy rrLBMethod= new RoundRobinLBPolicy(configManager);
-
+
/*
* Random policy instance.
*/
private static RandomLBPolicy ranLBMethod= new RandomLBPolicy(configManager);
-
+
/*
* Reference to the data packet service
*/
private IDataPacketService dataPacketService = null;
-
+
/*
* Reference to the host tracker service
*/
private IfIptoHost hostTracker;
-
+
/*
* Reference to the forwarding manager
*/
private IForwardingRulesManager ruleManager;
-
+
/*
* Reference to the routing service
*/
private IRouting routing;
-
+
/*
* Load balancer application installs all flows with priority 2.
*/
private String containerName = null;
/*
- * Set/unset methods for the service instance that load balancer
+ * Set/unset methods for the service instance that load balancer
* service requires
*/
public String getContainerName() {
return GlobalConstants.DEFAULT.toString();
return containerName;
}
-
+
void setDataPacketService(IDataPacketService s) {
this.dataPacketService = s;
}
this.dataPacketService = null;
}
}
-
+
public void setRouting(IRouting routing) {
this.routing = routing;
}
}
public void setHostTracker(IfIptoHost hostTracker) {
- lbsLogger.debug("Setting HostTracker");
+ lbsLogger.debug("Setting HostTracker");
this.hostTracker = hostTracker;
}
public void setForwardingRulesManager(
IForwardingRulesManager forwardingRulesManager) {
- lbsLogger.debug("Setting ForwardingRulesManager");
+ lbsLogger.debug("Setting ForwardingRulesManager");
this.ruleManager = forwardingRulesManager;
}
/**
* This method receives first packet of flows for which there is no
- * matching flow rule installed on the switch. IP addresses used for VIPs
+ * matching flow rule installed on the switch. IP addresses used for VIPs
* are not supposed to be used by any real/virtual host in the network.
* Hence, any forwarding/routing service will not install any flows rules matching
* these VIPs. This ensures that all the flows destined for VIPs will not find a match
* in the switch and will be forwarded to the load balancing service.
- * Service will decide where to route this traffic based on the load balancing
- * policy of the VIP's attached pool and will install appropriate flow rules
- * in a reactive manner.
+ * Service will decide where to route this traffic based on the load balancing
+ * policy of the VIP's attached pool and will install appropriate flow rules
+ * in a reactive manner.
*/
@Override
public PacketResult receiveDataPacket(RawPacket inPkt){
-
+
if (inPkt == null) {
return PacketResult.IGNORED;
}
-
+
Packet formattedPak = this.dataPacketService.decodeDataPacket(inPkt);
-
+
if (formattedPak instanceof Ethernet) {
byte[] vipMacAddr = ((Ethernet) formattedPak).getDestinationMACAddress();
Object ipPkt = formattedPak.getPayload();
-
+
if (ipPkt instanceof IPv4) {
-
+
lbsLogger.debug("Packet recieved from switch : {}",inPkt.getIncomingNodeConnector().getNode().toString());
IPv4 ipv4Pkt = (IPv4)ipPkt;
if(IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.TCP.toString())
|| IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.UDP.toString())){
-
+
lbsLogger.debug("Packet protocol : {}",IPProtocols.getProtocolName(ipv4Pkt.getProtocol()));
Client client = new LBUtil().getClientFromPacket(ipv4Pkt);
VIP vip = new LBUtil().getVIPFromPacket(ipv4Pkt);
-
+
if(configManager.vipExists(vip)){
VIP vipWithPoolName = configManager.getVIPWithPoolName(vip);
String poolMemberIp = null;
if(configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod().equalsIgnoreCase(LBConst.ROUND_ROBIN_LB_METHOD)){
-
+
poolMemberIp = rrLBMethod.getPoolMemberForClient(client,vipWithPoolName);
}
-
+
if(configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod().equalsIgnoreCase(LBConst.RANDOM_LB_METHOD)){
poolMemberIp = ranLBMethod.getPoolMemberForClient(client,vipWithPoolName);
}
-
+
try {
-
+
Node clientNode = inPkt.getIncomingNodeConnector().getNode();
HostNodeConnector hnConnector = this.hostTracker.hostFind(InetAddress.getByName(poolMemberIp));
-
+
Node destNode = hnConnector.getnodeconnectorNode();
-
+
lbsLogger.debug("Client is connected to switch : {}",clientNode.toString());
lbsLogger.debug("Destination pool machine is connected to switch : {}",destNode.toString());
-
+
//Get path between both the nodes
Path route = this.routing.getRoute(clientNode, destNode);
-
+
lbsLogger.info("Path between source (client) and destination switch nodes : {}",route.toString());
-
+
NodeConnector forwardPort = route.getEdges().get(0).getTailNodeConnector();
-
+
if(installLoadBalancerFlow(client,
vip,
clientNode,
}else{
lbsLogger.error("Not able to route traffic from client : {}",client );
}
-
+
if(installLoadBalancerFlow(client,
vip,
clientNode,
}
return PacketResult.IGNORED;
}
-
+
/*
* This method installs the flow rule for routing the traffic between two hosts.
* @param source Traffic is sent by this source
* @param destMachineMac MAC address of the pool member where traffic needs to be routed
* @param outport Use this port to send out traffic
* @param flowDirection FORWARD_DIRECTION_LB_FLOW or REVERSE_DIRECTION_LB_FLOW
- * @return true If flow installation was successful
- * false else
+ * @return true If flow installation was successful
+ * false else
* @throws UnknownHostException
*/
private boolean installLoadBalancerFlow(Client source,
VIP dest,
Node sourceSwitch,
String destMachineIp,
- byte[] destMachineMac,
+ byte[] destMachineMac,
NodeConnector outport,
int flowDirection) throws UnknownHostException{
-
+
Match match = new Match();
List<Action> actions = new ArrayList<Action>();
-
+
if(flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW){
match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
match.setField(MatchType.NW_SRC, InetAddress.getByName(source.getIp()));
match.setField(MatchType.NW_PROTO, IPProtocols.getProtocolNumberByte(dest.getProtocol()));
match.setField(MatchType.TP_SRC, source.getPort());
match.setField(MatchType.TP_DST, dest.getPort());
-
+
actions.add(new SetNwDst(InetAddress.getByName(destMachineIp)));
actions.add(new SetDlDst(destMachineMac));
}
-
+
if(flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW){
match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
match.setField(MatchType.NW_SRC, InetAddress.getByName(destMachineIp));
match.setField(MatchType.NW_PROTO, IPProtocols.getProtocolNumberByte(source.getProtocol()));
match.setField(MatchType.TP_SRC, dest.getPort());
match.setField(MatchType.TP_DST,source.getPort());
-
+
actions.add(new SetNwSrc(InetAddress.getByName(dest.getIp())));
actions.add(new SetDlSrc(destMachineMac));
}
-
+
actions.add(new Output(outport));
-
+
// Make sure the priority for IP switch entries is
// set to a level just above default drop entries
-
+
Flow flow = new Flow(match, actions);
flow.setIdleTimeout((short) 5);
flow.setHardTimeout((short) 0);
flow.setPriority(LB_IPSWITCH_PRIORITY);
-
+
String policyName = source.getIp()+":"+source.getProtocol()+":"+source.getPort();
String flowName =null;
-
+
if(flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW){
flowName = "["+policyName+":"+source.getIp() + ":"+dest.getIp()+"]";
}
-
+
if(flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW){
-
+
flowName = "["+policyName+":"+dest.getIp() + ":"+source.getIp()+"]";
}
-
+
FlowEntry fEntry = new FlowEntry(policyName, flowName, flow, sourceSwitch);
-
+
lbsLogger.info("Install flow entry {} on node {}",fEntry.toString(),sourceSwitch.toString());
-
+
if(!this.ruleManager.checkFlowEntryConflict(fEntry)){
if(this.ruleManager.installFlowEntry(fEntry).isSuccess()){
return true;
}
return false;
}
-
+
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
Dictionary<?, ?> props = c.getServiceProperties();
if (props != null) {
this.containerName = (String) props.get("containerName");
-
+
lbsLogger.info("Running container name:" + this.containerName);
}else {
-
+
// In the Global instance case the containerName is empty
this.containerName = "";
}
lbsLogger.info(configManager.toString());
}
-
+
/**
* Function called by the dependency manager when at least one
* dependency become unsatisfied or when the component is shutting
/*
* All the methods below are just proxy methods to direct the REST API requests to configuration
- * manager. We need this redirection as currently, opendaylight supports only one
- * implementation of the service.
+ * manager. We need this redirection as currently, opendaylight supports only one
+ * implementation of the service.
*/
@Override
public Set<VIP> getAllVIPs() {
return configManager.getAllVIPs();
}
-
+
@Override
public boolean vipExists(String name, String ip, String protocol,
short protocolPort, String poolName) {
return configManager.vipExists(name, ip, protocol, protocolPort, poolName);
}
-
+
@Override
public boolean vipExists(VIP vip) {
return configManager.vipExists(vip);
}
-
+
@Override
public VIP createVIP(String name, String ip, String protocol,
short protocolPort, String poolName) {
return configManager.createVIP(name, ip, protocol, protocolPort, poolName);
}
-
+
@Override
public VIP updateVIP(String name, String poolName) {
return configManager.updateVIP(name, poolName);
}
-
+
@Override
public VIP deleteVIP(String name) {
return configManager.deleteVIP(name);
}
-
+
@Override
public boolean memberExists(String name, String memberIP, String poolName) {
return configManager.memberExists(name, memberIP, poolName);
}
-
+
@Override
public Set<PoolMember> getAllPoolMembers(String poolName) {
-
+
return configManager.getAllPoolMembers(poolName);
}
-
+
@Override
- public PoolMember addPoolMember(String name,
+ public PoolMember addPoolMember(String name,
String memberIP,
String poolName) {
return configManager.addPoolMember(name, memberIP, poolName);
}
-
+
@Override
public PoolMember removePoolMember(String name, String poolName) {
-
+
return configManager.removePoolMember(name, poolName);
}
-
+
@Override
public Set<Pool> getAllPools() {
-
+
return configManager.getAllPools();
}
-
+
@Override
public Pool getPool(String poolName) {
return configManager.getPool(poolName);
}
-
+
@Override
public boolean poolExists(String name, String lbMethod) {
return configManager.poolExists(name, lbMethod);
}
-
+
@Override
public Pool createPool(String name, String lbMethod) {
return configManager.createPool(name, lbMethod);
}
-
+
@Override
public Pool deletePool(String poolName) {
return configManager.deletePool(poolName);
}
-
+
@Override
public boolean vipExists(String name) {
return configManager.vipExists(name);
}
-
+
@Override
public boolean memberExists(String name, String poolName) {
return configManager.memberExists(name, poolName);
}
-
+
@Override
public boolean poolExists(String name) {
return configManager.poolExists(name);
}
-
+
@Override
public String getVIPAttachedPool(String name) {
return configManager.getVIPAttachedPool(name);
* All new load balancer policies must implement this interface.
*/
public interface ILoadBalancingPolicy {
-
+
/**
* Returns IP address of the next pool member from the pool
* to which the load balancer service can direct incoming packets.
* @param source source on the packet
* @param dest virtual IP (VIP) that is used as destination on the packet
- * @return IP address of the next pool member which will serve
+ * @return IP address of the next pool member which will serve
* all incoming traffic destined for the given VIP and with the given source
* information
*/
public String getPoolMemberForClient(Client source, VIP dest);
-
+
}
\ No newline at end of file
*
*/
public class RandomLBPolicy implements ILoadBalancingPolicy {
-
+
/*
* Instance logger
*/
private static final Logger rLogger = LoggerFactory.getLogger(RandomLBPolicy.class);
-
+
/*
- * Reference to the configuration manager. This reference is passed from load balancer
+ * Reference to the configuration manager. This reference is passed from load balancer
* class.
*/
private ConfigManager cmgr;
-
+
/*
* Mapping between the client and the pool member that serves all traffic for that client.
*/
private HashMap<Client, PoolMember> clientMemberMap;
-
+
/*
* Random generator
*/
Random randomGenerator = null;
-
+
@SuppressWarnings("unused")
private RandomLBPolicy(){}
-
+
public RandomLBPolicy(ConfigManager cmgr){
this.cmgr = cmgr;
this.clientMemberMap = new HashMap<Client, PoolMember>();
}
@Override
public String getPoolMemberForClient(Client source, VIP dest){
-
+
rLogger.info("Received traffic from client : {} for VIP : {} ",source, dest);
-
+
syncWithLoadBalancerData();
-
+
PoolMember pm= null;
-
+
if(this.clientMemberMap.containsKey(source)){
pm= this.clientMemberMap.get(source);
rLogger.info("Client {} had sent traffic before,new traffic will be routed to the same pool member {}",source,pm);
}
return pm.getIp();
}
-
+
/*
* This method does the clean up. Whenever a new client packet arrives with a given VIP,
* this method checks the current configuration to see if any pool members have been deleted and
*/
private void syncWithLoadBalancerData(){
rLogger.debug("[Client - PoolMember] table before cleanup : {}",this.clientMemberMap.toString());
-
+
ArrayList<Client> removeClient = new ArrayList<Client>();
-
+
if(this.clientMemberMap.size() != 0){
for(Client client : this.clientMemberMap.keySet()){
-
+
if(!this.cmgr.memberExists(this.clientMemberMap.get(client).getName(),
this.clientMemberMap.get(client).getPoolName())){
removeClient.add(client);
}
}
}
-
+
for(Client client : removeClient){
this.clientMemberMap.remove(client);
-
+
rLogger.debug("Removed client : {} ",client);
}
rLogger.debug("[Client - PoolMember] table after cleanup : {}",this.clientMemberMap.toString());
}
-
+
}
*
*/
public class RoundRobinLBPolicy implements ILoadBalancingPolicy{
-
+
/*
* Logger instance
*/
private static final Logger rrLogger = LoggerFactory.getLogger(RoundRobinLBPolicy.class);
-
+
/*
- * Reference to the configuration manager. This reference is passed from load balancer
+ * Reference to the configuration manager. This reference is passed from load balancer
* class.
*/
private ConfigManager cmgr;
-
+
/*
* Mapping between the client and the pool member that serves all traffic for that client.
*/
private HashMap<Client, PoolMember> clientMemberMap;
-
+
/*
* Maintains the next pool member counter for the VIPs.
* More than one VIP can be attached to one pool, so each VIP
* the same pool.
*/
private HashMap<VIP,Integer> nextItemFromPool;
-
+
@SuppressWarnings("unused")
private RoundRobinLBPolicy(){}
-
+
public RoundRobinLBPolicy(ConfigManager cmgr){
this.cmgr = cmgr;
this.clientMemberMap = new HashMap<Client, PoolMember>();
this.nextItemFromPool = new HashMap<VIP, Integer>();
}
-
+
@Override
public String getPoolMemberForClient(Client source, VIP dest){
-
+
rrLogger.info("Received traffic from client : {} for VIP : {} ",source, dest);
-
+
syncWithLoadBalancerData();
-
+
PoolMember pm= null;
-
+
if(this.clientMemberMap.containsKey(source)){
-
+
pm= this.clientMemberMap.get(source);
rrLogger.info("Client {} had sent traffic before,new traffic will be routed to the same pool member {}",source,pm);
}else{
-
+
Pool pool = null;
if(nextItemFromPool.containsKey(dest)){
-
+
int memberNum = nextItemFromPool.get(dest).intValue();
rrLogger.debug("Packet is from new client for VIP {}",dest);
pool = this.cmgr.getPool(dest.getPoolName());
this.clientMemberMap.put(source, pm );
rrLogger.info("New client's packet will be directed to pool member {}",pm);
memberNum++;
-
+
if(memberNum > pool.getAllMembers().size()-1){
memberNum = 0;
}
rrLogger.debug("Next pool member for new client of VIP is set to {}",pool.getAllMembers().get(memberNum));
-
+
this.nextItemFromPool.put(dest, new Integer(memberNum));
}else{
rrLogger.debug("Network traffic for VIP : {} has appeared first time from client {}",dest,source);
pool = this.cmgr.getPool(dest.getPoolName());
pm = pool.getAllMembers().get(0);
this.clientMemberMap.put(source, pm);
-
+
rrLogger.info("Network traffic from client {} will be directed to pool member {}",pm);
this.nextItemFromPool.put(dest, new Integer(1));
rrLogger.debug("Next pool member for new client of VIP is set to {}",pool.getAllMembers().get(1));
}
return pm.getIp();
}
-
+
/*
* This method does the clean up. Whenever a new client packet arrives with a given VIP,
* this method checks the current configuration to see if any pool members have been deleted and
private void syncWithLoadBalancerData(){
rrLogger.debug("[Client - PoolMember] table before cleanup : {}",this.clientMemberMap.toString());
ArrayList<Client> removeClient = new ArrayList<Client>();
-
+
if(this.clientMemberMap.size() != 0){
for(Client client : this.clientMemberMap.keySet()){
if(!this.cmgr.memberExists(this.clientMemberMap.get(client).getName(),
this.clientMemberMap.get(client).getPoolName())){
-
+
removeClient.add(client);
}
}
}
-
+
for(Client client : removeClient){
this.clientMemberMap.remove(client);
-
+
rrLogger.debug("Removed client : {} ",client);
}
rrLogger.debug("[Client - PoolMember] table after cleanup : {}",this.clientMemberMap.toString());
-
+
rrLogger.debug("[VIP- NextMember] table before cleanup : {}",this.nextItemFromPool.toString());
-
+
ArrayList<VIP> resetVIPPoolMemberCount= new ArrayList<VIP>();
-
+
if(this.nextItemFromPool.size() != 0){
-
+
for(VIP vip:this.nextItemFromPool.keySet()){
if(this.nextItemFromPool.get(vip).intValue() > this.cmgr.getPool(vip.getPoolName()).getAllMembers().size()-1){
-
+
resetVIPPoolMemberCount.add(vip);
}
}
}
-
+
for(VIP vip:resetVIPPoolMemberCount){
rrLogger.debug("VIP next pool member counter reset to 0");
this.nextItemFromPool.put(vip, new Integer(0));
}
-
+
rrLogger.debug("[VIP- NextMember] table after cleanup : {}",this.nextItemFromPool.toString());
}
}
import junit.framework.TestCase;\r
\r
/**\r
- * \r
+ *\r
* Class to unit test the load balancing policies.\r
*\r
*/\r
ConfigManager cm = null;\r
cm = new ConfigManager();\r
Assert.assertFalse(cm== null);\r
- \r
+\r
Pool pool = cm.createPool("TestPool","roundrobin");\r
VIP vip = cm.createVIP("TestVIP","10.0.0.9","TCP",(short)5550,"TestPool");\r
PoolMember host1 = new PoolMember("host1","10.0.0.1","TestPool");\r
PoolMember host5 = new PoolMember("host5","10.0.0.5","TestPool");\r
PoolMember host6 = new PoolMember("host6","10.0.0.6","TestPool");\r
PoolMember host7 = new PoolMember("host7","10.0.0.7","TestPool");\r
- \r
+\r
pool.addMember(host1);\r
pool.addMember(host2);\r
pool.addMember(host3);\r
pool.addMember(host6);\r
pool.addMember(host7);\r
pool.addVIP(vip);\r
- \r
+\r
Assert.assertTrue(cm.getAllPoolMembers("TestPool").size() == pool.getAllMembers().size());\r
- \r
+\r
RoundRobinLBPolicy rrp = new RoundRobinLBPolicy(cm);\r
- \r
+\r
Client c1 = new Client("10.0.0.1","TCP",(short)5000);\r
Assert.assertTrue(rrp.getPoolMemberForClient(c1, vip).equals(host1.getIp()));\r
- \r
+\r
c1 = new Client("10.0.0.1","TCP",(short)5001);\r
Assert.assertTrue(rrp.getPoolMemberForClient(c1, vip).equals(host2.getIp()));\r
- \r
+\r
c1 = new Client("10.0.0.1","TCP",(short)5002);\r
Assert.assertTrue(rrp.getPoolMemberForClient(c1, vip).equals(host3.getIp()));\r
- \r
+\r
c1 = new Client("10.0.0.1","TCP",(short)5003);\r
Assert.assertTrue(rrp.getPoolMemberForClient(c1, vip).equals(host4.getIp()));\r
}\r
/**
* This class exposes North bound REST APIs for the Load Balancer Service.
* Following APIs are exposed by the Load Balancer Service:
- *
+ *
* Data retrieval REST APIs::
- * 1. Get details of all existing pools
- * Type : GET
- * URI : /one/nb/v2/lb/{container-name}/
- * NOTE: Current implementation of the opendaylight usage 'default' as a container-name
- * e.g : http://localhost:8080/one/nb/v2/lb/default will give you list of all the pools
- *
- * 2. Get details of all the existing VIPs
- * Type : GET
- * URI: /one/nb/v2/lb/{container-name}/vips
- *
+ * 1. Get details of all existing pools
+ * Type : GET
+ * URI : /one/nb/v2/lb/{container-name}/
+ * NOTE: Current implementation of the opendaylight usage 'default' as a container-name
+ * e.g : http://localhost:8080/one/nb/v2/lb/default will give you list of all the pools
+ *
+ * 2. Get details of all the existing VIPs
+ * Type : GET
+ * URI: /one/nb/v2/lb/{container-name}/vips
+ *
* Pool related REST APIs::
- * 1. Create Pool :
- * Type : POST
- * URI : /one/nb/v2/lb/{container-name}/create/pool
- * Request body :
+ * 1. Create Pool :
+ * Type : POST
+ * URI : /one/nb/v2/lb/{container-name}/create/pool
+ * Request body :
* {
* "name":"",
* "lbmethod":""
* }
- * Currently, two load balancing policies are allowed {"roundrobin" and "random" }
- *
- * 2. Delete Pool :
- * Type : DELETE
- * URI : /one/nb/v2/lb/{container-name}/delete/pool/{pool-name}
- *
+ * Currently, two load balancing policies are allowed {"roundrobin" and "random" }
+ *
+ * 2. Delete Pool :
+ * Type : DELETE
+ * URI : /one/nb/v2/lb/{container-name}/delete/pool/{pool-name}
+ *
* VIP related REST APIs::
- * 1. Create VIP:
- * Type : POST
- * URI : /one/nb/v2/lb/{container-name}/create/vip
- * Request body :
+ * 1. Create VIP:
+ * Type : POST
+ * URI : /one/nb/v2/lb/{container-name}/create/vip
+ * Request body :
* {
* "name":"",
* "ip":"ip in (xxx.xxx.xxx.xxx) format",
* "port":"any valid port number",
* "poolname":"" (optional)
* }
- * The pool name is optional and can be set up at a later stage (using the REST API given below).
- *
- * 2. Update VIP: Update pool name of the VIP
- * Type : PUT
- * URI : /one/nb/v2/lb/{container-name}/update/vip
- * Request body :
+ * The pool name is optional and can be set up at a later stage (using the REST API given below).
+ *
+ * 2. Update VIP: Update pool name of the VIP
+ * Type : PUT
+ * URI : /one/nb/v2/lb/{container-name}/update/vip
+ * Request body :
* {
* "name":"",
* "poolname":""
* and not of the VIP name itself.
* The specified pool name must already exist. If the specified VIP is already attached to a pool, the update
* will fail.
- *
- * 3. Delete VIP :
- * Type : DELETE
- * URI : /one/nb/v2/lb/{container-name}/delete/vip/{vip-name}
- *
+ *
+ * 3. Delete VIP :
+ * Type : DELETE
+ * URI : /one/nb/v2/lb/{container-name}/delete/vip/{vip-name}
+ *
* Pool member related REST APIs::
- * 1. Create pool member:
- * Type : POST
- * URI : /one/nb/v2/lb/default/create/poolmember
- * Request body :
+ * 1. Create pool member:
+ * Type : POST
+ * URI : /one/nb/v2/lb/default/create/poolmember
+ * Request body :
* {
* "name":"",
* "ip":"ip in (xxx.xxx.xxx.xxx) format",
* "poolname":"existing pool name"
* }
- *
- * 2. Delete pool member:
- * Type : DELETE
- * URI : /one/nb/v2/lb/{container-name}/delete/poolmember/{pool-member-name}/{pool-name}
- *
- * NOTE: Property "name" of each individual entity must be unique.
- * All the above REST APIs throw appropriate response codes in case of error/success.
- * Please consult the respective methods to get details of various response codes.
+ *
+ * 2. Delete pool member:
+ * Type : DELETE
+ * URI : /one/nb/v2/lb/{container-name}/delete/poolmember/{pool-member-name}/{pool-name}
+ *
+ * NOTE: Property "name" of each individual entity must be unique.
+ * All the above REST APIs throw appropriate response codes in case of error/success.
+ * Please consult the respective methods to get details of various response codes.
*/
@Path("/")
public class LoadBalancerNorthbound {
-
+
/*
* Method returns the Load balancer service instance running within
* 'default' container.
}
IConfigManager configManager = (IConfigManager) ServiceHelper.getInstance(
- IConfigManager.class, containerName, this);
+ IConfigManager.class, containerName, this);
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer"
@ResponseCode(code = 503, condition = "Load balancer service is unavailable") })
public Pools getAllPools(
@PathParam("containerName") String containerName) {
-
+
IConfigManager configManager = getConfigManagerService(containerName);
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
-
+
return new Pools(configManager.getAllPools());
}
@ResponseCode(code = 503, condition = "Load balancer service is unavailable") })
public VIPs getAllVIPs(
@PathParam("containerName") String containerName) {
-
+
IConfigManager configManager = getConfigManagerService(containerName);
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer "
@ResponseCode(code = 415, condition = "Invalid input data")})
public Response addVIP(@PathParam("containerName") String containerName,
@TypeHint(VIP.class) JAXBElement<VIP> inVIP){
-
+
VIP vipInput = inVIP.getValue();
String name = vipInput.getName();
String ip = vipInput.getIp();
protocolPort < 0 ){
throw new UnsupportedMediaTypeException(RestMessages.INVALIDDATA.toString());
}
-
+
IConfigManager configManager = getConfigManagerService(containerName);
-
+
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
-
+
if(!configManager.vipExists(name, ip, protocol, protocolPort, poolName)){
-
+
VIP vip = configManager.createVIP(name, ip, protocol, protocolPort, poolName);
if ( vip != null){
return Response.status(Response.Status.CREATED).build();
@ResponseCode(code = 415, condition = "Invalid input name")})
public Response updateVIP(@PathParam("containerName") String containerName,
@TypeHint(VIP.class) JAXBElement<VIP> inVIP) {
-
+
VIP vipInput = inVIP.getValue();
String name = vipInput.getName();
String poolName = vipInput.getPoolName();
poolName.isEmpty()){
throw new UnsupportedMediaTypeException(RestMessages.INVALIDDATA.toString());
}
-
+
IConfigManager configManager = getConfigManagerService(containerName);
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
-
+
if(!configManager.poolExists(poolName))
throw new ResourceNotFoundException(NBConst.RES_POOL_NOT_FOUND);
-
+
if(configManager.getVIPAttachedPool(name)!=null)
throw new MethodNotAllowedException(NBConst.RES_VIP_POOL_EXIST);
-
+
if(configManager.updateVIP(name, poolName)!= null)
return Response.status(Response.Status.ACCEPTED).build();
-
+
throw new InternalServerErrorException(NBConst.RES_VIP_UPDATE_FAILED);
}
-
+
@Path("/{containerName}/delete/vip/{vipName}")
@DELETE
@Consumes( { MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public Response deleteVIP(
@PathParam(value = "containerName") String containerName,
@PathParam(value = "vipName") String vipName) {
-
+
if(vipName.isEmpty())
throw new UnsupportedMediaTypeException(RestMessages.INVALIDDATA.toString());
-
+
IConfigManager configManager = getConfigManagerService(containerName);
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer"
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
-
+
if(!configManager.vipExists(vipName))
throw new ResourceNotFoundException(NBConst.RES_VIP_NOT_FOUND);
-
+
for(VIP vip : configManager.getAllVIPs()){
if(vip.getName().equals(vipName)){
configManager.deleteVIP(vipName);
@ResponseCode(code = 415, condition = "Invalid input data")})
public Response addPool(@PathParam("containerName") String containerName,
@TypeHint(Pool.class) JAXBElement<Pool> inPool) {
-
+
Pool poolInput = inPool.getValue();
String name = poolInput.getName();
String lbMethod =poolInput.getLbMethod();
lbMethod.isEmpty()){
throw new UnsupportedMediaTypeException(RestMessages.INVALIDDATA.toString());
}
-
+
IConfigManager configManager = getConfigManagerService(containerName);
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
-
+
if(!configManager.poolExists(name)){
-
+
Pool pool = configManager.createPool(name, lbMethod);
if ( pool != null){
return Response.status(Response.Status.CREATED).build();
public Response deletePool(
@PathParam(value = "containerName") String containerName,
@PathParam(value = "poolName") String poolName) {
-
+
if(poolName.isEmpty())
throw new UnsupportedMediaTypeException(RestMessages.INVALIDDATA.toString());
-
+
IConfigManager configManager = getConfigManagerService(containerName);
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer"
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
-
+
if(!configManager.poolExists(poolName))
throw new ResourceNotFoundException(NBConst.RES_POOL_NOT_FOUND);
-
+
for(Pool pool:configManager.getAllPools()){
if(pool.getName().equals(poolName)){
configManager.deletePool(poolName);
@ResponseCode(code = 415, condition = "Invalid input data")})
public Response addPoolMember(@PathParam("containerName") String containerName,
@TypeHint(PoolMember.class) JAXBElement<PoolMember> inPoolMember){
-
+
PoolMember pmInput = inPoolMember.getValue();
- String name = pmInput.getName();
- String memberIP = pmInput.getIp();
- String poolName = pmInput.getPoolName();
-
- if(name.isEmpty() ||
- memberIP.isEmpty()||
- poolName.isEmpty()){
- throw new UnsupportedMediaTypeException(RestMessages.INVALIDDATA.toString());
- }
-
- IConfigManager configManager = getConfigManagerService(containerName);
- if (configManager == null) {
- throw new ServiceUnavailableException("Load Balancer "
- + RestMessages.SERVICEUNAVAILABLE.toString());
- }
-
- if(!configManager.poolExists(poolName))
- throw new ResourceNotFoundException(NBConst.RES_POOL_NOT_FOUND);
-
- if(!configManager.memberExists(name, memberIP, poolName)){
-
- PoolMember poolMember = configManager.addPoolMember(name, memberIP, poolName);
- if ( poolMember != null){
- return Response.status(Response.Status.CREATED).build();
- }
- }else{
- throw new ResourceConflictException(NBConst.RES_POOLMEMBER_ALREADY_EXIST);
- }
- throw new InternalServerErrorException(NBConst.RES_POOLMEMBER_CREATION_FAILED);
+ String name = pmInput.getName();
+ String memberIP = pmInput.getIp();
+ String poolName = pmInput.getPoolName();
+
+ if(name.isEmpty() ||
+ memberIP.isEmpty()||
+ poolName.isEmpty()){
+ throw new UnsupportedMediaTypeException(RestMessages.INVALIDDATA.toString());
+ }
+
+ IConfigManager configManager = getConfigManagerService(containerName);
+ if (configManager == null) {
+ throw new ServiceUnavailableException("Load Balancer "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ if(!configManager.poolExists(poolName))
+ throw new ResourceNotFoundException(NBConst.RES_POOL_NOT_FOUND);
+
+ if(!configManager.memberExists(name, memberIP, poolName)){
+
+ PoolMember poolMember = configManager.addPoolMember(name, memberIP, poolName);
+ if ( poolMember != null){
+ return Response.status(Response.Status.CREATED).build();
+ }
+ }else{
+ throw new ResourceConflictException(NBConst.RES_POOLMEMBER_ALREADY_EXIST);
+ }
+ throw new InternalServerErrorException(NBConst.RES_POOLMEMBER_CREATION_FAILED);
}
@Path("/{containerName}/delete/poolmember/{poolMemberName}/{poolName}")
@PathParam(value = "containerName") String containerName,
@PathParam(value = "poolMemberName") String poolMemberName,
@PathParam(value = "poolName") String poolName) {
-
+
if(poolMemberName.isEmpty()||
poolName.isEmpty())
throw new UnsupportedMediaTypeException(RestMessages.INVALIDDATA.toString());
-
+
IConfigManager configManager = getConfigManagerService(containerName);
-
+
if (configManager == null) {
throw new ServiceUnavailableException("Load Balancer"
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
-
+
if(!configManager.poolExists(poolName))
throw new ResourceNotFoundException(NBConst.RES_POOL_NOT_FOUND);
-
+
if(configManager.memberExists(poolMemberName, poolName)){
-
+
configManager.removePoolMember(poolMemberName, poolName);
-
+
return Response.ok().build();
}
throw new ResourceNotFoundException(NBConst.RES_POOLMEMBER_NOT_FOUND);
*
*/
public class NBConst {
-
+
public static final String RES_VIP_ALREADY_EXIST= " VIP already exists";
-
+
public static final String RES_VIP_NOT_FOUND= " VIP not found";
-
+
public static final String RES_VIP_CREATION_FAILED = " Creation of VIP failed";
-
+
public static final String RES_VIP_DELETION_FAILED = " Deletion of VIP failed";
-
+
public static final String RES_VIP_UPDATE_FAILED = " Update of VIP failed";
-
+
public static final String RES_POOL_ALREADY_EXIST= " Pool already exists";
-
+
public static final String RES_POOL_NOT_FOUND= " Pool not found";
-
+
public static final String RES_POOL_CREATION_FAILED = " Creation of pool failed";
-
+
public static final String RES_POOL_DELETION_FAILED = " Deletion of pool failed";
-
+
public static final String RES_POOLMEMBER_ALREADY_EXIST= " Pool member already exists";
-
+
public static final String RES_POOLMEMBER_NOT_FOUND= " Pool member not found";
-
+
public static final String RES_POOLMEMBER_CREATION_FAILED = " Creation of pool member failed";
-
+
public static final String RES_POOLMEMBER_DELETION_FAILED = " Deletion of pool member failed";
-
+
public static final String RES_VIP_POOL_EXIST = "Pool already attached to a VIP";
}
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
/**
- * JAX-RS resource for handling details of all the available pools
+ * JAX-RS resource for handling details of all the available pools
* in response to respective REST API requests.
*/
public class Pools {
-
+
@XmlElement (name="pool")
Set<Pool> loadBalancerPools;
-
+
public Pools() {
}
-
+
public Pools (Set<Pool> loadBalancerPools) {
this.loadBalancerPools = loadBalancerPools;
}
-
+
/**
* @return the loadBalancerPools
*/
public Set<Pool> getLoadBalancerPools() {
return loadBalancerPools;
}
-
+
/**
* @param loadBalancerPools the loadBalancerPools to set
*/
@XmlAccessorType(XmlAccessType.NONE)
public class VIPs {
-
+
@XmlElement (name="vip")
Set<VIP> loadBalancerVIPs;
-
+
public VIPs() {}
-
-
+
+
public VIPs (Set<VIP> loadBalancerVIPs) {
this.loadBalancerVIPs = loadBalancerVIPs;
}
-
+
/**
* @return the loadBalancerVIPs
*/
public Set<VIP> getLoadBalancerVIPs() {
return loadBalancerVIPs;
}
-
+
/**
* @param loadBalancerVIPs the loadBalancerVIPs to set
*/
-
+
public void setLoadBalancerVIPs(Set<VIP> loadBalancerVIPs) {
this.loadBalancerVIPs = loadBalancerVIPs;
}
}
@SuppressWarnings("deprecation")
- private void allocateCaches() {
+ private void allocateCaches() {
if (this.clusterContainerService == null) {
log.info("un-initialized clusterContainerService, can't create cache");
return;
}
@SuppressWarnings("deprecation")
- private void destroyCaches() {
+ private void destroyCaches() {
if (this.clusterContainerService == null) {
log.info("un-initialized clusterContainerService, can't destroy cache");
return;
}
} else {
- log.debug("Adding policyMatch(DIP = {}) Action(outPort= {}) " +
+ log.debug("Adding policyMatch(DIP = {}) Action(outPort= {}) " +
"to node {}", new Object[] {
host.getNetworkAddress().getHostAddress(), outPort,
currNode});
updatePerHostRuleInSW(host, rootNode, rootNode, null, new HostNodePair(
host, rootNode), ports);
- // log.debug("Getting out at the end!");
+ // log.debug("Getting out at the end!");
return switchesToProgram;
}
// rest of the world
// switchesToProgram.add(rootNode);
//updatePerHostRuleInSW(host, rootNode,
- // rootNode, null,
- // new HostNodePair(host, rootNode),ports);
+ // rootNode, null,
+ // new HostNodePair(host, rootNode),ports);
- // log.debug("Getting out at the end!");
+ // log.debug("Getting out at the end!");
return switchesToProgram;
}
Status poStatus = this.frm.installFlowEntry(po);
if (!poStatus.isSuccess()) {
log.error("Failed to install policy: "
- + po.getGroupName() + " ("
+ + po.getGroupName() + " ("
+ poStatus.getDescription() + ")");
retCode = RulesProgrammingReturnCode.FAILED_FEW_SWITCHES;
swId, host);
/* // Now dump every single rule */
/* for (HostNodePair dumpkey : this.rulesDB.keySet()) { */
- /* po = this.rulesDB.get(dumpkey); */
- /* log.debug("Dumping entry H{" + dumpkey.getHost() + "} S{" + dumpkey.getSwitchId() + "} = {" + (po == null ? "null policy" : po)); */
+ /* po = this.rulesDB.get(dumpkey); */
+ /* log.debug("Dumping entry H{" + dumpkey.getHost() + "} S{" + dumpkey.getSwitchId() + "} = {" + (po == null ? "null policy" : po)); */
/* } */
}
}
* If the context is the default container, the returned statistics are for all the flows installed on the node,
* regardless of the container they belong to
*
- * @param node the network node
+ * @param node the network node
* @return the list of flows installed on the network node
*/
List<FlowOnNode> getFlows(Node node);
* Returns the statistics for the flows specified in the list
*
* @param flows
- * @return the list of flows installed on the network node
+ * @return the list of flows installed on the network node
*/
Map<Node, List<FlowOnNode>> getFlowStatisticsForFlowList(
List<FlowEntry> flows);
/**
* This method is called when some properties of a node are added/deleted/changed.
*
- * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param propMap map of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.Tier} etc.
+ * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param propMap map of {@link org.opendaylight.controller.sal.core.Property} such as
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.Tier} etc.
*/
public void notifyNode(Node node, UpdateType type,
Map<String, Property> propMap);
/**
* This method is called when some properties of a node connector are added/deleted/changed.
*
- * @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector} being updated
- * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
- * @param propMap map of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.State} etc.
+ * @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector} being updated
+ * @param type {@link org.opendaylight.controller.sal.core.UpdateType}
+ * @param propMap map of {@link org.opendaylight.controller.sal.core.Property} such as
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.State} etc.
*/
public void notifyNodeConnector(NodeConnector nodeConnector,
UpdateType type, Map<String, Property> propMap);
/**
* This method is called when list of ports in a node are added/deleted as span ports.
*
- * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
- * @param portList list of span {@link org.opendaylight.controller.sal.core.NodeConnector}
- * @param add true if add; false if delete.
+ * @param node {@link org.opendaylight.controller.sal.core.Node} being updated
+ * @param portList list of span {@link org.opendaylight.controller.sal.core.NodeConnector}
+ * @param add true if add; false if delete.
*/
public void spanUpdate(Node node, List<NodeConnector> portList, boolean add);
}
\ No newline at end of file
/**
* Remove a subnet configuration
*
- * @param configObject refer to {@link Open Declaration org.opendaylight.controller.switchmanager.SubnetConfig}
+ * @param configObject refer to {@link Open Declaration org.opendaylight.controller.switchmanager.SubnetConfig}
* @return "Success" or failure reason
*/
public Status removeSubnet(SubnetConfig configObject);
* Remove a subnet configuration given the name
*
* @param name subnet name
- * @return "Success" or failure reason
+ * @return "Success" or failure reason
*/
public Status removeSubnet(String name);
/**
* Return a list of all known devices in the system
*
- * @return returns a list of {@link org.opendaylight.controller.switchmanager.Switch}
+ * @return returns a list of {@link org.opendaylight.controller.switchmanager.Switch}
*/
public List<Switch> getNetworkDevices();
/**
* Return a subnet configuration given the network address
*
- * @param networkAddress the ip address in long format
- * @return the {@link org.opendaylight.controller.switchmanager.Subnet}
+ * @param networkAddress the ip address in long format
+ * @return the {@link org.opendaylight.controller.switchmanager.Subnet}
*/
public Subnet getSubnetByNetworkAddress(InetAddress networkAddress);
* Add a span port configuration
*
* @param SpanConfig refer to {@link Open Declaration org.opendaylight.controller.switchmanager.SpanConfig}
- * @return status code
+ * @return status code
*/
public Status addSpanConfig(SpanConfig configObject);
* Remove a span port configuration
*
* @param SpanConfig refer to {@link Open Declaration org.opendaylight.controller.switchmanager.SpanConfig}
- * @return status code
+ * @return status code
*/
public Status removeSpanConfig(SpanConfig cfgObject);
/**
* Remove node connectors from a subnet
*
- * @param name the subnet config name
- * @param nodeConnectors nodePorts string specified by {@link Open Declaration org.opendaylight.controller.switchmanager.SubnetConfig}
+ * @param name the subnet config name
+ * @param nodeConnectors nodePorts string specified by {@link Open Declaration org.opendaylight.controller.switchmanager.SubnetConfig}
* @return "Success" or failure reason
*/
public Status removePortsFromSubnet(String name, String nodeConnectors);
*
* @param node {@link org.opendaylight.controller.sal.core.Node}
* @return map of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.Tier} etc.
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.Tier} etc.
*/
public Map<String, Property> getNodeProps(Node node);
/**
* Return a specific property of a node given the property name
*
- * @param node {@link org.opendaylight.controller.sal.core.Node}
- * @param propName the property name specified by {@link org.opendaylight.controller.sal.core.Property} and its extended classes
+ * @param node {@link org.opendaylight.controller.sal.core.Node}
+ * @param propName the property name specified by {@link org.opendaylight.controller.sal.core.Property} and its extended classes
* @return {@link org.opendaylight.controller.sal.core.Property}
*/
public Property getNodeProp(Node node, String propName);
/**
* Set a specific property of a node
*
- * @param node {@link org.opendaylight.controller.sal.core.Node}
- * @param prop {@link org.opendaylight.controller.sal.core.Property}
+ * @param node {@link org.opendaylight.controller.sal.core.Node}
+ * @param prop {@link org.opendaylight.controller.sal.core.Property}
*/
public void setNodeProp(Node node, Property prop);
/**
* Remove a property of a node
- *
- * @param nc {@link org.opendaylight.controller.sal.core.Node}
- * @param propName the property name specified by {@link org.opendaylight.controller.sal.core.Property} and its extended classes
+ *
+ * @param nc {@link org.opendaylight.controller.sal.core.Node}
+ * @param propName the property name specified by {@link org.opendaylight.controller.sal.core.Property} and its extended classes
* @return success or failed reason
*/
public Status removeNodeProp(Node node, String propName);
/**
* Remove all the properties of a node
- *
+ *
* @param node {@link org.opendaylight.controller.sal.core.Node}
* @return success or failed reason
*/
*
* @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector}
* @return map of {@link org.opendaylight.controller.sal.core.Property} such as
- * {@link org.opendaylight.controller.sal.core.Description} and/or
- * {@link org.opendaylight.controller.sal.core.State} etc.
+ * {@link org.opendaylight.controller.sal.core.Description} and/or
+ * {@link org.opendaylight.controller.sal.core.State} etc.
*/
public Map<String, Property> getNodeConnectorProps(
NodeConnector nodeConnector);
/**
* Remove a property of a node connector
- *
+ *
* @param nc {@link org.opendaylight.controller.sal.core.NodeConnector}
* @param propName property name specified by {@link org.opendaylight.controller.sal.core.Property} and its extended classes
* @return success or failed reason
/**
* Remove all the properties of a node connector
- *
+ *
* @param nodeConnector {@link org.opendaylight.controller.sal.core.NodeConnector}
* @return success or failed reason
*/
/**
* Return the node connector given its name
*
- * @param node {@link org.opendaylight.controller.sal.core.Node}
+ * @param node {@link org.opendaylight.controller.sal.core.Node}
* @param nodeConnectorName node connector identifier specified by {@link org.opendaylight.controller.sal.core.NodeConnector}
* @return {@link org.opendaylight.controller.sal.core.NodeConnector}
*/
/**
* Return controller MAC address
- *
+ *
* @return MAC address in byte array
*/
public byte[] getControllerMAC();
/**
* Return MAC address for a given node
*
- * @param node {@link org.opendaylight.controller.sal.core.Node}
+ * @param node {@link org.opendaylight.controller.sal.core.Node}
* @return MAC address in byte array
*/
public byte[] getNodeMAC(Node node);
*/
public int getHostRetryCount();
- /**
- * Create a Name/Tier/Bandwidth Property object based on given property
- * name and value. Other property types are not supported yet.
- *
+ /**
+ * Create a Name/Tier/Bandwidth Property object based on given property
+ * name and value. Other property types are not supported yet.
+ *
* @param propName Name of the Property specified by {@link org.opendaylight.controller.sal.core.Property} and its extended classes
* @param propValue Value of the Property specified by {@link org.opendaylight.controller.sal.core.Property} and its extended classes
- * @return {@link org.opendaylight.controller.sal.core.Property}
- */
+ * @return {@link org.opendaylight.controller.sal.core.Property}
+ */
public Property createProperty(String propName, String propValue);
-
+
/**
- * Returns the description for the specified node. It is either the one
+ * Returns the description for the specified node. It is either the one
* configured by user or the description advertised by the node.
- *
+ *
* @param node the network node identifier
- * @return the description of the specified node. If no description is
+ * @return the description of the specified node. If no description is
* configured and the network node does not provide its description,
* an empty string is returned.
*/
}
public boolean isProactive() {
- return Integer.parseInt(mode) != 0;
+ return Integer.parseInt(mode) != 0;
}
-
+
public static long getSerialversionuid() {
return serialVersionUID;
}
Assert.assertTrue(sw.getSpanPorts().isEmpty());
}
-
+
@Test
public void testSwitchConfig(){
SwitchConfig sc = new SwitchConfig(null, null, null, null);
SwitchConfig sc2 = new SwitchConfig(null, null, null, null);
Assert.assertTrue(sc.equals(sc2));
-
+
Assert.assertNull(sc.getMode());
Assert.assertNull(sc.getNodeId());
Assert.assertNull(sc.getTier());
Assert.assertNull(sc.getNodeDescription());
-
+
SwitchConfig sc3 = new SwitchConfig("123", "name", "tier", "mode");
SwitchConfig sc4 = new SwitchConfig("123", "name", "tier", "mode");
Assert.assertFalse(sc.equals(sc3));
Assert.assertTrue(sc3.equals(sc4));
-
+
Assert.assertTrue(sc3.getNodeId().equals("123"));
Assert.assertTrue(sc3.getNodeDescription().equals("name"));
Assert.assertTrue(sc3.getTier().equals("tier"));
return false;
}
- if (!isValidNodeConnector(srcNodeConnector) ||
+ if (!isValidNodeConnector(srcNodeConnector) ||
!isValidNodeConnector(dstNodeConnector)) {
logger.warn("Invalid NodeConnector");
return false;
+ ", srcNodeConnector=" + srcNodeConnector
+ ", dstNodeConnector=" + dstNodeConnector + "]";
}
-}
\ No newline at end of file
+}
if (srcNodeConnector == null || dstNodeConnector == null) return null;
try {
linkTuple = new Edge(srcNodeConnector, dstNodeConnector);
- } catch (Exception e) {
+ } catch (Exception e) {
}
return linkTuple;
}
* NodeConnectorId> : <1:1>--><11:11>; <1:2>--><11:12>; <3:3>--><13:13>;
* <3:4>--><13:14>; <5:5>--><15:15>; <5:6>--><15:16>; Method used by two
* tests: testGetNodeEdges and testGetEdges
- *
+ *
* @param topoManagerImpl
- *
+ *
* @throws ConstructionException
*/
public void setNodeEdges(TopologyManagerImpl topoManagerImpl)
/**
* Authenticate user with AAA server and return authentication and
* authorization info using the Provider's mechanism
- *
+ *
* @param userName
* the username
* @param password
/**
* Returns the Name of the Provider
- *
+ *
* @return Name of the AAA provider
*/
public String getName();
import javax.servlet.http.HttpSessionListener;
/**
- *
+ *
* This interface defines method(s) to invalidate a session.
*/
public interface ISessionManager extends HttpSessionListener {
/**
* Invalidates the session for a given user and a session Id
- *
+ *
* @param username
* the username
* @param sessionId
*/
public ISessionManager getSessionManager();
+ /**
+ * Checks if the specified role belongs to any application. Usually an
+ * application will call this function when configuring a role, to check if
+ * that role is already being used by another application.
+ *
+ * @param role
+ * The role to check
+ * @return true if the specified role belongs to any application or if the
+ * role is a well-known controller role, false otherwise.
+ */
+ public boolean isRoleInUse(String role);
+
/* non-Javadoc
* Returns the password for a given user
*
import org.springframework.security.core.GrantedAuthority;
public class ODLUserLevel implements GrantedAuthority {
- private static final long serialVersionUID = 1L;
- UserLevel userLevel;
+ private static final long serialVersionUID = 1L;
+ UserLevel userLevel;
public ODLUserLevel(UserLevel userLevel) {
this.userLevel = userLevel;
}
public ServerConfig(String ip, String secret, String protocol) {
- this.ip = ip;
- this.secret = secret;
- this.protocol = protocol;
+ this.ip = ip;
+ this.secret = secret;
+ this.protocol = protocol;
}
-
+
public String getAddress() {
return ip;
}
this.roles = (roles == null) ? new ArrayList<String>()
: new ArrayList<String>(roles);
}
-
+
public String getUser() {
return user;
}
return new Status(StatusCode.BADREQUEST,
"Current password is incorrect");
}
-
+
// Create a new object with the proposed modifications
UserConfig proposed = new UserConfig();
proposed.user = this.user;
proposed.password = (newPassword != null)? newPassword : this.password;
proposed.roles = (newRoles != null)? newRoles : this.roles;
-
+
// Validate it
Status status = proposed.validate();
if (!status.isSuccess()) {
return status;
}
-
+
// Accept the modifications
this.user = proposed.user;
this.password = proposed.password;
this.roles = new ArrayList<String>(proposed.roles);
-
+
return status;
}
}
return locResponse;
}
-
+
protected String getRolesString() {
StringBuffer buffer = new StringBuffer();
if (!roles.isEmpty()) {
"usermanager.authorizationSaveConfigEvent",
EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
} catch (CacheConfigException cce) {
- logger.error("\nCache configuration invalid - check cache mode");
+ logger.error("Cache configuration invalid - check cache mode");
} catch (CacheExistException ce) {
- logger.error("\nCache already exits - destroy and recreate if needed");
+ logger.debug("Skipping cache creation as already present");
}
}
activeUsers = (ConcurrentMap<String, AuthenticatedUser>) clusterGlobalService
.getCache("usermanager.activeUsers");
if (activeUsers == null) {
- logger.error("\nFailed to get cache for activeUsers");
+ logger.error("Failed to get cache for activeUsers");
}
localUserConfigList = (ConcurrentMap<String, UserConfig>) clusterGlobalService
.getCache("usermanager.localUserConfigList");
if (localUserConfigList == null) {
- logger.error("\nFailed to get cache for localUserConfigList");
+ logger.error("Failed to get cache for localUserConfigList");
}
remoteServerConfigList = (ConcurrentMap<String, ServerConfig>) clusterGlobalService
.getCache("usermanager.remoteServerConfigList");
if (remoteServerConfigList == null) {
- logger.error("\nFailed to get cache for remoteServerConfigList");
+ logger.error("Failed to get cache for remoteServerConfigList");
}
authorizationConfList = (ConcurrentMap<String, AuthorizationConfig>) clusterGlobalService
.getCache("usermanager.authorizationConfList");
if (authorizationConfList == null) {
- logger.error("\nFailed to get cache for authorizationConfList");
+ logger.error("Failed to get cache for authorizationConfList");
}
localUserListSaveConfigEvent = (ConcurrentMap<Long, String>) clusterGlobalService
.getCache("usermanager.localUserSaveConfigEvent");
if (localUserListSaveConfigEvent == null) {
- logger.error("\nFailed to get cache for localUserSaveConfigEvent");
+ logger.error("Failed to get cache for localUserSaveConfigEvent");
}
remoteServerSaveConfigEvent = (ConcurrentMap<Long, String>) clusterGlobalService
.getCache("usermanager.remoteServerSaveConfigEvent");
if (remoteServerSaveConfigEvent == null) {
- logger.error("\nFailed to get cache for remoteServerSaveConfigEvent");
+ logger.error("Failed to get cache for remoteServerSaveConfigEvent");
}
authorizationSaveConfigEvent = (ConcurrentMap<Long, String>) clusterGlobalService
.getCache("usermanager.authorizationSaveConfigEvent");
if (authorizationSaveConfigEvent == null) {
- logger.error("\nFailed to get cache for authorizationSaveConfigEvent");
+ logger.error("Failed to get cache for authorizationSaveConfigEvent");
}
}
@Override
public List<String> getUserRoles(String userName) {
- if (userName == null) {
- return new ArrayList<String>(0);
+ List<String> roles = null;
+ if (userName != null) {
+ /*
+ * First look in active users then in local configured users,
+ * finally in local authorized users
+ */
+ if (activeUsers.containsKey(userName)) {
+ roles = activeUsers.get(userName).getUserRoles();
+ } else if (localUserConfigList.containsKey(userName)) {
+ roles = localUserConfigList.get(userName).getRoles();
+ } else if (authorizationConfList.containsKey(userName)) {
+ roles = authorizationConfList.get(userName).getRoles();
+ }
}
- AuthenticatedUser locatedUser = activeUsers.get(userName);
- return (locatedUser == null) ? new ArrayList<String>(0) : locatedUser
- .getUserRoles();
+ return (roles == null) ? new ArrayList<String>(0) : roles;
}
@Override
public UserLevel getUserLevel(String username) {
- // Returns the controller well-know user level for the passed user
- List<String> rolesNames = null;
-
- // First check in active users then in local configured users
- if (activeUsers.containsKey(username)) {
- List<String> roles = activeUsers.get(username).getUserRoles();
- rolesNames = (roles == null || roles.isEmpty()) ? null : roles;
- } else if (localUserConfigList.containsKey(username)) {
- UserConfig config = localUserConfigList.get(username);
- rolesNames = (config == null) ? null : config.getRoles();
- }
+ // Returns the highest controller user level for the passed user
+ List<String> rolesNames = getUserRoles(username);
- if (rolesNames == null) {
+ if (rolesNames.isEmpty()) {
return UserLevel.NOUSER;
}
@Override
public List<UserLevel> getUserLevels(String username) {
- // Returns the controller well-know user levels for the passed user
- List<String> rolesNames = null;
+ // Returns the controller user levels for the passed user
+ List<String> rolesNames = getUserRoles(username);
List<UserLevel> levels = new ArrayList<UserLevel>();
- if (activeUsers.containsKey(username)) {
- List<String> roles = activeUsers.get(username).getUserRoles();
- rolesNames = (roles == null || roles.isEmpty()) ? null : roles;
- } else if (localUserConfigList.containsKey(username)) {
- UserConfig config = localUserConfigList.get(username);
- rolesNames = (config == null) ? null : config.getRoles();
- }
-
- if (rolesNames == null) {
+ if (rolesNames.isEmpty()) {
return levels;
}
}
- // following are setters for use in unit testing
+ // Following are setters for use in unit testing
void setLocalUserConfigList(ConcurrentMap<String, UserConfig> ucl) {
if (ucl != null) {
this.localUserConfigList = ucl;
public String getPassword(String username) {
return localUserConfigList.get(username).getPassword();
}
+
+ @Override
+ public boolean isRoleInUse(String role) {
+ if (role == null || role.isEmpty()) {
+ return false;
+ }
+ // Check against controller roles
+ if (role.equals(UserLevel.SYSTEMADMIN.toString())
+ || role.equals(UserLevel.NETWORKADMIN.toString())
+ || role.equals(UserLevel.NETWORKOPERATOR.toString())) {
+ return true;
+ }
+ // Check if container roles
+ if (containerAuthorizationClient != null) {
+ if (containerAuthorizationClient.isApplicationRole(role)) {
+ return true;
+ }
+ }
+ // Finally if application role
+ if (applicationAuthorizationClients != null) {
+ for (IResourceAuthorization client : this.applicationAuthorizationClients) {
+ if (client.isApplicationRole(role)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
}
while (sessIterator.hasNext()) {
HttpSession session = sessIterator.next();
if (session != null && sessionId != null && session.getId() != null && !session.getId().equals(sessionId)) {
- sessionsList.add(session);
+ sessionsList.add(session);
sessIterator.remove();
}
else {
package org.opendaylight.controller.usermanager;
public class AuthResponseTest{
-
+
}
\ No newline at end of file
public class AuthenticatedUserTest {
- static String[] roleArray;
- static AuthenticatedUser user;
-
- @BeforeClass
- public static void testSetup() {
- roleArray = new String[] { UserLevel.NETWORKOPERATOR.toString(),
- UserLevel.APPUSER.toString() };
- }
-
- @Test
- public void testAuthenticatedUser() {
- user = new AuthenticatedUser("auser");
-
- Assert.assertFalse(user.getAccessDate().isEmpty());
- Assert.assertNull(user.getUserRoles());
- }
-
- @Test
- public void testSetUserRoleList() {
- List<String> retrievedRoleList = null;
- List<String> roleList = Arrays.asList(roleArray);
-
- // list arg
- user = new AuthenticatedUser("auser");
- user.setRoleList(roleList);
- retrievedRoleList = user.getUserRoles();
- Assert.assertTrue(roleList.equals(retrievedRoleList));
-
- // array arg
- user = new AuthenticatedUser("auser");
- user.setRoleList(roleArray);
- retrievedRoleList = user.getUserRoles();
- for (int i = 0; i < roleArray.length; i++)
- Assert.assertTrue(roleArray[i].equals(retrievedRoleList.get(i)));
-
- // test addUserRole
- user.addUserRole("AnotherRole");
- Assert.assertTrue(user.getUserRoles().lastIndexOf("AnotherRole") != -1);
-
- }
-
- @Test
- public void testGetGrantedAuthorities() {
- List<GrantedAuthority> gaList = user
- .getGrantedAuthorities(UserLevel.NETWORKOPERATOR);
- Assert.assertTrue(gaList.get(0).getAuthority()
- .equals("ROLE_NETWORK-OPERATOR"));
- }
+ static String[] roleArray;
+ static AuthenticatedUser user;
+
+ @BeforeClass
+ public static void testSetup() {
+ roleArray = new String[] { UserLevel.NETWORKOPERATOR.toString(),
+ UserLevel.APPUSER.toString() };
+ }
+
+ @Test
+ public void testAuthenticatedUser() {
+ user = new AuthenticatedUser("auser");
+
+ Assert.assertFalse(user.getAccessDate().isEmpty());
+ Assert.assertNull(user.getUserRoles());
+ }
+
+ @Test
+ public void testSetUserRoleList() {
+ List<String> retrievedRoleList = null;
+ List<String> roleList = Arrays.asList(roleArray);
+
+ // list arg
+ user = new AuthenticatedUser("auser");
+ user.setRoleList(roleList);
+ retrievedRoleList = user.getUserRoles();
+ Assert.assertTrue(roleList.equals(retrievedRoleList));
+
+ // array arg
+ user = new AuthenticatedUser("auser");
+ user.setRoleList(roleArray);
+ retrievedRoleList = user.getUserRoles();
+ for (int i = 0; i < roleArray.length; i++)
+ Assert.assertTrue(roleArray[i].equals(retrievedRoleList.get(i)));
+
+ // test addUserRole
+ user.addUserRole("AnotherRole");
+ Assert.assertTrue(user.getUserRoles().lastIndexOf("AnotherRole") != -1);
+
+ }
+
+ @Test
+ public void testGetGrantedAuthorities() {
+ List<GrantedAuthority> gaList = user
+ .getGrantedAuthorities(UserLevel.NETWORKOPERATOR);
+ Assert.assertTrue(gaList.get(0).getAuthority()
+ .equals("ROLE_NETWORK-OPERATOR"));
+ }
}
roles.clear();
roles.add(UserLevel.NETWORKADMIN.toString());
assertTrue(userConfig.update("ciscocisco", null, roles).isSuccess());
-
+
// Role change and same new password
roles.clear();
roles.add(UserLevel.NETWORKOPERATOR.toString());
assertTrue(userConfig.update("ciscocisco", "ciscocisco", roles)
.isSuccess());
-
+
// New Password = null, No change in password
assertTrue(userConfig.getPassword().equals("ciscocisco"));
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
// Server config can't be empty
static final long serialVersionUID = 8645L;
+ @Override
public String getAddress() {
return "1.1.1.1";
}
+ @Override
public String getSecret() {
return "secret";
}
+ @Override
public String getProtocol() {
return "IPv4";
}
"7029,7455,8165,7029,7881", roles));
}
});
+
+ um.setAuthorizationConfList(new ConcurrentHashMap<String, AuthorizationConfig>() {
+ static final long serialVersionUID = 2L;
+ {
+ List<String> roles = new ArrayList<String>(3);
+ roles.add(UserLevel.NETWORKOPERATOR.toString());
+ roles.add("Container1-Admin");
+ roles.add("Application2-User");
+
+ put("Andrew", new AuthorizationConfig("Andrew", roles));
+ }
+ });
// instantiate an empty activeUser collection
um.setActiveUsers(new ConcurrentHashMap<String, AuthenticatedUser>());
-
}
-
}
/**
// instantiate an anonymous AAAProvider
IAAAProvider a3p = new IAAAProvider() {
+ @Override
public AuthResponse authService(String userName, String password,
String server, String secretKey) {
return new AuthResponse();
};
+ @Override
public String getName() {
return "dummyAAAProvider";
}
Assert.assertTrue(um.getUserLevel("Jack") == UserLevel.SYSTEMADMIN);
// Run the check on configured users
Assert.assertTrue(um.getUserLevel("John") == UserLevel.NETWORKOPERATOR);
- Assert.assertTrue(um.getUserLevel("Andrew") == UserLevel.NOUSER);
+ // Run the check on local authorized users
+ Assert.assertTrue(um.getUserLevel("Andrew") == UserLevel.NETWORKOPERATOR);
+ // Non locally known user
+ Assert.assertTrue(um.getUserLevel("Tom") == UserLevel.NOUSER);
}
}
@ResponseBody
public Set<Map<String, Object>> getFlows(HttpServletRequest request, @RequestParam(required = false) String container) {
String containerName = DaylightWebUtil.getAuthorizedContainer(request, container, this);
-
+
// fetch frm
IForwardingRulesManager frm = (IForwardingRulesManager) ServiceHelper
.getInstance(IForwardingRulesManager.class, containerName, this);
@ResponseBody
public Map<String, Object> getNodePorts(HttpServletRequest request, @RequestParam(required = false) String container) {
String containerName = DaylightWebUtil.getAuthorizedContainer(request, container, this);
-
+
ISwitchManager switchManager = (ISwitchManager) ServiceHelper
.getInstance(ISwitchManager.class, containerName, this);
if (switchManager == null) {
@ResponseBody
public Map<String, Object> getNodeFlows(HttpServletRequest request, @RequestParam(required = false) String container) {
String containerName = DaylightWebUtil.getAuthorizedContainer(request, container, this);
-
+
ISwitchManager switchManager = (ISwitchManager) ServiceHelper
.getInstance(ISwitchManager.class, containerName, this);
if (switchManager == null) {
if (!isUserAuthorized(UserLevel.NETWORKADMIN, request)) {
return "Operation not authorized";
}
-
+
String containerName = DaylightWebUtil.getAuthorizedContainer(request, container, this);
IForwardingRulesManager frm = (IForwardingRulesManager) ServiceHelper
if (!isUserAuthorized(UserLevel.NETWORKADMIN, request)) {
return "Operation not authorized";
}
-
+
String containerName = DaylightWebUtil.getAuthorizedContainer(request, container, this);
IForwardingRulesManager frm = (IForwardingRulesManager) ServiceHelper
/**
* Returns whether the current user's level is same or above the required
* authorization level.
- *
+ *
* @param requiredLevel
* the authorization level required
*/
return userManager.removeLocalUser(userName).getDescription();
}
-
+
@RequestMapping(value = "/users/password/{username}", method = RequestMethod.POST)
@ResponseBody
public Status changePassword(@PathVariable("username") String username, HttpServletRequest request,
if (userManager == null) {
return new Status(StatusCode.GONE, "User Manager not found");
}
-
+
if (!authorize(userManager, UserLevel.NETWORKADMIN, request)) {
return new Status(StatusCode.FORBIDDEN, "Operation not permitted");
}
-
+
if (newPassword.isEmpty()) {
return new Status(StatusCode.BADREQUEST, "Empty passwords not allowed");
}
-
+
Status status = userManager.changeLocalUserPassword(username, currentPassword, newPassword);
-
+
return status;
}
/**
* Is the operation permitted for the given level
- *
+ *
* @param level
*/
private boolean authorize(IUserManager userManager, UserLevel level,
public class DaylightWebUtil {
private static String defaultName = GlobalConstants.DEFAULT.toString();
-
+
/**
* Returns the container that this user is authorized to access. If the user is not authorized to the requested
* container, then this method will return the default container.
- *
+ *
* @param request - HttpServletRequest object to retrieve username
* @param container - requested container
* @param bundle - respective bundle
if (container == null) {
return defaultName;
}
-
+
String username = request.getUserPrincipal().getName();
IContainerAuthorization containerAuthorization = (IContainerAuthorization)
ServiceHelper.getGlobalInstance(IContainerAuthorization.class, bundle);
/**
* Returns the name of the bundle. In the GUI, this name will be displayed
* on the tab.
- *
+ *
* @return Name assigned to the bundle.
*/
public String getWebName();
/**
* Returns the Id assigned to the web bundle.
- *
+ *
* @return Id assigned to the web bundle.
*/
public String getWebId();
/**
* Returns the position where the bundle tab will be placed in the GUI.
- *
+ *
* @return Position number for the bundle tab.
*/
public short getWebOrder();
/**
* This method checks if the user is authorized to access the bundle.
- *
+ *
* @param userLevel
* user role level in the controller space.
- *
+ *
* @return true, if user is authorized to access the bundle, else false.
*/
public boolean isAuthorized(UserLevel userLevel);
protected Map<String, Map<String, Map<String, Object>>> metaCache = new HashMap<String, Map<String, Map<String, Object>>>();
protected Map<String, Map<String, Object>> stagedNodes;
protected Map<String, Map<String, Object>> newNodes;
-
+
protected Map<String, Integer> metaNodeHash = new HashMap<String, Integer>();
protected Map<String, Integer> metaHostHash = new HashMap<String, Integer>();
protected Map<String, Integer> metaNodeSingleHash = new HashMap<String, Integer>();
protected Map<String, Integer> metaNodeConfigurationHash = new HashMap<String, Integer>();
-
+
public Topology() {
- ServiceHelper.registerGlobalService(IConfigurationAware.class, this, null);
- topologyWebFileName = ROOT + "topologyCache.sav";
- loadConfiguration();
+ ServiceHelper.registerGlobalService(IConfigurationAware.class, this, null);
+ topologyWebFileName = ROOT + "topologyCache.sav";
+ loadConfiguration();
}
-
+
/**
* Topology of nodes and hosts in the network in JSON format.
- *
+ *
* Mainly intended for consumption by the visual topology.
- *
+ *
* @return - JSON output for visual topology
*/
@RequestMapping(value = "/visual.json", method = RequestMethod.GET)
@ResponseBody
public Collection<Map<String, Object>> getLinkData(@RequestParam(required = false) String container, HttpServletRequest request) {
- String containerName = DaylightWebUtil.getAuthorizedContainer(request, container, this);
-
+ String containerName = DaylightWebUtil.getAuthorizedContainer(request, container, this);
+
ITopologyManager topologyManager = (ITopologyManager) ServiceHelper
.getInstance(ITopologyManager.class, containerName, this);
if (topologyManager == null) {
- return null;
+ return null;
}
ISwitchManager switchManager = (ISwitchManager) ServiceHelper
.getInstance(ISwitchManager.class, containerName, this);
if (switchManager == null) {
- return null;
+ return null;
}
-
+
Map<Node, Set<Edge>> nodeEdges = topologyManager.getNodeEdges();
Map<Node, Set<NodeConnector>> hostEdges = topologyManager
.getNodesWithNodeConnectorHost();
List<Switch> nodes = switchManager.getNetworkDevices();
-
+
List<SwitchConfig> switchConfigurations = new ArrayList<SwitchConfig>();
for(Switch sw : nodes) {
- Node n = sw.getNode();
- SwitchConfig config = switchManager.getSwitchConfig(n.toString());
- switchConfigurations.add(config);
+ Node n = sw.getNode();
+ SwitchConfig config = switchManager.getSwitchConfig(n.toString());
+ switchConfigurations.add(config);
}
-
+
// initialize cache if needed
if (!metaCache.containsKey(containerName)) {
- metaCache.put(containerName, new HashMap<String, Map<String, Object>>());
- // initialize hashes
- metaNodeHash.put(containerName, null);
- metaHostHash.put(containerName, null);
- metaNodeSingleHash.put(containerName, null);
- metaNodeConfigurationHash.put(containerName, null);
+ metaCache.put(containerName, new HashMap<String, Map<String, Object>>());
+ // initialize hashes
+ metaNodeHash.put(containerName, null);
+ metaHostHash.put(containerName, null);
+ metaNodeSingleHash.put(containerName, null);
+ metaNodeConfigurationHash.put(containerName, null);
}
-
+
// return cache if topology hasn't changed
if (
- (metaNodeHash.get(containerName) != null && metaHostHash.get(containerName) != null && metaNodeSingleHash.get(containerName) != null && metaNodeConfigurationHash.get(containerName) != null) &&
- metaNodeHash.get(containerName).equals(nodeEdges.hashCode()) && metaHostHash.get(containerName).equals(hostEdges.hashCode()) && metaNodeSingleHash.get(containerName).equals(nodes.hashCode()) && metaNodeConfigurationHash.get(containerName).equals(switchConfigurations.hashCode())
+ (metaNodeHash.get(containerName) != null && metaHostHash.get(containerName) != null && metaNodeSingleHash.get(containerName) != null && metaNodeConfigurationHash.get(containerName) != null) &&
+ metaNodeHash.get(containerName).equals(nodeEdges.hashCode()) && metaHostHash.get(containerName).equals(hostEdges.hashCode()) && metaNodeSingleHash.get(containerName).equals(nodes.hashCode()) && metaNodeConfigurationHash.get(containerName).equals(switchConfigurations.hashCode())
) {
- return metaCache.get(containerName).values();
+ return metaCache.get(containerName).values();
}
-
+
// cache has changed, we must assign the new values
metaNodeHash.put(containerName, nodeEdges.hashCode());
metaHostHash.put(containerName, hostEdges.hashCode());
metaNodeSingleHash.put(containerName, nodes.hashCode());
metaNodeConfigurationHash.put(containerName, switchConfigurations.hashCode());
-
+
stagedNodes = new HashMap<String, Map<String, Object>>();
newNodes = new HashMap<String, Map<String, Object>>();
// single nodes addition
addSingleNodes(nodes, switchManager, containerName);
-
+
// hostNodes addition
addHostNodes(hostEdges, topologyManager, containerName);
-
+
repositionTopology(containerName);
-
+
return metaCache.get(containerName).values();
}
* @param topology - the topology instance
*/
private void addNodes(Map<Node, Set<Edge>> nodeEdges,
- ITopologyManager topology, ISwitchManager switchManager, String containerName) {
+ ITopologyManager topology, ISwitchManager switchManager, String containerName) {
Bandwidth bandwidth = new Bandwidth(0);
Map<Edge, Set<Property>> properties = topology.getEdges();
-
+
for (Map.Entry<Node, Set<Edge>> e : nodeEdges.entrySet()) {
Node n = e.getKey();
String description = switchManager.getNodeDescription(n);
NodeBean node = createNodeBean(description, n);
-
+
// skip production node
if (nodeIgnore(n)) {
continue;
}
-
+
List<Map<String, Object>> adjacencies = new LinkedList<Map<String, Object>>();
Set<Edge> links = e.getValue();
for (Edge link : links) {
}
for (Property p : properties.get(link)) {
if (p instanceof Bandwidth) {
- bandwidth = (Bandwidth) p;
+ bandwidth = (Bandwidth) p;
break;
}
}
EdgeBean edge = new EdgeBean(link, bandwidth);
adjacencies.add(edge.out());
}
-
+
node.setLinks(adjacencies);
if (metaCache.get(containerName).containsKey(node.id())) {
- // retrieve node from cache
- Map<String, Object> nodeEntry = metaCache.get(containerName).get(node.id());
-
- Map<String, String> data = (Map<String, String>) nodeEntry.get("data");
- data.put("$desc", description);
- nodeEntry.put("data", data);
-
- // always update adjacencies
- nodeEntry.put("adjacencies", adjacencies);
- // stage this cached node (with position)
- stagedNodes.put(node.id(), nodeEntry);
+ // retrieve node from cache
+ Map<String, Object> nodeEntry = metaCache.get(containerName).get(node.id());
+
+ Map<String, String> data = (Map<String, String>) nodeEntry.get("data");
+ data.put("$desc", description);
+ nodeEntry.put("data", data);
+
+ // always update adjacencies
+ nodeEntry.put("adjacencies", adjacencies);
+ // stage this cached node (with position)
+ stagedNodes.put(node.id(), nodeEntry);
} else {
- newNodes.put(node.id(), node.out());
+ newNodes.put(node.id(), node.out());
}
}
}
-
+
/**
* Check if this node shouldn't appear in the visual topology
- *
+ *
* @param node
* @return
*/
private boolean nodeIgnore(Node node) {
String nodeType = node.getType();
-
+
// add other node types to ignore later
if (nodeType.equals(NodeIDType.PRODUCTION)) {
return true;
}
-
+
return false;
}
-
+
/**
* Check if this edge shouldn't appear in the visual topology
- *
+ *
* @param edge
* @return
*/
if (nodeIgnore(headNode)) {
return true;
}
-
+
NodeConnector tailNodeConnector = edge.getTailNodeConnector();
Node tailNode = tailNodeConnector.getNode();
if (nodeIgnore(tailNode)) {
return true;
}
-
+
return false;
}
-
+
protected NodeBean createNodeBean(String description, Node node) {
- String name = (description == null ||
- description.trim().isEmpty() ||
- description.equalsIgnoreCase("none"))?
- node.toString() : description;
- return new NodeBean(node.toString(), name, NodeType.NODE);
+ String name = (description == null ||
+ description.trim().isEmpty() ||
+ description.equalsIgnoreCase("none"))?
+ node.toString() : description;
+ return new NodeBean(node.toString(), name, NodeType.NODE);
}
-
+
@SuppressWarnings("unchecked")
- private void addSingleNodes(List<Switch> nodes, ISwitchManager switchManager, String containerName) {
- if (nodes == null) {
- return;
- }
- for (Switch sw : nodes) {
- Node n = sw.getNode();
-
- // skip production node
- if (nodeIgnore(n)) {
- continue;
- }
-
- String description = switchManager.getNodeDescription(n);
-
- if ((stagedNodes.containsKey(n.toString()) && metaCache.get(containerName).containsKey(n.toString())) || newNodes.containsKey(n.toString())) {
- continue;
- }
- NodeBean node = createNodeBean(description, n);
-
- // FIXME still doesn't display standalone node when last remaining link is removed
- if (metaCache.get(containerName).containsKey(node.id()) && !stagedNodes.containsKey(node.id())) {
- Map<String, Object> nodeEntry = metaCache.get(containerName).get(node.id());
- Map<String, String> data = (Map<String, String>) nodeEntry.get("data");
- data.put("$desc", description);
- nodeEntry.put("data", data);
- // clear adjacencies since this is now a single node
- nodeEntry.put("adjacencies", new LinkedList<Map<String, Object>>());
- stagedNodes.put(node.id(), nodeEntry);
+ private void addSingleNodes(List<Switch> nodes, ISwitchManager switchManager, String containerName) {
+ if (nodes == null) {
+ return;
+ }
+ for (Switch sw : nodes) {
+ Node n = sw.getNode();
+
+ // skip production node
+ if (nodeIgnore(n)) {
+ continue;
+ }
+
+ String description = switchManager.getNodeDescription(n);
+
+ if ((stagedNodes.containsKey(n.toString()) && metaCache.get(containerName).containsKey(n.toString())) || newNodes.containsKey(n.toString())) {
+ continue;
+ }
+ NodeBean node = createNodeBean(description, n);
+
+ // FIXME still doesn't display standalone node when last remaining link is removed
+ if (metaCache.get(containerName).containsKey(node.id()) && !stagedNodes.containsKey(node.id())) {
+ Map<String, Object> nodeEntry = metaCache.get(containerName).get(node.id());
+ Map<String, String> data = (Map<String, String>) nodeEntry.get("data");
+ data.put("$desc", description);
+ nodeEntry.put("data", data);
+ // clear adjacencies since this is now a single node
+ nodeEntry.put("adjacencies", new LinkedList<Map<String, Object>>());
+ stagedNodes.put(node.id(), nodeEntry);
} else {
- newNodes.put(node.id(), node.out());
+ newNodes.put(node.id(), node.out());
}
- }
+ }
}
/**
addressByteBuffer.putShort((short) 0);
addressByteBuffer.put(dmac.getValue());
addressByteBuffer.rewind();
-
+
long hid = addressByteBuffer.getLong();
String hostId = String.valueOf(hid);
-
+
NodeBean hostBean = new NodeBean(hostId, host.getNetworkAddressAsString(), NodeType.HOST);
List<Map<String, Object>> adjacencies = new LinkedList<Map<String, Object>>();
EdgeBean edge = new EdgeBean(connector, hid);
adjacencies.add(edge.out());
hostBean.setLinks(adjacencies);
-
+
if (metaCache.get(containerName).containsKey(hostId)) {
- Map<String, Object> hostEntry = metaCache.get(containerName).get(hostId);
- hostEntry.put("adjacencies", adjacencies);
- stagedNodes.put(hostId, hostEntry);
+ Map<String, Object> hostEntry = metaCache.get(containerName).get(hostId);
+ hostEntry.put("adjacencies", adjacencies);
+ stagedNodes.put(hostId, hostEntry);
} else {
- newNodes.put(String.valueOf(hid), hostBean.out());
+ newNodes.put(String.valueOf(hid), hostBean.out());
}
}
}
*/
private void repositionTopology(String containerName) {
Graph<String, String> graph = new SparseMultigraph<String, String>();
-
+
metaCache.get(containerName).clear();
metaCache.get(containerName).putAll(stagedNodes);
metaCache.get(containerName).putAll(newNodes);
-
+
for (Map<String, Object> on : metaCache.get(containerName).values()) {
graph.addVertex(on.toString());
List<Map<String, Object>> adjacencies = (List<Map<String, Object>>) on.get("adjacencies");
-
+
for (Map<String, Object> adj : adjacencies) {
graph.addEdge(
- adj.toString(), adj.get("nodeFrom").toString(),
- adj.get("nodeTo").toString()
+ adj.toString(), adj.get("nodeFrom").toString(),
+ adj.get("nodeTo").toString()
);
}
}
-
+
CircleLayout<String, String> layout = new CircleLayout<String, String>(graph);
layout.setSize(new Dimension(1200, 365));
for (Map.Entry<String, Map<String, Object>> v : newNodes.entrySet()) {
/**
* Update node position
- *
+ *
* This method is mainly used by the visual topology
*
* @param nodeId - The node to update
@RequestMapping(value = "/node/{nodeId}", method = RequestMethod.POST)
@ResponseBody
public Map<String, Object> post(@PathVariable String nodeId, @RequestParam(required = true) String x,
- @RequestParam(required = true) String y, @RequestParam(required = false) String container,
- HttpServletRequest request) {
- if (!authorize(UserLevel.NETWORKADMIN, request)) {
- return new HashMap<String, Object>(); // silently disregard new node position
- }
-
- String containerName = getAuthorizedContainer(request, container);
-
+ @RequestParam(required = true) String y, @RequestParam(required = false) String container,
+ HttpServletRequest request) {
+ if (!authorize(UserLevel.NETWORKADMIN, request)) {
+ return new HashMap<String, Object>(); // silently disregard new node position
+ }
+
+ String containerName = getAuthorizedContainer(request, container);
+
String id = new String(nodeId);
-
+
if (!metaCache.get(containerName).containsKey(id)) {
return null;
}
data.put("$y", y);
node.put("data", data);
-
+
return node;
}
-
+
/**
* Node object for visual topology
*/
protected class NodeBean {
- protected String id;
- protected String name;
- protected Map<String, String> data;
- protected List<Map<String, Object>> links;
-
- public NodeBean() {
- data = new HashMap<String, String>();
- links = new ArrayList<Map<String, Object>>();
- }
-
- public NodeBean(String id, String name, String type) {
- this();
- this.id = id;
- this.name = name;
- data.put("$desc", name);
- data.put("$type", type);
- }
-
- public void setLinks(List<Map<String, Object>> links) {
- this.links = links;
- }
-
- public Map<String, Object> out() {
- Map<String, Object> node = new HashMap<String, Object>();
- node.put("id", this.id);
- node.put("name", this.name);
- node.put("data", this.data);
- node.put("adjacencies", this.links);
-
- return node;
- }
-
- public String name() {
- return this.name;
- }
-
- public String id() {
- return this.id;
- }
+ protected String id;
+ protected String name;
+ protected Map<String, String> data;
+ protected List<Map<String, Object>> links;
+
+ public NodeBean() {
+ data = new HashMap<String, String>();
+ links = new ArrayList<Map<String, Object>>();
+ }
+
+ public NodeBean(String id, String name, String type) {
+ this();
+ this.id = id;
+ this.name = name;
+ data.put("$desc", name);
+ data.put("$type", type);
+ }
+
+ public void setLinks(List<Map<String, Object>> links) {
+ this.links = links;
+ }
+
+ public Map<String, Object> out() {
+ Map<String, Object> node = new HashMap<String, Object>();
+ node.put("id", this.id);
+ node.put("name", this.name);
+ node.put("data", this.data);
+ node.put("adjacencies", this.links);
+
+ return node;
+ }
+
+ public String name() {
+ return this.name;
+ }
+
+ public String id() {
+ return this.id;
+ }
}
-
+
/**
* Edge object for visual topology
*/
protected class EdgeBean {
- protected NodeConnector source;
- protected NodeConnector destination;
- protected Map<String, String> data;
- protected Long hostId;
-
- public EdgeBean() {
- data = new HashMap<String, String>();
- }
-
- public EdgeBean(Edge link, Bandwidth bandwidth) {
- this();
- this.source = link.getHeadNodeConnector();
- this.destination = link.getTailNodeConnector();
-
- // data
- data.put("$bandwidth", bandwidth.toString());
- data.put("$color", bandwidthColor(bandwidth));
- data.put("$nodeToPort", destination.getID().toString());
- data.put("$nodeFromPort", source.getID().toString());
- data.put("$descFrom", source.getNode().toString());
- data.put("$descTo", destination.getNode().toString());
- data.put("$nodeFromPortName", source.toString());
- data.put("$nodeToPortName", destination.toString());
- }
-
- public EdgeBean(NodeConnector connector, Long hostId) {
- this();
- this.source = null;
- this.destination = connector;
- this.hostId = hostId;
-
- data.put("$bandwidth", "N/A");
- data.put("$color", bandwidthColor(new Bandwidth(0)));
- data.put("$nodeToPort", connector.getNodeConnectorIDString());
- data.put("$nodeFromPort", connector.getNodeConnectorIDString());
- data.put("$descTo", "");
- data.put("$descFrom", "");
- data.put("$nodeToPortName", "");
- data.put("$nodeFromPortName", "");
- }
-
- public Map<String, Object> out() {
- Map<String, Object> edge = new HashMap<String, Object>();
-
- edge.put("data", data);
- if (source == null) {
- edge.put("nodeFrom", String.valueOf(this.hostId));
- } else {
- edge.put("nodeFrom", source.getNode().toString());
- }
- edge.put("nodeTo", destination.getNode().toString());
-
-
- return edge;
- }
-
- private String bandwidthColor(Bandwidth bandwidth) {
- String color = null;
- long bandwidthValue = bandwidth.getValue();
-
- if (bandwidthValue == 0) {
+ protected NodeConnector source;
+ protected NodeConnector destination;
+ protected Map<String, String> data;
+ protected Long hostId;
+
+ public EdgeBean() {
+ data = new HashMap<String, String>();
+ }
+
+ public EdgeBean(Edge link, Bandwidth bandwidth) {
+ this();
+ this.source = link.getHeadNodeConnector();
+ this.destination = link.getTailNodeConnector();
+
+ // data
+ data.put("$bandwidth", bandwidth.toString());
+ data.put("$color", bandwidthColor(bandwidth));
+ data.put("$nodeToPort", destination.getID().toString());
+ data.put("$nodeFromPort", source.getID().toString());
+ data.put("$descFrom", source.getNode().toString());
+ data.put("$descTo", destination.getNode().toString());
+ data.put("$nodeFromPortName", source.toString());
+ data.put("$nodeToPortName", destination.toString());
+ }
+
+ public EdgeBean(NodeConnector connector, Long hostId) {
+ this();
+ this.source = null;
+ this.destination = connector;
+ this.hostId = hostId;
+
+ data.put("$bandwidth", "N/A");
+ data.put("$color", bandwidthColor(new Bandwidth(0)));
+ data.put("$nodeToPort", connector.getNodeConnectorIDString());
+ data.put("$nodeFromPort", connector.getNodeConnectorIDString());
+ data.put("$descTo", "");
+ data.put("$descFrom", "");
+ data.put("$nodeToPortName", "");
+ data.put("$nodeFromPortName", "");
+ }
+
+ public Map<String, Object> out() {
+ Map<String, Object> edge = new HashMap<String, Object>();
+
+ edge.put("data", data);
+ if (source == null) {
+ edge.put("nodeFrom", String.valueOf(this.hostId));
+ } else {
+ edge.put("nodeFrom", source.getNode().toString());
+ }
+ edge.put("nodeTo", destination.getNode().toString());
+
+
+ return edge;
+ }
+
+ private String bandwidthColor(Bandwidth bandwidth) {
+ String color = null;
+ long bandwidthValue = bandwidth.getValue();
+
+ if (bandwidthValue == 0) {
color = "#000";
} else if (bandwidthValue < Bandwidth.BW1Kbps) {
- color = "#148AC6";
+ color = "#148AC6";
} else if (bandwidthValue < Bandwidth.BW1Mbps) {
color = "#2858A0";
} else if (bandwidthValue < Bandwidth.BW1Gbps) {
} else if (bandwidthValue < Bandwidth.BW1Pbps) {
color = "#F9F464";
}
-
- return color;
+
+ return color;
}
}
-
+
protected class NodeType {
- public static final String NODE = "swtch";
- public static final String HOST = "host";
+ public static final String NODE = "swtch";
+ public static final String HOST = "host";
}
-
+
private boolean authorize(UserLevel level, HttpServletRequest request) {
- IUserManager userManager = (IUserManager) ServiceHelper
+ IUserManager userManager = (IUserManager) ServiceHelper
.getGlobalInstance(IUserManager.class, this);
if (userManager == null) {
- return false;
+ return false;
}
-
+
String username = request.getUserPrincipal().getName();
UserLevel userLevel = userManager.getUserLevel(username);
if (userLevel.toNumber() <= level.toNumber()) {
- return true;
+ return true;
}
return false;
}
-
+
private String getAuthorizedContainer(HttpServletRequest request, String container) {
- String username = request.getUserPrincipal().getName();
- IContainerAuthorization containerAuthorization = (IContainerAuthorization) ServiceHelper.
- getGlobalInstance(IContainerAuthorization.class, this);
- if (containerAuthorization != null) {
- Set<Resource> resources = containerAuthorization.getAllResourcesforUser(username);
- if (authorizeContainer(container, resources)) {
- return container;
- }
- }
-
- return GlobalConstants.DEFAULT.toString();
+ String username = request.getUserPrincipal().getName();
+ IContainerAuthorization containerAuthorization = (IContainerAuthorization) ServiceHelper.
+ getGlobalInstance(IContainerAuthorization.class, this);
+ if (containerAuthorization != null) {
+ Set<Resource> resources = containerAuthorization.getAllResourcesforUser(username);
+ if (authorizeContainer(container, resources)) {
+ return container;
+ }
+ }
+
+ return GlobalConstants.DEFAULT.toString();
}
-
+
private boolean authorizeContainer(String container, Set<Resource> resources) {
- for(Resource resource : resources) {
- String containerName = (String) resource.getResource();
- if (containerName.equals(container)) {
- return true;
- }
- }
-
- return false;
+ for(Resource resource : resources) {
+ String containerName = (String) resource.getResource();
+ if (containerName.equals(container)) {
+ return true;
+ }
+ }
+
+ return false;
}
@SuppressWarnings("unchecked")
- private void loadConfiguration() {
+ private void loadConfiguration() {
ObjectReader objReader = new ObjectReader();
metaCache = (Map<String, Map<String, Map<String, Object>>>) objReader.read(this, topologyWebFileName);
if (metaCache == null) metaCache = new HashMap<String, Map<String, Map<String, Object>>>();
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.opendaylight.controller.sal.utils.NodeCreator;
import org.opendaylight.controller.switchmanager.SwitchConfig;
import org.opendaylight.controller.topology.web.Topology.NodeBean;
-
+
public class TopologyTest {
- @Test
- public void testCreateNodeBean() {
- Topology topology = new Topology();
- Node node = NodeCreator.createOFNode(new Long(3));
- String description = "foo";
-
- NodeBean bean = topology.createNodeBean(description, node);
-
- assertNotNull(bean);
- assertEquals(bean.id, node.toString());
- assertEquals(bean.name, "foo");
-
- bean = topology.createNodeBean(null, node);
-
- assertNotNull(bean);
- assertEquals(bean.id, node.toString());
- assertEquals(bean.name, bean.id);
-
- bean = topology.createNodeBean(" ", node);
-
- assertNotNull(bean);
- assertEquals(bean.id, node.toString());
- assertEquals(bean.name, bean.id);
- }
+ @Test
+ public void testCreateNodeBean() {
+ Topology topology = new Topology();
+ Node node = NodeCreator.createOFNode(new Long(3));
+ String description = "foo";
+
+ NodeBean bean = topology.createNodeBean(description, node);
+
+ assertNotNull(bean);
+ assertEquals(bean.id, node.toString());
+ assertEquals(bean.name, "foo");
+
+ bean = topology.createNodeBean(null, node);
+
+ assertNotNull(bean);
+ assertEquals(bean.id, node.toString());
+ assertEquals(bean.name, bean.id);
+
+ bean = topology.createNodeBean(" ", node);
+
+ assertNotNull(bean);
+ assertEquals(bean.id, node.toString());
+ assertEquals(bean.name, bean.id);
+ }
}
Match match = flow.getMatch();
ISwitchManager switchManager = (ISwitchManager) ServiceHelper
.getInstance(ISwitchManager.class, containerName, this);
- String desc = (switchManager == null)?
- "" : switchManager.getNodeDescription(node);
- desc = (desc.isEmpty() || desc.equalsIgnoreCase("none"))?
- node.toString(): desc;
+ String desc = (switchManager == null)?
+ "" : switchManager.getNodeDescription(node);
+ desc = (desc.isEmpty() || desc.equalsIgnoreCase("none"))?
+ node.toString(): desc;
row.put("nodeName", desc);
if (match.isPresent(MatchType.IN_PORT)) {
row.put(MatchType.IN_PORT.id(), ((NodeConnector) flow.getMatch()
Short tpSrc = (Short) (flow.getMatch().getField(MatchType.TP_SRC)
.getValue());
row.put(MatchType.TP_SRC.id(),
- String.valueOf(NetUtils.getUnsignedShort(tpSrc)));
+ String.valueOf(NetUtils.getUnsignedShort(tpSrc)));
} else {
row.put(MatchType.TP_SRC.id(), "*");
}
Short tpDst = (Short) (flow.getMatch().getField(MatchType.TP_DST)
.getValue());
row.put(MatchType.TP_DST.id(),
- String.valueOf(NetUtils.getUnsignedShort(tpDst)));
+ String.valueOf(NetUtils.getUnsignedShort(tpDst)));
} else {
row.put(MatchType.TP_DST.id(), "*");
}