<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
</scm>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
import javax.transaction.HeuristicMixedException;
import javax.transaction.HeuristicRollbackException;
*/
void tbegin() throws NotSupportedException, SystemException;
+ /**
+ * Begin a transaction, like {@link #tbegin()}, but with a caller-supplied timeout.
+ * @see IClusterServices#tbegin
+ * @param timeout the transaction timeout
+ * @param unit TimeUnit for the timeout
+ * @throws NotSupportedException
+ * @throws SystemException
+ */
+ void tbegin(long timeout, TimeUnit unit) throws NotSupportedException, SystemException;
+
/**
* Commit a transaction covering all the data structures/HW updates.
*/
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
import javax.transaction.HeuristicMixedException;
import javax.transaction.HeuristicRollbackException;
*/
void tbegin() throws NotSupportedException, SystemException;
+ /**
+ * Begin a transaction, like {@link #tbegin()}, but with a caller-supplied timeout.
+ * @see IClusterServicesCommon#tbegin
+ * @param timeout the transaction timeout
+ * @param unit TimeUnit for the timeout
+ * @throws NotSupportedException
+ * @throws SystemException
+ */
+ void tbegin(long timeout, TimeUnit unit) throws NotSupportedException, SystemException;
+
/**
* Commit a transaction covering all the data structures/HW updates.
*/
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
import javax.transaction.HeuristicMixedException;
import javax.transaction.HeuristicRollbackException;
private static String loopbackAddress = "127.0.0.1";
+ // Default transaction timeout, in seconds (used when no explicit timeout is given).
+ private static int DEFAULT_TRANSACTION_TIMEOUT = 60;
+
/**
* Start a JGroups GossipRouter if we are a supernode. The
* GosispRouter is nothing more than a simple
@Override
public void tbegin() throws NotSupportedException, SystemException {
+ // call tbegin with the default timeout
+ tbegin(DEFAULT_TRANSACTION_TIMEOUT, TimeUnit.SECONDS);
+ }
+
+ @Override
+ public void tbegin(long timeout, TimeUnit unit) throws NotSupportedException, SystemException {
EmbeddedCacheManager manager = this.cm;
if (manager == null) {
throw new IllegalStateException();
if (tm == null) {
throw new IllegalStateException();
}
+ long timeoutSec = unit.toSeconds(timeout);
+ if((timeoutSec > Integer.MAX_VALUE) || (timeoutSec <= 0)) {
+ // fall back to the default timeout
+ tm.setTransactionTimeout(DEFAULT_TRANSACTION_TIMEOUT);
+ } else {
+ // The cast to int is safe here because we have already verified
+ // that 0 < timeoutSec <= Integer.MAX_VALUE.
+ tm.setTransactionTimeout((int) timeoutSec);
+ }
tm.begin();
}
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
import javax.transaction.HeuristicMixedException;
import javax.transaction.HeuristicRollbackException;
import javax.transaction.NotSupportedException;
import javax.transaction.RollbackException;
import javax.transaction.SystemException;
import javax.transaction.Transaction;
+
import org.apache.felix.dm.Component;
import org.opendaylight.controller.clustering.services.CacheConfigException;
import org.opendaylight.controller.clustering.services.CacheExistException;
}
}
+ @Override
+ public void tbegin(long timeout, TimeUnit unit) throws NotSupportedException, SystemException {
+ if (this.clusterService != null) {
+ this.clusterService.tbegin(timeout, unit);
+ } else {
+ throw new IllegalStateException();
+ }
+ }
+
@Override
public void tcommit() throws RollbackException, HeuristicMixedException,
HeuristicRollbackException, java.lang.SecurityException,
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
package org.opendaylight.controller.clustering.stub.internal;
-import java.util.ArrayList;
-import java.util.concurrent.ConcurrentHashMap;
-import java.net.UnknownHostException;
import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Dictionary;
import java.util.List;
import java.util.Properties;
import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
import javax.transaction.HeuristicMixedException;
import javax.transaction.HeuristicRollbackException;
import javax.transaction.SystemException;
import javax.transaction.Transaction;
+import org.apache.felix.dm.Component;
import org.opendaylight.controller.clustering.services.CacheConfigException;
import org.opendaylight.controller.clustering.services.CacheExistException;
import org.opendaylight.controller.clustering.services.IClusterServices;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.Dictionary;
-import org.apache.felix.dm.Component;
-
public abstract class ClusterManagerCommon implements IClusterServicesCommon {
protected String containerName = "";
protected static final Logger logger = LoggerFactory
java.lang.IllegalStateException, SystemException {
}
+ @Override
+ public void tbegin(long timeout, TimeUnit unit) throws NotSupportedException, SystemException {
+
+ }
+
@Override
public void trollback() throws java.lang.IllegalStateException,
java.lang.SecurityException, SystemException {
<yang.version>0.5.9-SNAPSHOT</yang.version>
<guava.version>14.0.1</guava.version>
<osgi.core.version>5.0.0</osgi.core.version>
- <ietf-inet-types.version>2010.09.24.1</ietf-inet-types.version>
- <ietf-yang-types.version>2010.09.24.1</ietf-yang-types.version>
+ <ietf-inet-types.version>2010.09.24.2-SNAPSHOT</ietf-inet-types.version>
+ <ietf-yang-types.version>2010.09.24.2-SNAPSHOT</ietf-yang-types.version>
<opendaylight-l2-types.version>2013.08.27.1</opendaylight-l2-types.version>
<yang-ext.version>2013.09.07.1</yang-ext.version>
<javassist.version>3.17.1-GA</javassist.version>
<sonar.language>java</sonar.language>
<forwardingrulesmanager.version>0.5.0-SNAPSHOT</forwardingrulesmanager.version>
<statisticsmanager.version>0.5.0-SNAPSHOT</statisticsmanager.version>
+ <clustering.services.version>0.5.0-SNAPSHOT</clustering.services.version>
<maven.compile.plugin.version>2.5.1</maven.compile.plugin.version>
<java.version.source>1.7</java.version.source>
<java.version.target>1.7</java.version.target>
</generator>
<generator>
<codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
- <outputBaseDir>target/site</outputBaseDir>
+ <outputBaseDir>target/site/models</outputBaseDir>
</generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
type tt:extend-enum;
}
+ leaf sleep-factor {
+ type decimal64 {
+ fraction-digits 2;
+ }
+ }
+
container dto-c {
leaf simple-arg {
type uint32;
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>${controller.version}</version>
+ <version>${clustering.services.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
netconf.tcp.address=0.0.0.0
netconf.tcp.port=8383
+netconf.tcp.client.address=127.0.0.1
+netconf.tcp.client.port=8383
+
netconf.ssh.address=0.0.0.0
netconf.ssh.port=1830
<type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty">netty:netty-event-executor</type>
<instance>
<name>global-event-executor</name>
- <provider>/config/modules/module[name='netty-global-eventexecutor']/instance[name='global-event-executor']</provider>
+ <provider>/config/modules/module[name='netty-global-event-executor']/instance[name='global-event-executor']</provider>
</instance>
</service>
<service>
<name>ref_dom-broker</name>
</dom-broker>
<mapping-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding:binding-dom-mapping-service</type>
- <name>ref_runtime-mapping-singleton</name>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding:binding-dom-mapping-service</type>
+ <name>ref_runtime-mapping-singleton</name>
</mapping-service>
</module>
//SERVICES START
<service>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
<instance>
- <name>ref_yang-schema-service</name>
- <provider>/config/modules/module[name='schema-service-singleton']/instance[name='yang-schema-service']</provider>
+ <name>ref_yang-schema-service</name>
+ <provider>/config/modules/module[name='schema-service-singleton']/instance[name='yang-schema-service']</provider>
</instance>
</service>
<service>
<provider>/config/modules/module[name='binding-broker-impl']/instance[name='binding-broker-impl']</provider>
</instance>
</service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
+ <instance>
+ <name>ref_binding-rpc-broker</name>
+ <provider>/config/modules/module[name='binding-broker-impl']/instance[name='binding-broker-impl']</provider>
+ </instance>
+ </service>
<service>
<type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding-impl:binding-dom-mapping-service</type>
<instance>
<service>
<type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
<instance>
- <name>ref_binding-data-broker</name>
- <provider>/config/modules/module[name='binding-data-broker']/instance[name='binding-data-broker']</provider>
- </instance>
+ <name>ref_binding-data-broker</name>
+ <provider>/config/modules/module[name='binding-data-broker']/instance[name='binding-data-broker']</provider>
+ </instance>
</service>
//CAPABILITIES START
urn:opendaylight:l2:types?module=opendaylight-l2-types&revision=2013-08-27
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
</namespaceToPackage1>
</additionalConfiguration>
</generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
</configuration>
<artifactId>yang-jmx-generator-plugin</artifactId>
<version>0.2.3-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>0.6.0-SNAPSHOT</version>
+ <type>jar</type>
+ </dependency>
</dependencies>
</plugin>
</plugins>
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
import org.opendaylight.controller.sal.utils.IPProtocols;
+import org.opendaylight.controller.sal.utils.NetUtils;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.PortNumber;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetTpSrcAction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetVlanIdAction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetVlanPcpAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeFlow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Instructions;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ApplyActions;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ClearActions;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.GoToTable;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.Meter;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.WriteActions;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanPcp;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.Match;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatch;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.IpMatch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.Layer3Match;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.VlanMatch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv6Match;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
public class FRMUtil {
protected static final Logger logger = LoggerFactory.getLogger(FRMUtil.class);
ADD, DELETE, UPDATE, GET
};
+ private enum EtherIPType {
+ ANY, V4, V6;
+ };
+
public static boolean isNameValid(String name) {
// Name validation
}
public static boolean validateMatch(Flow flow) {
+ EtherIPType etype = EtherIPType.ANY;
+ EtherIPType ipsrctype = EtherIPType.ANY;
+ EtherIPType ipdsttype = EtherIPType.ANY;
+
Match match = flow.getMatch();
if (match != null) {
EthernetMatch ethernetmatch = match.getEthernetMatch();
IpMatch ipmatch = match.getIpMatch();
+ Layer3Match layer3match = match.getLayer3Match();
VlanMatch vlanmatch = match.getVlanMatch();
match.getIcmpv4Match();
if (ethernetmatch != null) {
if ((ethernetmatch.getEthernetSource() != null)
- && !isL2AddressValid(ethernetmatch.getEthernetSource().toString())) {
+ && !isL2AddressValid(ethernetmatch.getEthernetSource().getAddress().getValue())) {
- logger.error("Ethernet source address %s is not valid. Example: 00:05:b9:7c:81:5f",
+ logger.error("Ethernet source address is not valid. Example: 00:05:b9:7c:81:5f",
ethernetmatch.getEthernetSource());
return false;
}
if ((ethernetmatch.getEthernetDestination() != null)
- && !isL2AddressValid(ethernetmatch.getEthernetDestination().toString())) {
- logger.error("Ethernet destination address %s is not valid. Example: 00:05:b9:7c:81:5f",
+ && !isL2AddressValid(ethernetmatch.getEthernetDestination().getAddress().getValue())) {
+ logger.error("Ethernet destination address is not valid. Example: 00:05:b9:7c:81:5f",
ethernetmatch.getEthernetDestination());
return false;
}
if (ethernetmatch.getEthernetType() != null) {
- int type = Integer.decode(ethernetmatch.getEthernetType().toString());
+ long type = ethernetmatch.getEthernetType().getType().getValue().longValue();
if ((type < 0) || (type > 0xffff)) {
logger.error("Ethernet type is not valid");
return false;
+ } else {
+ if (type == 0x0800) {
+ etype = EtherIPType.V4;
+ } else if (type == 0x86dd) {
+ etype = EtherIPType.V6;
+ }
+ }
+
+ }
+ }
+
+ if (layer3match != null) {
+ if (layer3match instanceof Ipv4Match) {
+ if (((Ipv4Match) layer3match).getIpv4Source() != null) {
+ if (NetUtils.isIPv4AddressValid(((Ipv4Match) layer3match).getIpv4Source().getValue())) {
+ ipsrctype = EtherIPType.V4;
+ } else {
+ logger.error("IP source address is not valid");
+ return false;
+ }
+
+ } else if (((Ipv4Match) layer3match).getIpv4Destination() != null) {
+ if (NetUtils.isIPv4AddressValid(((Ipv4Match) layer3match).getIpv4Destination().getValue())) {
+ ipdsttype = EtherIPType.V4;
+ } else {
+ logger.error("IP Destination address is not valid");
+ return false;
+ }
+
+ }
+ } else if (layer3match instanceof Ipv6Match) {
+ if (((Ipv6Match) layer3match).getIpv6Source() != null) {
+ if (NetUtils.isIPv6AddressValid(((Ipv6Match) layer3match).getIpv6Source().getValue())) {
+ ipsrctype = EtherIPType.V6;
+ } else {
+ logger.error("IPv6 source address is not valid");
+ return false;
+ }
+
+ } else if (((Ipv6Match) layer3match).getIpv6Destination() != null) {
+ if (NetUtils.isIPv6AddressValid(((Ipv6Match) layer3match).getIpv6Destination().getValue())) {
+ ipdsttype = EtherIPType.V6;
+ } else {
+ logger.error("IPv6 Destination address is not valid");
+ return false;
+ }
+
+ }
+
+ }
+
+ if (etype != EtherIPType.ANY) {
+ if ((ipsrctype != EtherIPType.ANY) && (ipsrctype != etype)) {
+ logger.error("Type mismatch between Ethernet & Src IP");
+ return false;
+ }
+ if ((ipdsttype != EtherIPType.ANY) && (ipdsttype != etype)) {
+ logger.error("Type mismatch between Ethernet & Dst IP");
+ return false;
+ }
+ }
+ if (ipsrctype != ipdsttype) {
+ if (!((ipsrctype == EtherIPType.ANY) || (ipdsttype == EtherIPType.ANY))) {
+ logger.error("IP Src Dest Type mismatch");
+ return false;
}
}
- } else if (ipmatch != null) {
- if (ipmatch.getIpProtocol() != null && isProtocolValid(ipmatch.getIpProtocol().toString())) {
+ }
+
+ if (ipmatch != null) {
+ if (ipmatch.getIpProtocol() != null && !(isProtocolValid(ipmatch.getIpProtocol().toString()))) {
logger.error("Protocol is not valid");
return false;
}
- } else if (vlanmatch != null) {
- if (vlanmatch.getVlanId() != null && isVlanIdValid(vlanmatch.getVlanId().toString())) {
+
+ }
+
+ if (vlanmatch != null) {
+ if (vlanmatch.getVlanId() != null
+ && !(isVlanIdValid(vlanmatch.getVlanId().getVlanId().getValue().toString()))) {
logger.error("Vlan ID is not in the range 0 - 4095");
return false;
}
- if (vlanmatch.getVlanPcp() != null && isVlanPriorityValid(vlanmatch.getVlanPcp().toString())) {
+ if (vlanmatch.getVlanPcp() != null
+ && !(isVlanPriorityValid(vlanmatch.getVlanPcp().getValue().toString()))) {
logger.error("Vlan priority is not in the range 0 - 7");
return false;
}
}
+
}
return true;
+
}
public static boolean validateActions(List<Action> actions) {
return false;
}
if (outputnodeconnector != null) {
- // TODO
+ if (!outputnodeconnector.getValue().equals(NodeConnectorIDType.ALL)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.CONTROLLER)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.HWPATH)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.ONEPK)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.ONEPK2OPENFLOW)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.ONEPK2PCEP)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.OPENFLOW)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.OPENFLOW2ONEPK)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.OPENFLOW2PCEP)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.PCEP)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.PCEP2ONEPK)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.PCEP2OPENFLOW)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.PRODUCTION)
+ || !outputnodeconnector.getValue().equals(NodeConnectorIDType.SWSTACK)) {
+ logger.error("Output Action: NodeConnector Type is not valid");
+ return false;
+ }
+
}
} else if (action instanceof PushMplsAction) {
Integer ethertype = ((PushMplsAction) action).getEthernetType();
logger.error("Ether Type is not valid for PushVlanAction");
return false;
}
- } else if (action instanceof SetDlDstAction || action instanceof SetDlSrcAction) {
+ } else if (action instanceof SetDlDstAction) {
MacAddress address = ((SetDlDstAction) action).getAddress();
if (address != null && !isL2AddressValid(address.toString())) {
logger.error("SetDlDstAction: Address not valid");
}
} else if (action instanceof SetVlanIdAction) {
VlanId vlanid = ((SetVlanIdAction) action).getVlanId();
- if (vlanid != null && !isVlanIdValid(vlanid.toString())) {
- logger.error("Vlan ID %s is not in the range 0 - 4095");
+ if (vlanid != null && !isVlanIdValid(vlanid.getValue().toString())) {
+ logger.error("Vlan ID is not in the range 0 - 4095");
return false;
}
} else if (action instanceof SetVlanPcpAction) {
VlanPcp vlanpcp = ((SetVlanPcpAction) action).getVlanPcp();
- if (vlanpcp != null && !isVlanPriorityValid(vlanpcp.toString())) {
- logger.error("Vlan priority %s is not in the range 0 - 7");
+ if (vlanpcp != null && !isVlanPriorityValid(vlanpcp.getValue().toString())) {
+ logger.error("Vlan priority is not in the range 0 - 7");
return false;
}
}
}
return true;
+
}
public static boolean validateInstructions(Flow flow) {
List<Instruction> instructionsList = new ArrayList<>();
Instructions instructions = flow.getInstructions();
- if( instructions == null ) {
+ if (instructions == null) {
return false;
}
instructionsList = instructions.getInstruction();
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowAdded;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowRemoved;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowUpdated;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeErrorNotification;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeExperimenterErrorNotification;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeFlow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SwitchFlowRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.flow.update.UpdatedFlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
public class FlowConsumerImpl implements IForwardingRulesManager {
protected static final Logger logger = LoggerFactory.getLogger(FlowConsumerImpl.class);
- private final FlowEventListener flowEventListener = new FlowEventListener();
+ // private final FlowEventListener flowEventListener = new
+ // FlowEventListener();
private Registration<NotificationListener> listener1Reg;
private SalFlowService flowService;
// private FlowDataListener listener;
if (null == flowService) {
logger.error("Consumer SAL Service is down or NULL. FRM may not function as intended");
- System.out.println("Consumer SAL Service is down or NULL.");
return;
}
// }
// For switch events
- listener1Reg = FRMConsumerImpl.getNotificationService().registerNotificationListener(flowEventListener);
+ // listener1Reg =
+ // FRMConsumerImpl.getNotificationService().registerNotificationListener(flowEventListener);
if (null == listener1Reg) {
logger.error("Listener to listen on flow data modifcation events");
- System.out.println("Consumer SAL Service is down or NULL.");
return;
}
// addFlowTest();
- System.out.println("-------------------------------------------------------------------");
commitHandler = new FlowDataCommitHandler();
FRMConsumerImpl.getDataProviderService().registerCommitHandler(path, commitHandler);
clusterContainerService = (IClusterContainerServices) ServiceHelper.getGlobalInstance(
AddFlowInput firstMsg = input1.build();
if (null != flowService) {
- System.out.println(flowService.toString());
- } else {
- System.out.println("ConsumerFlowService is NULL");
+ logger.error("ConsumerFlowService is NULL");
}
@SuppressWarnings("unused")
Future<RpcResult<AddFlowOutput>> result1 = flowService.addFlow(firstMsg);
private void addFlow(InstanceIdentifier<?> path, Flow dataObject) {
AddFlowInputBuilder input = new AddFlowInputBuilder();
-
- List<Instruction> inst = (dataObject).getInstructions().getInstruction();
+
input.setNode((dataObject).getNode());
input.setPriority((dataObject).getPriority());
input.setMatch((dataObject).getMatch());
input.setCookie((dataObject).getCookie());
input.setInstructions((dataObject).getInstructions());
- dataObject.getMatch().getLayer3Match();
- for (int i = 0; i < inst.size(); i++) {
- System.out.println("i = " + i + inst.get(i).getInstruction().toString());
- System.out.println("i = " + i + inst.get(i).toString());
- }
-
- System.out.println("Instruction list" + (dataObject).getInstructions().getInstruction().toString());
+ input.setBufferId(dataObject.getBufferId());
+ input.setTableId(dataObject.getTableId());
+ input.setOutPort(dataObject.getOutPort());
+ input.setOutGroup(dataObject.getOutGroup());
+ input.setIdleTimeout(dataObject.getIdleTimeout());
+ input.setHardTimeout(dataObject.getHardTimeout());
+ input.setFlowName(dataObject.getFlowName());
+ input.setFlags(dataObject.getFlags());
+ input.setCookieMask(dataObject.getCookieMask());
+ input.setContainerName(dataObject.getContainerName());
+ input.setBarrier(dataObject.isBarrier());
+ input.setInstallHw(dataObject.isInstallHw());
+ input.setStrict(dataObject.isStrict());
// updating the staticflow cache
/*
- * Commented out... as in many other places... use of ClusteringServices is breaking things
- * insufficient time to debug
- Integer ordinal = staticFlowsOrdinal.get(0);
- staticFlowsOrdinal.put(0, ++ordinal);
- staticFlows.put(ordinal, dataObject);
- */
+ * Commented out... as in many other places... use of ClusteringServices
+ * is breaking things insufficient time to debug Integer ordinal =
+ * staticFlowsOrdinal.get(0); staticFlowsOrdinal.put(0, ++ordinal);
+ * staticFlows.put(ordinal, dataObject);
+ */
// We send flow to the sounthbound plugin
+
flowService.addFlow(input.build());
+
/*
- * Commented out as this will also break due to improper use of ClusteringServices
- updateLocalDatabase((NodeFlow) dataObject, true);
- */
+ * Commented out as this will also break due to improper use of
+ * ClusteringServices updateLocalDatabase((NodeFlow) dataObject, true);
+ */
}
/**
private void removeFlow(InstanceIdentifier<?> path, Flow dataObject) {
RemoveFlowInputBuilder input = new RemoveFlowInputBuilder();
- List<Instruction> inst = (dataObject).getInstructions().getInstruction();
input.setNode((dataObject).getNode());
input.setPriority((dataObject).getPriority());
input.setMatch((dataObject).getMatch());
input.setCookie((dataObject).getCookie());
input.setInstructions((dataObject).getInstructions());
- dataObject.getMatch().getLayer3Match();
- for (int i = 0; i < inst.size(); i++) {
- System.out.println("i = " + i + inst.get(i).getInstruction().toString());
- System.out.println("i = " + i + inst.get(i).toString());
- }
-
- System.out.println("Instruction list" + (dataObject).getInstructions().getInstruction().toString());
-
+ input.setBufferId(dataObject.getBufferId());
+ input.setTableId(dataObject.getTableId());
+ input.setOutPort(dataObject.getOutPort());
+ input.setOutGroup(dataObject.getOutGroup());
+ input.setIdleTimeout(dataObject.getIdleTimeout());
+ input.setHardTimeout(dataObject.getHardTimeout());
+ input.setFlowName(dataObject.getFlowName());
+ input.setFlags(dataObject.getFlags());
+ input.setCookieMask(dataObject.getCookieMask());
+ input.setContainerName(dataObject.getContainerName());
+ input.setBarrier(dataObject.isBarrier());
+ input.setInstallHw(dataObject.isInstallHw());
+ input.setStrict(dataObject.isStrict());
// updating the staticflow cache
/*
- * Commented out due to problems caused by improper use of ClusteringServices
- Integer ordinal = staticFlowsOrdinal.get(0);
- staticFlowsOrdinal.put(0, ++ordinal);
- staticFlows.put(ordinal, dataObject);
- */
+ * Commented out due to problems caused by improper use of
+ * ClusteringServices Integer ordinal = staticFlowsOrdinal.get(0);
+ * staticFlowsOrdinal.put(0, ++ordinal); staticFlows.put(ordinal,
+ * dataObject);
+ */
// We send flow to the sounthbound plugin
flowService.removeFlow(input.build());
+
/*
- * Commented out due to problems caused by improper use of ClusteringServices
- updateLocalDatabase((NodeFlow) dataObject, false);
- */
+ * Commented out due to problems caused by improper use of
+ * ClusteringServices updateLocalDatabase((NodeFlow) dataObject, false);
+ */
}
/**
// updating the staticflow cache
/*
- * Commented out due to problems caused by improper use of ClusteringServices.
- Integer ordinal = staticFlowsOrdinal.get(0);
- staticFlowsOrdinal.put(0, ++ordinal);
- staticFlows.put(ordinal, dataObject);
- */
+ * Commented out due to problems caused by improper use of
+ * ClusteringServices. Integer ordinal = staticFlowsOrdinal.get(0);
+ * staticFlowsOrdinal.put(0, ++ordinal); staticFlows.put(ordinal,
+ * dataObject);
+ */
// We send flow to the sounthbound plugin
flowService.updateFlow(input.build());
+
/*
- * Commented out due to problems caused by improper use of ClusteringServices.
- updateLocalDatabase((NodeFlow) dataObject, true);
- */
+ * Commented out due to problems caused by improper use of
+ * ClusteringServices. updateLocalDatabase((NodeFlow) dataObject, true);
+ */
}
@SuppressWarnings("unchecked")
private void commitToPlugin(internalTransaction transaction) {
- Set<Entry<InstanceIdentifier<?>, DataObject>> createdEntries = transaction.getModification().getCreatedConfigurationData().entrySet();
+ Set<Entry<InstanceIdentifier<?>, DataObject>> createdEntries = transaction.getModification()
+ .getCreatedConfigurationData().entrySet();
/*
- * This little dance is because updatedEntries contains both created and modified entries
- * The reason I created a new HashSet is because the collections we are returned are immutable.
+ * This little dance is because updatedEntries contains both created and
+ * modified entries The reason I created a new HashSet is because the
+ * collections we are returned are immutable.
*/
Set<Entry<InstanceIdentifier<?>, DataObject>> updatedEntries = new HashSet<Entry<InstanceIdentifier<?>, DataObject>>();
updatedEntries.addAll(transaction.getModification().getUpdatedConfigurationData().entrySet());
updatedEntries.removeAll(createdEntries);
- Set<InstanceIdentifier<?>> removeEntriesInstanceIdentifiers = transaction.getModification().getRemovedConfigurationData();
+ Set<InstanceIdentifier<?>> removeEntriesInstanceIdentifiers = transaction.getModification()
+ .getRemovedConfigurationData();
transaction.getModification().getOriginalConfigurationData();
for (Entry<InstanceIdentifier<?>, DataObject> entry : createdEntries) {
- if(entry.getValue() instanceof Flow) {
- System.out.println("Coming add cc in FlowDatacommitHandler");
+ if (entry.getValue() instanceof Flow) {
+ logger.debug("Coming add cc in FlowDatacommitHandler");
+ Flow flow = (Flow) entry.getValue();
+ boolean status = validate(flow);
+ if (!status) {
+ return;
+ }
addFlow(entry.getKey(), (Flow) entry.getValue());
}
}
for (@SuppressWarnings("unused")
Entry<InstanceIdentifier<?>, DataObject> entry : updatedEntries) {
- if(entry.getValue() instanceof Flow) {
- System.out.println("Coming update cc in FlowDatacommitHandler");
+ if (entry.getValue() instanceof Flow) {
+ logger.debug("Coming update cc in FlowDatacommitHandler");
+ Flow flow = (Flow) entry.getValue();
+ boolean status = validate(flow);
+ if (!status) {
+ return;
+ }
updateFlow(entry.getKey(), (Flow) entry.getValue());
}
}
- for (InstanceIdentifier<?> instanceId : removeEntriesInstanceIdentifiers ) {
+ for (InstanceIdentifier<?> instanceId : removeEntriesInstanceIdentifiers) {
DataObject removeValue = transaction.getModification().getOriginalConfigurationData().get(instanceId);
- if(removeValue instanceof Flow) {
- System.out.println("Coming remove cc in FlowDatacommitHandler");
+ if (removeValue instanceof Flow) {
+ logger.debug("Coming remove cc in FlowDatacommitHandler");
+ Flow flow = (Flow) removeValue;
+ boolean status = validate(flow);
+ if (!status) {
+ return;
+ }
removeFlow(instanceId, (Flow) removeValue);
}
@Override
public DataCommitTransaction requestCommit(DataModification<InstanceIdentifier<?>, DataObject> modification) {
// We should verify transaction
- System.out.println("Coming in FlowDatacommitHandler");
+ logger.debug("Coming in FlowDatacommitHandler");
internalTransaction transaction = new internalTransaction(modification);
transaction.prepareUpdate();
return transaction;
Set<Entry<InstanceIdentifier<?>, DataObject>> puts = modification.getUpdatedConfigurationData().entrySet();
for (Entry<InstanceIdentifier<?>, DataObject> entry : puts) {
-
- // validating the DataObject
- DataObject value = entry.getValue();
- if(value instanceof Flow ) {
- Flow flow = (Flow)value;
- boolean status = validate(flow);
- if (!status) {
- return;
- }
- // Presence check
- /*
- * This is breaking due to some improper use of caches...
- *
- if (flowEntryExists(flow)) {
- String error = "Entry with this name on specified table already exists";
- logger.warn("Entry with this name on specified table already exists: {}", entry);
- logger.error(error);
- return;
- }
- if (originalSwView.containsKey(entry)) {
- logger.warn("Operation Rejected: A flow with same match and priority exists on the target node");
- logger.trace("Aborting to install {}", entry);
- continue;
- }
- */
- if (!FRMUtil.validateMatch(flow)) {
- logger.error("Not a valid Match");
- return;
- }
- if (!FRMUtil.validateInstructions(flow)) {
- logger.error("Not a valid Instruction");
- return;
- }
- /*
- * Commented out due to Clustering Services issues
- * preparePutEntry(entry.getKey(), flow);
- */
- }
}
// removals = modification.getRemovedConfigurationData();
Flow original = originalSwView.get(key);
if (original != null) {
// It is update for us
- System.out.println("Coming update in FlowDatacommitHandler");
updates.put(key, flow);
} else {
// It is addition for us
- System.out.println("Coming add in FlowDatacommitHandler");
additions.put(key, flow);
}
}
commitToPlugin(this);
// We return true if internal transaction is successful.
// return Rpcs.getRpcResult(true, null, Collections.emptySet());
- return Rpcs.getRpcResult(true, null, Collections.<RpcError>emptySet());
+ return Rpcs.getRpcResult(true, null, Collections.<RpcError> emptySet());
}
/**
// NOOP - we did not modified any internal state during
// requestCommit phase
// return Rpcs.getRpcResult(true, null, Collections.emptySet());
- return Rpcs.getRpcResult(true, null, Collections.<RpcError>emptySet());
+ return Rpcs.getRpcResult(true, null, Collections.<RpcError> emptySet());
}
- public boolean validate(Flow flow) {
-
- String msg = ""; // Specific part of warn/error log
-
- boolean result = true;
- // flow Name validation
- if (flow.getFlowName() == null || flow.getFlowName().trim().isEmpty()
- || !flow.getFlowName().matches(NAMEREGEX)) {
- msg = "Invalid Flow name";
- result = false;
- }
- // Node Validation
- if (result == true && flow.getNode() == null) {
- msg = "Node is null";
- result = false;
- }
-
- // TODO: Validate we are seeking to program a flow against a valid Node
-
- if (result == true && flow.getPriority() != null) {
- if (flow.getPriority() < 0 || flow.getPriority() > 65535) {
- msg = String.format("priority %s is not in the range 0 - 65535",
- flow.getPriority());
- result = false;
- }
- }
- if (result == false) {
- logger.warn("Invalid Configuration for flow {}. The failure is {}",flow,msg);
- logger.error("Invalid Configuration ({})",msg);
- }
- return result;
- }
-
private boolean flowEntryExists(Flow flow) {
// Flow name has to be unique on per table id basis
for (ConcurrentMap.Entry<FlowKey, Flow> entry : originalSwView.entrySet()) {
}
}
- final class FlowEventListener implements SalFlowListener {
-
- List<FlowAdded> addedFlows = new ArrayList<>();
- List<FlowRemoved> removedFlows = new ArrayList<>();
- List<FlowUpdated> updatedFlows = new ArrayList<>();
-
- @Override
- public void onFlowAdded(FlowAdded notification) {
- System.out.println("added flow..........................");
- addedFlows.add(notification);
- }
-
- @Override
- public void onFlowRemoved(FlowRemoved notification) {
- removedFlows.add(notification);
- };
-
- @Override
- public void onFlowUpdated(FlowUpdated notification) {
- updatedFlows.add(notification);
- }
-
- @Override
- public void onSwitchFlowRemoved(SwitchFlowRemoved notification) {
- // TODO
- }
-
- @Override
- public void onNodeErrorNotification(NodeErrorNotification notification) {
- // TODO Auto-generated method stub
-
- }
-
- @Override
- public void onNodeExperimenterErrorNotification(NodeExperimenterErrorNotification notification) {
- // TODO Auto-generated method stub
-
- };
-
- }
-
// Commented out DataChangeListene - to be used by Stats
// final class FlowDataListener implements DataChangeListener {
// }
// }
+    /**
+     * Validates a statically configured flow before it is handed to the
+     * southbound plugin.
+     *
+     * Checks performed: flow name is non-empty and matches NAMEREGEX, a
+     * target node is present, priority (if set) is within 0 - 65535, and the
+     * match/instructions are well-formed according to FRMUtil.
+     *
+     * @param flow the flow configuration to validate
+     * @return true if the configuration is valid, false otherwise
+     */
+    public boolean validate(Flow flow) {
+
+        String msg = ""; // Specific part of warn/error log
+        boolean result = true;
+
+        // Flow name validation
+        if (flow.getFlowName() == null || flow.getFlowName().trim().isEmpty()
+                || !flow.getFlowName().matches(NAMEREGEX)) {
+            msg = "Invalid Flow name";
+            result = false;
+        }
+        // Node validation
+        if (result && flow.getNode() == null) {
+            msg = "Node is null";
+            result = false;
+        }
+
+        // TODO: Validate we are seeking to program a flow against a valid Node
+
+        if (result && flow.getPriority() != null) {
+            if (flow.getPriority() < 0 || flow.getPriority() > 65535) {
+                msg = String.format("priority %s is not in the range 0 - 65535", flow.getPriority());
+                result = false;
+            }
+        }
+
+        /*
+         * TODO: the presence check (duplicate flow name on the same table /
+         * same match+priority on the target node) is disabled due to improper
+         * use of the clustered caches; re-enable once the cache usage is
+         * fixed.
+         */
+
+        if (result && !FRMUtil.validateMatch(flow)) {
+            // Record the reason so the summary log below is meaningful.
+            msg = "Not a valid Match";
+            result = false;
+        }
+        if (result && !FRMUtil.validateInstructions(flow)) {
+            msg = "Not a valid Instruction";
+            result = false;
+        }
+        if (!result) {
+            logger.warn("Invalid Configuration for flow {}. The failure is {}", flow, msg);
+            logger.error("Invalid Configuration ({})", msg);
+        }
+        return result;
+    }
+
private static void updateLocalDatabase(NodeFlow entry, boolean add) {
updateSwViewes(entry, add);
</generator>
<generator>
<codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
- <outputBaseDir>target/site/restconf</outputBaseDir>
+ <outputBaseDir>target/site/models</outputBaseDir>
</generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
</namespaceToPackage1>
</additionalConfiguration>
</generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
</configuration>
<artifactId>yang-jmx-generator-plugin</artifactId>
<version>0.2.3-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>0.6.0-SNAPSHOT</version>
+ <type>jar</type>
+ </dependency>
</dependencies>
</plugin>
</namespaceToPackage1>
</additionalConfiguration>
</generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
</configuration>
<artifactId>yang-jmx-generator-plugin</artifactId>
<version>0.2.3-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>0.6.0-SNAPSHOT</version>
+ <type>jar</type>
+ </dependency>
</dependencies>
</plugin>
<plugin>
systemProperty("netconf.tcp.address").value("0.0.0.0"), //
systemProperty("netconf.tcp.port").value("18383"), //
+ systemProperty("netconf.tcp.client.address").value("127.0.0.1"), //
+ systemProperty("netconf.tcp.client.port").value("18383"), //
systemProperty("netconf.config.persister.active").value("1"), //
systemProperty("netconf.config.persister.1.storageAdapterClass").value(
"org.opendaylight.controller.config.persist.storage.file.FileStorageAdapter"), //
</namespaceToPackage1>
</additionalConfiguration>
</generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
</configuration>
<artifactId>yang-jmx-generator-plugin</artifactId>
<version>0.2.3-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>0.6.0-SNAPSHOT</version>
+ <type>jar</type>
+ </dependency>
</dependencies>
</plugin>
<plugin>
</namespaceToPackage1>
</additionalConfiguration>
</generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
</configuration>
<artifactId>yang-jmx-generator-plugin</artifactId>
<version>0.2.3-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>0.6.0-SNAPSHOT</version>
+ <type>jar</type>
+ </dependency>
</dependencies>
</plugin>
</namespaceToPackage1>
</additionalConfiguration>
</generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
</configuration>
<artifactId>yang-jmx-generator-plugin</artifactId>
<version>0.2.3-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>0.6.0-SNAPSHOT</version>
+ <type>jar</type>
+ </dependency>
</dependencies>
</plugin>
<plugin>
import org.opendaylight.controller.sal.restconf.impl.ResponseException;
import org.opendaylight.controller.sal.restconf.impl.StructuredData;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.impl.NodeUtils;
+import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
if (data == null) {
throw new ResponseException(Response.Status.NOT_FOUND, "No data exists.");
}
-
- Document domTree = NodeUtils.buildShadowDomTree(data);
+
+ XmlMapper xmlMapper = new XmlMapper();
+ Document domTree = xmlMapper.write(data, (DataNodeContainer) t.getSchema());
try {
TransformerFactory tf = TransformerFactory.newInstance();
Transformer transformer = tf.newTransformer();
--- /dev/null
+package org.opendaylight.controller.sal.rest.impl;
+
+import java.util.Set;
+
+import javax.activation.UnsupportedDataTypeException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.SimpleNode;
+import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
+import org.opendaylight.yangtools.yang.model.api.ChoiceNode;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.LeafListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.YangNode;
+import org.opendaylight.yangtools.yang.model.api.type.IdentityrefTypeDefinition;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Serializes a yangtools CompositeNode data tree into a W3C DOM Document,
+ * using the corresponding YANG schema nodes to decide how each value is
+ * rendered (e.g. identityref leaves are written with a namespace prefix).
+ */
+public class XmlMapper {
+
+ /**
+ * Translates {@code data} into a new XML document.
+ *
+ * @param data composite node to serialize; must not be null
+ * @param schema schema node describing {@code data}; only container and
+ * list schema nodes are supported
+ * @return the resulting document, or null if a DocumentBuilder could not
+ * be created. NOTE(review): returning null here silently swallows the
+ * ParserConfigurationException and shifts the failure to the caller
+ * as an NPE - consider rethrowing instead.
+ * @throws UnsupportedDataTypeException if {@code schema} is neither a
+ * container nor a list schema node
+ */
+ public Document write(CompositeNode data, DataNodeContainer schema) throws UnsupportedDataTypeException {
+ Preconditions.checkNotNull(data);
+ Preconditions.checkNotNull(schema);
+
+ DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+ Document doc = null;
+ try {
+ DocumentBuilder bob = dbf.newDocumentBuilder();
+ doc = bob.newDocument();
+ } catch (ParserConfigurationException e) {
+ return null;
+ }
+
+ if (schema instanceof ContainerSchemaNode || schema instanceof ListSchemaNode) {
+ doc.appendChild(translateToXmlAndReturnRootElement(doc, data, schema));
+ return doc;
+ } else {
+ throw new UnsupportedDataTypeException(
+ "Schema can be ContainerSchemaNode or ListSchemaNode. Other types are not supported yet.");
+ }
+ }
+
+ /**
+ * Recursively builds the DOM element for {@code data}. Simple nodes become
+ * element text content (typed via the leaf/leaf-list schema when one is
+ * available); composite nodes recurse into their children, resolving each
+ * child's schema by local name.
+ */
+ private Element translateToXmlAndReturnRootElement(Document doc, Node<?> data, YangNode schema)
+ throws UnsupportedDataTypeException {
+ QName dataType = data.getNodeType();
+ Element itemEl = doc.createElementNS(dataType.getNamespace().toString(), dataType.getLocalName());
+
+ if (data instanceof SimpleNode<?>) {
+ if (schema instanceof LeafListSchemaNode) {
+ writeValueOfNodeByType(itemEl, (SimpleNode<?>) data, ((LeafListSchemaNode) schema).getType());
+ } else if (schema instanceof LeafSchemaNode) {
+ writeValueOfNodeByType(itemEl, (SimpleNode<?>) data, ((LeafSchemaNode) schema).getType());
+ } else {
+ // No leaf schema available: fall back to plain string content.
+ Object value = data.getValue();
+ if (value != null) {
+ itemEl.setTextContent(String.valueOf(value));
+ }
+ }
+ } else { // CompositeNode
+ for (Node<?> child : ((CompositeNode) data).getChildren()) {
+ DataSchemaNode childSchema = findFirstSchemaForNode(child, ((DataNodeContainer) schema).getChildNodes());
+ if (childSchema == null) {
+ // A data node with no matching schema child is treated as invalid input.
+ throw new UnsupportedDataTypeException("Probably the data node \""
+ + child.getNodeType().getLocalName() + "\" is not conform to schema");
+ }
+ itemEl.appendChild(translateToXmlAndReturnRootElement(doc, child, childSchema));
+ }
+ }
+ return itemEl;
+ }
+
+ /**
+ * Writes a simple node's value into {@code element}. Identityref values
+ * (QNames) are emitted with an ad-hoc "x" namespace prefix; every other
+ * value is stringified. Null values leave the element empty.
+ */
+ private void writeValueOfNodeByType(Element element, SimpleNode<?> node, TypeDefinition<?> type) {
+
+ TypeDefinition<?> baseType = resolveBaseTypeFrom(type);
+
+ if (baseType instanceof IdentityrefTypeDefinition && node.getValue() instanceof QName) {
+ QName value = (QName) node.getValue();
+ element.setAttribute("xmlns:x", value.getNamespace().toString());
+ element.setTextContent("x:" + value.getLocalName());
+ } else {
+ Object value = node.getValue();
+ if (value != null) {
+ element.setTextContent(String.valueOf(value));
+ }
+ }
+ }
+
+ /**
+ * Returns the first schema child whose local name matches {@code node},
+ * searching transitively through choice cases; null if none matches.
+ */
+ private DataSchemaNode findFirstSchemaForNode(Node<?> node, Set<DataSchemaNode> dataSchemaNode) {
+ for (DataSchemaNode dsn : dataSchemaNode) {
+ if (node.getNodeType().getLocalName().equals(dsn.getQName().getLocalName())) {
+ return dsn;
+ } else if (dsn instanceof ChoiceNode) {
+ for (ChoiceCaseNode choiceCase : ((ChoiceNode) dsn).getCases()) {
+ DataSchemaNode foundDsn = findFirstSchemaForNode(node, choiceCase.getChildNodes());
+ if (foundDsn != null) {
+ return foundDsn;
+ }
+ }
+ }
+ }
+ return null;
+ }
+
+ // Walks the derived-type chain down to the base (built-in) YANG type.
+ private TypeDefinition<?> resolveBaseTypeFrom(TypeDefinition<?> type) {
+ return type.getBaseType() != null ? resolveBaseTypeFrom(type.getBaseType()) : type;
+ }
+
+}
systemProperty("netconf.tcp.address").value("127.0.0.1"),
systemProperty("netconf.tcp.port").value("8383"),
+ systemProperty("netconf.tcp.client.address").value("127.0.0.1"),
+ systemProperty("netconf.tcp.client.port").value("8383"),
+
// Set the systemPackages (used by clustering)
systemPackages("sun.reflect", "sun.reflect.misc", "sun.misc"),
import javax.management.openmbean.SimpleType;
import java.lang.reflect.Method;
+import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.Date;
import java.util.Map;
resolverPlugins.put(Date.class.getCanonicalName(), new DateResolver());
resolverPlugins.put(Character.class.getCanonicalName(), new CharResolver());
resolverPlugins.put(BigInteger.class.getCanonicalName(), new BigIntegerResolver());
+ resolverPlugins.put(BigDecimal.class.getCanonicalName(), new BigDecimalResolver());
}
static interface Resolver {
}
}
+ // Resolves config attribute values of type BigDecimal (e.g. yang decimal64 leaves).
+ static class BigDecimalResolver extends DefaultResolver {
+
+ @Override
+ protected Object parseObject(Class<?> type, String value) throws Exception {
+ // BigDecimal(String) preserves the exact decimal value as written.
+ return new BigDecimal(value);
+ }
+ }
+
static class CharResolver extends DefaultResolver {
@Override
checkTypeConfigAttribute(response);
checkTypedefs(response);
checkEnum(response);
+ checkBigDecimal(response);
edit("netconfMessages/editConfig_remove.xml");
verifyNoMoreInteractions(netconfOperationRouter);
}
+ // Verifies that exactly one <sleep-factor> element (the BigDecimal-typed
+ // leaf added to the test fixture) survived the config round trip.
+ private void checkBigDecimal(Element response) {
+ int size = response.getElementsByTagName("sleep-factor").getLength();
+ assertEquals(1, size);
+ }
+
private void closeSession() throws NetconfDocumentedException, ParserConfigurationException, SAXException,
IOException {
DefaultCloseSession closeOp = new DefaultCloseSession(NETCONF_SESSION_ID);
PersisterAggregator persister = PersisterAggregator.createFromProperties(propertiesProvider);
InetSocketAddress address = NetconfConfigUtil.extractTCPNetconfAddress(context,
- "Netconf is not configured, persister is not operational");
+ "Netconf is not configured, persister is not operational",true);
configPersisterNotificationHandler = new ConfigPersisterNotificationHandler(persister, address,
platformMBeanServer, ignoredMissingCapabilityRegex);
@Override
public void start(final BundleContext context) throws Exception {
- InetSocketAddress address = NetconfConfigUtil.extractTCPNetconfAddress(context, "TCP is not configured, netconf not available.");
+ InetSocketAddress address = NetconfConfigUtil.extractTCPNetconfAddress(context,
+ "TCP is not configured, netconf not available.", false);
NetconfOperationServiceFactoryListenerImpl factoriesListener = new NetconfOperationServiceFactoryListenerImpl();
factoriesTracker = new NetconfOperationServiceFactoryTracker(context, factoriesListener);
logger.trace("Starting netconf SSH bridge.");
Optional<InetSocketAddress> sshSocketAddressOptional = NetconfConfigUtil.extractSSHNetconfAddress(context,EXCEPTION_MESSAGE);
- InetSocketAddress tcpSocketAddress = NetconfConfigUtil.extractTCPNetconfAddress(context,EXCEPTION_MESSAGE);
+ InetSocketAddress tcpSocketAddress = NetconfConfigUtil.extractTCPNetconfAddress(context,
+ EXCEPTION_MESSAGE, true);
if (sshSocketAddressOptional.isPresent()){
server = NetconfSSHServer.start(sshSocketAddressOptional.get().getPort(),tcpSocketAddress);
private String id;
private ServerSession servSession;
private ServerConnection servconnection;
+ private String customHeader;
public IOThread (InputStream is, OutputStream os, String id,ServerSession ss, ServerConnection conn){
super.setName(id);
logger.trace("IOThread {} created", super.getName());
}
+ /**
+ * Creates an IOThread that writes {@code header} to the output stream
+ * before starting the input-to-output copy in run().
+ * NOTE(review): this appears to duplicate the field assignments of the
+ * other constructor - consider delegating via this(is, os, id, ss, conn)
+ * if that constructor assigns the same fields (its body is not fully
+ * visible in this hunk; confirm before changing).
+ */
+ public IOThread (InputStream is, OutputStream os, String id,ServerSession ss, ServerConnection conn,String header){
+ this.inputStream = is;
+ this.outputStream = os;
+ this.servSession = ss;
+ this.servconnection = conn;
+ this.customHeader = header;
+ super.setName(id);
+ logger.trace("IOThread {} created", super.getName());
+ }
@Override
public void run() {
logger.trace("thread {} started", super.getName());
try {
+ if (this.customHeader!=null && !this.customHeader.equals("")){
+ this.outputStream.write(this.customHeader.getBytes());
+ logger.trace("adding {} header", this.customHeader);
+ }
IOUtils.copy(this.inputStream, this.outputStream);
} catch (Exception e) {
logger.error("inputstream -> outputstream copy error ",e);
private static final Logger logger = LoggerFactory.getLogger(SocketThread.class);
private ServerConnection conn = null;
private long sessionId;
+ private String currentUser;
+ private final String remoteAddressWithPort;
public static void start(Socket socket, InetSocketAddress clientAddress, long sessionId) throws IOException{
this.socket = socket;
this.clientAddress = clientAddress;
this.sessionId = sessionId;
+ this.remoteAddressWithPort = socket.getRemoteSocketAddress().toString().replaceFirst("/","");
}
netconf_ssh_input.start();
logger.trace("starting netconf_ssh_output thread");
- netconf_ssh_output = new IOThread(ss.getStdout(),echoSocket.getOutputStream(),"output_thread_"+sessionId,ss,conn);
+ final String customHeader = "["+currentUser+";"+remoteAddressWithPort+";ssh;;;;;;]\n";
+ netconf_ssh_output = new IOThread(ss.getStdout(),echoSocket.getOutputStream(),"output_thread_"+sessionId,ss,conn,customHeader);
netconf_ssh_output.setDaemon(false);
netconf_ssh_output.start();
public String initAuthentication(ServerConnection sc)
{
- return "";
+ logger.trace("Established connection with host {}",remoteAddressWithPort);
+ return "Established connection with host "+remoteAddressWithPort+"\r\n";
}
public String[] getRemainingAuthMethods(ServerConnection sc)
public AuthenticationResult authenticateWithPassword(ServerConnection sc, String username, String password)
{
- if (USER.equals(username) && PASSWORD.equals(password))
+ if (USER.equals(username) && PASSWORD.equals(password)){
+ currentUser = username;
+ logger.trace("user {}@{} authenticated",currentUser,remoteAddressWithPort);
return AuthenticationResult.SUCCESS;
+ }
+
return AuthenticationResult.FAILURE;
}
package org.opendaylight.controller.netconf.util.osgi;
-import com.google.common.base.Optional;
-import java.net.InetSocketAddress;
-import org.osgi.framework.BundleContext;
-import static com.google.common.base.Preconditions.checkNotNull;
+ import com.google.common.base.Optional;
+ import java.net.InetSocketAddress;
+ import org.osgi.framework.BundleContext;
+ import static com.google.common.base.Preconditions.checkNotNull;
- public class NetconfConfigUtil {
+public class NetconfConfigUtil {
private static final String PREFIX_PROP = "netconf.";
private enum InfixProp {
private static final String PORT_SUFFIX_PROP = ".port";
private static final String ADDRESS_SUFFIX_PROP = ".address";
+ private static final String CLIENT_PROP = ".client";
- public static InetSocketAddress extractTCPNetconfAddress(BundleContext context, String exceptionMessageIfNotFound) {
+ public static InetSocketAddress extractTCPNetconfAddress(BundleContext context, String exceptionMessageIfNotFound, boolean forClient) {
- Optional<InetSocketAddress> inetSocketAddressOptional = extractSomeNetconfAddress(context, InfixProp.tcp, exceptionMessageIfNotFound);
+ Optional<InetSocketAddress> inetSocketAddressOptional = extractSomeNetconfAddress(context, InfixProp.tcp, exceptionMessageIfNotFound, forClient);
if (inetSocketAddressOptional.isPresent() == false) {
throw new IllegalStateException("Netconf tcp address not found." + exceptionMessageIfNotFound);
}
public static Optional<InetSocketAddress> extractSSHNetconfAddress(BundleContext context, String exceptionMessage) {
- return extractSomeNetconfAddress(context, InfixProp.ssh, exceptionMessage);
+ return extractSomeNetconfAddress(context, InfixProp.ssh, exceptionMessage, false);
}
/**
* if address or port are invalid, or configuration is missing
*/
private static Optional<InetSocketAddress> extractSomeNetconfAddress(BundleContext context,
- InfixProp infixProp, String exceptionMessage) {
- String address = context.getProperty(PREFIX_PROP + infixProp + ADDRESS_SUFFIX_PROP);
- if (address == null) {
+ InfixProp infixProp,
+ String exceptionMessage,
+ boolean client) {
+ String address = "";
+ if (client) {
+ address = context.getProperty(PREFIX_PROP + infixProp + CLIENT_PROP + ADDRESS_SUFFIX_PROP);
+ }
+ if (address == null || address.equals("")){
+ address = context.getProperty(PREFIX_PROP + infixProp + ADDRESS_SUFFIX_PROP);
+ }
+ if (address == null || address.equals("")) {
throw new IllegalStateException("Cannot find initial netconf configuration for parameter "
+PREFIX_PROP + infixProp + ADDRESS_SUFFIX_PROP
+" in config.ini. "+exceptionMessage);
}
- String portKey = PREFIX_PROP + infixProp + PORT_SUFFIX_PROP;
+ String portKey = "";
+ if (client) {
+ portKey = PREFIX_PROP + infixProp + CLIENT_PROP + PORT_SUFFIX_PROP;
+ }
+ if (portKey == null || portKey.equals("")){
+ portKey = PREFIX_PROP + infixProp + PORT_SUFFIX_PROP;
+ }
String portString = context.getProperty(portKey);
checkNotNull(portString, "Netconf port must be specified in properties file with " + portKey);
try {
<name>test1</name>
+ <sleep-factor>
+ 2.00
+ </sleep-factor>
+
<extended>
1
</extended>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>