Merge "Updated remote rpc code after integration tests. Rpc execution is failing...
author  Ed Warnicke <eaw@cisco.com>
Mon, 28 Jul 2014 14:06:45 +0000 (14:06 +0000)
committer  Gerrit Code Review <gerrit@opendaylight.org>
Mon, 28 Jul 2014 14:06:45 +0000 (14:06 +0000)
165 files changed:
features/config-netty/pom.xml [new file with mode: 0644]
features/config-netty/src/main/resources/features.xml [new file with mode: 0644]
features/config-persister/pom.xml [new file with mode: 0644]
features/config-persister/src/main/resources/features.xml [new file with mode: 0644]
features/config/pom.xml
features/config/src/main/resources/features.xml
features/netconf/pom.xml
features/pom.xml
features/protocol-framework/pom.xml
opendaylight/config/config-manager/src/main/java/org/opendaylight/controller/config/manager/impl/osgi/ExtensibleBundleTracker.java
opendaylight/config/yang-jmx-generator/src/main/java/org/opendaylight/controller/config/yangjmxgenerator/ModuleMXBeanEntryBuilder.java
opendaylight/config/yang-jmx-generator/src/main/java/org/opendaylight/controller/config/yangjmxgenerator/RuntimeBeanEntry.java
opendaylight/distribution/opendaylight-karaf/pom.xml
opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/instance [new file with mode: 0755]
opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/instance.bat [new file with mode: 0644]
opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/karaf [new file with mode: 0755]
opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/karaf.bat [new file with mode: 0644]
opendaylight/forwardingrulesmanager/api/src/main/java/org/opendaylight/controller/forwardingrulesmanager/FlowConfig.java
opendaylight/forwardingrulesmanager/api/src/test/java/org/opendaylight/controller/forwardingrulesmanager/frmTest.java
opendaylight/md-sal/compatibility/flow-management-compatibility/src/main/java/org/opendaylight/controller/md/frm/compatibility/FRMRuntimeDataProvider.java
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestFromSalConversionsUtils.java
opendaylight/md-sal/forwardingrules-manager/pom.xml
opendaylight/md-sal/inventory-manager/src/main/java/org/opendaylight/controller/md/inventory/manager/FlowCapableInventoryProvider.java
opendaylight/md-sal/sal-akka-raft/pom.xml [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ClientActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/Main.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/TestDriver.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValueSaved.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/PrintRole.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/PrintState.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ElectionTerm.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformationImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftState.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLog.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/RaftActorBehavior.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/AddRaftPeer.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/FindLeader.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/FindLeaderReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/RemoveRaftPeer.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ApplySnapshot.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ApplyState.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/CommitEntry.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ElectionTimeout.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/PersistEntry.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/Replicate.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SaveSnapshot.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SendHeartBeat.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SendInstallSnapshot.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RaftRPC.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractActorTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/CandidateTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/DoNothingActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf [new file with mode: 0644]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/BindingTransactionChain.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataBroker.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPoint.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPointService.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/TransactionFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/WriteTransaction.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareBroker.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/data/DataProviderService.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractReadWriteTransaction.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractWriteTransaction.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDataWriteTransactionImpl.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingToNormalizedNodeCodec.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ForwardedBackwardsCompatibleDataBroker.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/codegen/RpcIsNotRoutedException.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/codegen/RuntimeCodeGenerator.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/codegen/impl/AbstractRuntimeCodeGenerator.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/impl/DataBrokerImpl.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/impl/RpcProviderRegistryImpl.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/impl/connect/dom/BindingIndependentConnector.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/impl/forward/DomForwardedBindingBrokerImpl.java
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/RpcProviderRegistryTest.java
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/WriteTransactionTest.java
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/NotificationTest.java
opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/AbstractBindingSalProviderInstance.java
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/RegistrationListener.java
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataCommitHandlerRegistration.java
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataProvisionService.java
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/notify/NotificationSubscriptionService.java
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RoutedRegistration.java
opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/routing/AbstractDataReadRouter.java
opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/service/AbstractDataBroker.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataBroker.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPoint.java [new file with mode: 0644]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPointService.java [new file with mode: 0644]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMService.java [new file with mode: 0644]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/Broker.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/Consumer.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/Provider.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/RpcProvisionRegistry.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/data/DataProviderService.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/mount/MountInstance.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/mount/MountProvisionInstance.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/mount/MountProvisionListener.java [new file with mode: 0644]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/mount/MountProvisionService.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/mount/MountService.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/notify/NotificationPublishService.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/notify/NotificationService.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/compat/BackwardsCompatibleDataBroker.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/mount/DOMMountPointServiceImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/BrokerImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/DataBrokerImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/GlobalBundleScanningSchemaServiceImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/MountPointImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/MountPointManagerImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/NotificationRouterImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/osgi/AbstractBrokerServiceProxy.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/osgi/DataProviderServiceProxy.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/osgi/MountProviderServiceProxy.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/osgi/NotificationPublishServiceProxy.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/osgi/NotificationServiceProxy.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/spi/NotificationRouter.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/util/YangSchemaUtils.java
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/MountPointServiceTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/mount/SimpleDOMMountPoint.java [new file with mode: 0644]
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilities.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/util/NetconfMessageTransformUtil.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/util/NodeContainerProxy.java
opendaylight/md-sal/sal-netconf-connector/src/test/java/org/opendaylight/controller/sal/connect/netconf/NetconfDeviceTest.java
opendaylight/md-sal/sal-protocolbuffer-encoding/src/main/java/org/opendaylight/controller/cluster/datastore/util/EncoderDecoderUtil.java
opendaylight/md-sal/sal-protocolbuffer-encoding/src/test/java/org/opendaylight/controller/cluster/datastore/util/NormalizedNodeXmlConverterTest.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/rest/impl/JsonMapper.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/ControllerContext.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/RestconfImpl.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/rpc/impl/AbstractRpcExecutor.java
opendaylight/md-sal/sal-rest-connector/src/test/java/org/opendaylight/controller/sal/restconf/impl/test/InvokeRpcMethodTest.java
opendaylight/md-sal/sal-rest-connector/src/test/resources/invoke-rpc/invoke-rpc-module.yang
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/DocProvider.java
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/impl/BaseYangSwaggerGenerator.java
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/impl/ModelGenerator.java
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/mountpoints/MountPointSwagger.java
opendaylight/md-sal/samples/l2switch/implementation/src/main/java/org/opendaylight/controller/sample/l2switch/md/L2SwitchProvider.java
opendaylight/md-sal/samples/toaster-consumer/src/main/java/org/opendaylight/controller/config/yang/config/kitchen_service/impl/KitchenServiceModule.java
opendaylight/md-sal/statistics-manager/src/main/java/org/opendaylight/controller/md/statistics/manager/StatisticsProvider.java
opendaylight/md-sal/topology-lldp-discovery/src/main/java/org/opendaylight/md/controller/topology/lldp/LLDPDiscoveryProvider.java
opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyProvider.java
opendaylight/netconf/netconf-cli/src/main/java/org/opendaylight/controller/netconf/cli/commands/output/OutputDefinition.java

diff --git a/features/config-netty/pom.xml b/features/config-netty/pom.xml
new file mode 100644 (file)
index 0000000..98b97d1
--- /dev/null
@@ -0,0 +1,80 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>config-subsystem</artifactId>
+    <version>0.2.5-SNAPSHOT</version>
+    <relativePath>../../opendaylight/config/</relativePath>
+  </parent>
+  <artifactId>config-netty-features</artifactId>
+
+  <packaging>pom</packaging>
+
+  <properties>
+    <features.file>features.xml</features.file>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-persister-features</artifactId>
+      <version>${config.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <resources>
+      <resource>
+        <filtering>true</filtering>
+        <directory>src/main/resources</directory>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>filter</id>
+            <goals>
+              <goal>resources</goal>
+            </goals>
+            <phase>generate-resources</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-artifacts</id>
+            <goals>
+              <goal>attach-artifact</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <artifacts>
+                <artifact>
+                  <file>${project.build.directory}/classes/${features.file}</file>
+                  <type>xml</type>
+                  <classifier>features</classifier>
+                </artifact>
+              </artifacts>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <scm>
+    <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+    <tag>HEAD</tag>
+    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+  </scm>
+</project>
diff --git a/features/config-netty/src/main/resources/features.xml b/features/config-netty/src/main/resources/features.xml
new file mode 100644 (file)
index 0000000..3121ca0
--- /dev/null
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<features name="odl-config-persister-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+  <repository>mvn:org.opendaylight.controller/config-persister-features/${config.version}/xml/features</repository>
+  <feature name='odl-config-netty' version='${project.version}'>
+    <feature version='${project.version}'>odl-config-netty-config-api</feature>
+    <bundle>mvn:org.opendaylight.controller/netty-event-executor-config/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/netty-threadgroup-config/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/netty-timer-config/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/threadpool-config-api/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/threadpool-config-impl/${project.version}</bundle>
+    <feature version='${project.version}'>odl-config-startup</feature>
+  </feature>
+</features>
\ No newline at end of file
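The odl-config-netty feature defined above is attached to the build as an xml/features artifact by the build-helper-maven-plugin configuration in the new pom.xml, so it can be provisioned into a running Karaf container. A minimal installation sketch from the Karaf 3.x console, assuming the artifact has been built locally and using the 0.2.5-SNAPSHOT version from this change purely for illustration:

  feature:repo-add mvn:org.opendaylight.controller/config-netty-features/0.2.5-SNAPSHOT/xml/features
  feature:install odl-config-netty
  feature:list -i | grep odl-config

Installing odl-config-netty would also pull in odl-config-startup from the config-persister feature repository referenced above.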
diff --git a/features/config-persister/pom.xml b/features/config-persister/pom.xml
new file mode 100644 (file)
index 0000000..969d0c8
--- /dev/null
@@ -0,0 +1,96 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>config-subsystem</artifactId>
+    <version>0.2.5-SNAPSHOT</version>
+    <relativePath>../../opendaylight/config/</relativePath>
+  </parent>
+  <artifactId>config-persister-features</artifactId>
+
+  <packaging>pom</packaging>
+
+  <properties>
+    <features.file>features.xml</features.file>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>features-yangtools</artifactId>
+      <version>${yangtools.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-features</artifactId>
+      <version>${netconf.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-features</artifactId>
+      <version>${config.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <resources>
+      <resource>
+        <filtering>true</filtering>
+        <directory>src/main/resources</directory>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>filter</id>
+            <goals>
+              <goal>resources</goal>
+            </goals>
+            <phase>generate-resources</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-artifacts</id>
+            <goals>
+              <goal>attach-artifact</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <artifacts>
+                <artifact>
+                  <file>${project.build.directory}/classes/${features.file}</file>
+                  <type>xml</type>
+                  <classifier>features</classifier>
+                </artifact>
+              </artifacts>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <scm>
+    <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+    <tag>HEAD</tag>
+    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+  </scm>
+</project>
diff --git a/features/config-persister/src/main/resources/features.xml b/features/config-persister/src/main/resources/features.xml
new file mode 100644 (file)
index 0000000..2273a4a
--- /dev/null
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<features name="odl-config-persister-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+  <repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
+  <repository>mvn:org.opendaylight.controller/netconf-features/${netconf.version}/xml/features</repository>
+  <repository>mvn:org.opendaylight.controller/config-features/${config.version}/xml/features</repository>
+  <feature name='odl-config-startup' version='${project.version}'>
+    <feature version='${project.version}'>odl-config-netconf-connector</feature>
+    <feature version='${project.version}'>odl-config-persister</feature>
+    <feature version='${project.version}'>odl-netconf-impl</feature>
+  </feature>
+  <feature name='odl-config-persister' version='${project.version}'>
+    <feature version='${netconf.version}'>odl-netconf-api</feature>
+    <feature version='${project.version}'>odl-config-api</feature>
+    <feature version='${yangtools.version}'>yangtools-binding-generator</feature>
+    <bundle>mvn:org.opendaylight.controller/config-persister-api/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/config-persister-file-xml-adapter/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/config-persister-directory-xml-adapter/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/config-persister-impl/${project.version}</bundle>
+
+    <bundle>mvn:org.opendaylight.controller/netconf-util/${netconf.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/netconf-mapping-api/${netconf.version}</bundle>
+
+    <bundle>mvn:com.google.guava/guava/${guava.version}</bundle>
+    <bundle>mvn:commons-io/commons-io/${commons.io.version}</bundle>
+    <bundle>mvn:org.apache.commons/commons-lang3/${commons.lang3.version}</bundle>
+    <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
+    <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
+  </feature>
+</features>
\ No newline at end of file
diff --git a/features/config/pom.xml b/features/config/pom.xml
index 01a4ea74a264afcf722d10b6515b5b26313e869c..7e5dd6472bc7320a6b7c20953c811fb41982cb26 100644 (file)
     <features.file>features.xml</features.file>
   </properties>
 
-  <dependencies></dependencies>
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>features-yangtools</artifactId>
+      <version>${yangtools.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+  </dependencies>
 
   <build>
     <resources>
diff --git a/features/config/src/main/resources/features.xml b/features/config/src/main/resources/features.xml
index 7c11b5b18bace780aaa59c93c56a14e45bafc400..e18f844622900c8b5a108296c0f3c7174ca78f43 100644 (file)
@@ -4,13 +4,6 @@
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
   <repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
-  <repository>mvn:org.opendaylight.controller/netconf-features/${netconf.version}/xml/features</repository>
-
-  <feature name='odl-config-startup' version='${project.version}'>
-    <feature version='${project.version}'>odl-config-netconf-connector</feature>
-    <feature version='${project.version}'>odl-config-persister</feature>
-    <feature version='${project.version}'>odl-netconf-impl</feature>
-  </feature>
 
   <feature name='odl-config-core' version='${project.version}'>
     <feature version='${yangtools.version}'>yangtools-concepts</feature>
     <feature version='${project.version}'>odl-config-core</feature>
     <bundle>mvn:org.opendaylight.controller/config-manager/${project.version}</bundle>
   </feature>
-  <feature name='odl-config-persister' version='${project.version}'>
-    <feature version='${netconf.version}'>odl-netconf-api</feature>
-    <feature version='${project.version}'>odl-config-api</feature>
-    <feature version='${yangtools.version}'>yangtools-binding-generator</feature>
-    <bundle>mvn:org.opendaylight.controller/config-persister-api/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/config-persister-file-xml-adapter/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/config-persister-directory-xml-adapter/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/config-persister-impl/${project.version}</bundle>
 
-    <bundle>mvn:org.opendaylight.controller/netconf-util/${netconf.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/netconf-mapping-api/${netconf.version}</bundle>
-
-    <bundle>mvn:com.google.guava/guava/${guava.version}</bundle>
-    <bundle>mvn:commons-io/commons-io/${commons.io.version}</bundle>
-    <bundle>mvn:org.apache.commons/commons-lang3/${commons.lang3.version}</bundle>
-    <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
-    <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
-  </feature>
   <feature name='odl-config-api' version='${project.version}'>
     <bundle>mvn:org.opendaylight.controller/config-api/${project.version}</bundle>
 
 
     <feature version='${project.version}'>odl-config-api</feature>
   </feature>
-  <feature name='odl-config-netty' version='${project.version}'>
-    <feature version='${project.version}'>odl-config-netty-config-api</feature>
-    <bundle>mvn:org.opendaylight.controller/netty-event-executor-config/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/netty-threadgroup-config/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/netty-timer-config/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/threadpool-config-api/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/threadpool-config-impl/${project.version}</bundle>
-    <feature version='${project.version}'>odl-config-startup</feature>
-  </feature>
   <feature name='odl-config-dispatcher' version='${project.version}'>
       <bundle>mvn:org.opendaylight.controller/netconf-config-dispatcher/${project.version}</bundle>
   </feature>
diff --git a/features/netconf/pom.xml b/features/netconf/pom.xml
index 457fc64eddcd0be11a5ff9c469b603fbdf60c088..90c088eaba2aab2911bdc65fbbba281af094ecb4 100644 (file)
     <features.file>features.xml</features.file>
   </properties>
 
-  <dependencies></dependencies>
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-features</artifactId>
+      <version>${config.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-odl-protocol-framework</artifactId>
+      <version>${protocol-framework.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+  </dependencies>
 
   <build>
     <resources>
diff --git a/features/pom.xml b/features/pom.xml
index dce47faea665aa4d857c235103fbf84768382e9b..fb40fa93b18f063b0a796b201809df210c13523b 100644 (file)
@@ -14,6 +14,8 @@
   </prerequisites>
   <modules>
     <module>config</module>
+    <module>config-persister</module>
+    <module>config-netty</module>
     <module>mdsal</module>
     <module>netconf</module>
     <module>protocol-framework</module>
diff --git a/features/protocol-framework/pom.xml b/features/protocol-framework/pom.xml
index f0208d64521cc7e740ef644105957d49d7f9004a..045ac2dffec10871437ab784f53d7c97cbfe98cd 100644 (file)
     <features.file>features.xml</features.file>
   </properties>
 
-  <dependencies></dependencies>
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-features</artifactId>
+      <version>${config.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+  </dependencies>
 
   <build>
     <resources>
diff --git a/opendaylight/config/config-manager/src/main/java/org/opendaylight/controller/config/manager/impl/osgi/ExtensibleBundleTracker.java b/opendaylight/config/config-manager/src/main/java/org/opendaylight/controller/config/manager/impl/osgi/ExtensibleBundleTracker.java
index c1ebba78817fb36c1a34f256ee81bdcb8c86d85f..eff267ad1319c90c1ce9e06de2ff7bcaddc86c67 100644 (file)
@@ -7,6 +7,13 @@
  */
 package org.opendaylight.controller.config.manager.impl.osgi;
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
 import org.osgi.framework.Bundle;
 import org.osgi.framework.BundleContext;
 import org.osgi.framework.BundleEvent;
@@ -15,74 +22,114 @@ import org.osgi.util.tracker.BundleTrackerCustomizer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+
 /**
  *
- * Extensible bundle tracker. Takes several BundleTrackerCustomizers and propagates bundle events to all of them.
- * Primary customizer
+ * Extensible bundle tracker. Takes several BundleTrackerCustomizers and
+ * propagates bundle events to all of them.
+ *
+ * The primary customizer may return a tracking object,
+ * which will be passed back to it during invocation of
+ * {@link BundleTrackerCustomizer#removedBundle(Bundle, BundleEvent, Future)}
+ *
+ *
+ * This extender modifies behaviour so that the platform thread does not leak into
+ * {@link BundleTrackerCustomizer#addingBundle(Bundle, BundleEvent)};
+ * instead, that event is delivered from its own single-threaded executor.
+ *
+ * If a bundle is removed before its adding event has been executed,
+ * that event is cancelled. If the addingBundle event is currently in progress
+ * or has already been executed, the platform thread is blocked until addingBundle
+ * finishes, so the bundle can be removed correctly in the platform thread.
+ *
+ *
+ * Method {@link BundleTrackerCustomizer#removedBundle(Bundle, BundleEvent, Object)}
+ * is never invoked on registered trackers.
  *
  * @param <T>
  */
-public final class ExtensibleBundleTracker<T> extends BundleTracker<T> {
+public final class ExtensibleBundleTracker<T> extends BundleTracker<Future<T>> {
 
+    private static final ThreadFactory THREAD_FACTORY = new ThreadFactoryBuilder()
+        .setNameFormat("config-bundle-tracker-%d")
+        .build();
+    private final ExecutorService eventExecutor;
     private final BundleTrackerCustomizer<T> primaryTracker;
     private final BundleTrackerCustomizer<?>[] additionalTrackers;
 
-    private static final Logger logger = LoggerFactory.getLogger(ExtensibleBundleTracker.class);
+    private static final Logger LOG = LoggerFactory.getLogger(ExtensibleBundleTracker.class);
 
-    public ExtensibleBundleTracker(BundleContext context, BundleTrackerCustomizer<T> primaryBundleTrackerCustomizer,
-                                   BundleTrackerCustomizer<?>... additionalBundleTrackerCustomizers) {
+    public ExtensibleBundleTracker(final BundleContext context, final BundleTrackerCustomizer<T> primaryBundleTrackerCustomizer,
+                                   final BundleTrackerCustomizer<?>... additionalBundleTrackerCustomizers) {
         this(context, Bundle.ACTIVE, primaryBundleTrackerCustomizer, additionalBundleTrackerCustomizers);
     }
 
-    public ExtensibleBundleTracker(BundleContext context, int bundleState,
-                                   BundleTrackerCustomizer<T> primaryBundleTrackerCustomizer,
-                                   BundleTrackerCustomizer<?>... additionalBundleTrackerCustomizers) {
+    public ExtensibleBundleTracker(final BundleContext context, final int bundleState,
+                                   final BundleTrackerCustomizer<T> primaryBundleTrackerCustomizer,
+                                   final BundleTrackerCustomizer<?>... additionalBundleTrackerCustomizers) {
         super(context, bundleState, null);
         this.primaryTracker = primaryBundleTrackerCustomizer;
         this.additionalTrackers = additionalBundleTrackerCustomizers;
-        logger.trace("Registered as extender with context {} and bundle state {}", context, bundleState);
+        eventExecutor = Executors.newSingleThreadExecutor(THREAD_FACTORY);
+        LOG.trace("Registered as extender with context {} and bundle state {}", context, bundleState);
     }
 
     @Override
-    public T addingBundle(final Bundle bundle, final BundleEvent event) {
-        T primaryTrackerRetVal = primaryTracker.addingBundle(bundle, event);
-
-        forEachAdditionalBundle(new BundleStrategy() {
+    public Future<T> addingBundle(final Bundle bundle, final BundleEvent event) {
+        LOG.trace("Submiting AddingBundle for bundle {} and event {} to be processed asynchronously",bundle,event);
+        Future<T> future = eventExecutor.submit(new Callable<T>() {
             @Override
-            public void execute(BundleTrackerCustomizer<?> tracker) {
-                tracker.addingBundle(bundle, event);
+            public T call() throws Exception {
+                try {
+                    T primaryTrackerRetVal = primaryTracker.addingBundle(bundle, event);
+
+                    forEachAdditionalBundle(new BundleStrategy() {
+                        @Override
+                        public void execute(final BundleTrackerCustomizer<?> tracker) {
+                            tracker.addingBundle(bundle, event);
+                        }
+                    });
+                    LOG.trace("AddingBundle for {} and event {} finished successfully",bundle,event);
+                    return primaryTrackerRetVal;
+                } catch (Exception e) {
+                    LOG.error("Failed to add bundle {}",e);
+                    throw e;
+                }
             }
         });
-
-        return primaryTrackerRetVal;
+        return future;
     }
 
     @Override
-    public void modifiedBundle(final Bundle bundle, final BundleEvent event, final T object) {
-        primaryTracker.modifiedBundle(bundle, event, object);
-
-        forEachAdditionalBundle(new BundleStrategy() {
-            @Override
-            public void execute(BundleTrackerCustomizer<?> tracker) {
-                tracker.modifiedBundle(bundle, event, null);
-            }
-        });
+    public void modifiedBundle(final Bundle bundle, final BundleEvent event, final Future<T> object) {
+        // Intentionally NOOP
 
     }
 
     @Override
-    public void removedBundle(final Bundle bundle, final BundleEvent event, final T object) {
-        primaryTracker.removedBundle(bundle, event, object);
-
-        forEachAdditionalBundle(new BundleStrategy() {
-            @Override
-            public void execute(BundleTrackerCustomizer<?> tracker) {
-                tracker.removedBundle(bundle, event, null);
-            }
-        });
+    public void removedBundle(final Bundle bundle, final BundleEvent event, final Future<T> object) {
+        if(!object.isDone() && object.cancel(false)) {
+            // We canceled adding event before it was processed
+            // so it is safe to return
+            LOG.trace("Adding Bundle event for {} was cancelled. No additional work required.",bundle);
+            return;
+        }
+        try {
+            LOG.trace("Invoking removedBundle event for {}",bundle);
+            primaryTracker.removedBundle(bundle, event, object.get());
+            forEachAdditionalBundle(new BundleStrategy() {
+                @Override
+                public void execute(final BundleTrackerCustomizer<?> tracker) {
+                    tracker.removedBundle(bundle, event, null);
+                }
+            });
+            LOG.trace("Removed bundle event for {} finished successfully.",bundle);
+        } catch (InterruptedException | ExecutionException e) {
+            LOG.error("Addition of bundle failed, ", e);
+        }
     }
 
-    private void forEachAdditionalBundle(BundleStrategy lambda) {
+    private void forEachAdditionalBundle(final BundleStrategy lambda) {
         for (BundleTrackerCustomizer<?> trac : additionalTrackers) {
             lambda.execute(trac);
         }
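For orientation, here is a minimal sketch (not part of this change) of how the reworked ExtensibleBundleTracker might be wired up from an OSGi bundle activator; the customizer and class name are hypothetical and only illustrate that addingBundle results are now produced asynchronously and carried as Future<T> tracking objects:

  // Hypothetical usage sketch; assumes a BundleContext supplied by BundleActivator.start().
  import org.opendaylight.controller.config.manager.impl.osgi.ExtensibleBundleTracker;
  import org.osgi.framework.Bundle;
  import org.osgi.framework.BundleContext;
  import org.osgi.framework.BundleEvent;
  import org.osgi.util.tracker.BundleTrackerCustomizer;

  public final class TrackerWiringSketch {
      public static void wire(final BundleContext bundleContext) {
          BundleTrackerCustomizer<String> primary = new BundleTrackerCustomizer<String>() {
              @Override
              public String addingBundle(final Bundle bundle, final BundleEvent event) {
                  // Runs on the tracker's single-threaded executor, not on the OSGi platform thread.
                  return bundle.getSymbolicName();
              }

              @Override
              public void modifiedBundle(final Bundle bundle, final BundleEvent event, final String object) {
                  // The tracker no longer propagates modifiedBundle, so this is never driven by it.
              }

              @Override
              public void removedBundle(final Bundle bundle, final BundleEvent event, final String object) {
                  // Invoked with the value produced by addingBundle once its Future has completed.
              }
          };
          ExtensibleBundleTracker<String> tracker = new ExtensibleBundleTracker<>(bundleContext, primary);
          tracker.open();  // inherited from BundleTracker; adding events are queued to the executor
          // ... later, typically from BundleActivator.stop():
          // tracker.close();
      }
  }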
diff --git a/opendaylight/config/yang-jmx-generator/src/main/java/org/opendaylight/controller/config/yangjmxgenerator/ModuleMXBeanEntryBuilder.java b/opendaylight/config/yang-jmx-generator/src/main/java/org/opendaylight/controller/config/yangjmxgenerator/ModuleMXBeanEntryBuilder.java
index d8c5a56fb7a7561de457894180ad1a1b418b7f57..518a11540e6c1ea33ebd3cf70909ad35b9874024 100644 (file)
@@ -130,7 +130,7 @@ final class ModuleMXBeanEntryBuilder {
         Map<String, ModuleMXBeanEntry> result = new HashMap<>();
 
         for (AugmentationSchema augmentation : currentModule.getAugmentations()) {
-            Set<DataSchemaNode> childNodes = augmentation.getChildNodes();
+            Collection<DataSchemaNode> childNodes = augmentation.getChildNodes();
             if (areAllChildrenChoiceCaseNodes(childNodes)) {
                 for (ChoiceCaseNode childCase : castChildNodesToChoiceCases(childNodes)) {
                     // TODO refactor, extract to standalone builder class
@@ -215,7 +215,7 @@ final class ModuleMXBeanEntryBuilder {
         return moduleIdentities;
     }
 
-    private Collection<ChoiceCaseNode> castChildNodesToChoiceCases(final Set<DataSchemaNode> childNodes) {
+    private Collection<ChoiceCaseNode> castChildNodesToChoiceCases(final Collection<DataSchemaNode> childNodes) {
         return Collections2.transform(childNodes, new Function<DataSchemaNode, ChoiceCaseNode>() {
             @Nullable
             @Override
@@ -225,7 +225,7 @@ final class ModuleMXBeanEntryBuilder {
         });
     }
 
-    private boolean areAllChildrenChoiceCaseNodes(final Set<DataSchemaNode> childNodes) {
+    private boolean areAllChildrenChoiceCaseNodes(final Iterable<DataSchemaNode> childNodes) {
         for (DataSchemaNode childNode : childNodes) {
             if (childNode instanceof ChoiceCaseNode == false) {
                 return false;
@@ -388,7 +388,7 @@ final class ModuleMXBeanEntryBuilder {
      * @return either choiceCaseNode or its only child container
      */
     private <HAS_CHILDREN_AND_QNAME extends DataNodeContainer & SchemaNode> HAS_CHILDREN_AND_QNAME getDataNodeContainer(final ChoiceCaseNode choiceCaseNode) {
-        Set<DataSchemaNode> childNodes = choiceCaseNode.getChildNodes();
+        Collection<DataSchemaNode> childNodes = choiceCaseNode.getChildNodes();
         if (childNodes.size() == 1) {
             DataSchemaNode onlyChild = childNodes.iterator().next();
             if (onlyChild instanceof ContainerSchemaNode) {
@@ -406,8 +406,7 @@ final class ModuleMXBeanEntryBuilder {
             final TypeProviderWrapper typeProviderWrapper, final Map<QName, ServiceInterfaceEntry> qNamesToSIEs,
             final SchemaContext schemaContext, final String packageName) {
         Map<String, AttributeIfc> yangToAttributes = new HashMap<>();
-        Set<DataSchemaNode> childNodes = dataNodeContainer.getChildNodes();
-        for (DataSchemaNode attrNode : childNodes) {
+        for (DataSchemaNode attrNode : dataNodeContainer.getChildNodes()) {
             AttributeIfc attributeValue = getAttributeValue(attrNode, currentModule, qNamesToSIEs, typeProviderWrapper,
                     schemaContext, packageName);
             yangToAttributes.put(attributeValue.getAttributeYangName(), attributeValue);
diff --git a/opendaylight/config/yang-jmx-generator/src/main/java/org/opendaylight/controller/config/yangjmxgenerator/RuntimeBeanEntry.java b/opendaylight/config/yang-jmx-generator/src/main/java/org/opendaylight/controller/config/yangjmxgenerator/RuntimeBeanEntry.java
index 23b071c817022ac0103f7cc01c4cc5f5482ba995..67f624175b00aacff88369aa8fce5fe00ce5a961 100644 (file)
@@ -332,7 +332,7 @@ public class RuntimeBeanEntry {
         }
     }
 
-    private static Collection<DataSchemaNode> sortAttributes(final Set<DataSchemaNode> childNodes) {
+    private static Collection<DataSchemaNode> sortAttributes(final Collection<DataSchemaNode> childNodes) {
         final TreeSet<DataSchemaNode> dataSchemaNodes = new TreeSet<>(new Comparator<DataSchemaNode>() {
             @Override
             public int compare(final DataSchemaNode o1, final DataSchemaNode o2) {
diff --git a/opendaylight/distribution/opendaylight-karaf/pom.xml b/opendaylight/distribution/opendaylight-karaf/pom.xml
index 4ef0b8e86bf926062ff0e7071775efd79d0f531a..06e0a92fa6cf3d1c0656736c65f2d0cc76d0cc09 100644 (file)
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>features-odl-protocol-framework</artifactId>
-      <version>${protocol-framework.version}</version>
-      <classifier>features</classifier>
-      <type>xml</type>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>netconf-features</artifactId>
-      <version>${netconf.version}</version>
-      <classifier>features</classifier>
-      <type>xml</type>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>config-features</artifactId>
+      <artifactId>config-netty-features</artifactId>
       <version>${config.version}</version>
       <classifier>features</classifier>
       <type>xml</type>
           </execution>
         </executions>
       </plugin>
+        <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+                <execution>
+                    <phase>prepare-package</phase>
+                    <goals>
+                        <goal>run</goal>
+                    </goals>
+                    <configuration>
+                        <tasks>
+                            <copy todir="${project.build.directory}/assembly/bin" overwrite="true">
+                                <fileset dir="${basedir}/src/main/resources/karaf/" includes="karaf,karaf.bat,instance,instance.bat"/>
+                            </copy>
+                        </tasks>
+                    </configuration>
+                </execution>
+            </executions>
+        </plugin>
     </plugins>
   </build>
   <scm>
diff --git a/opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/instance b/opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/instance
new file mode 100755 (executable)
index 0000000..7288042
--- /dev/null
@@ -0,0 +1,349 @@
+#!/bin/sh
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+
+DIRNAME=`dirname "$0"`
+PROGNAME=`basename "$0"`
+
+#
+# Sourcing environment settings for karaf similar to tomcats setenv
+#
+KARAF_SCRIPT="instance"
+export KARAF_SCRIPT
+if [ -f "$DIRNAME/setenv" ]; then
+  . "$DIRNAME/setenv"
+fi
+
+#
+# Check/Set up some easily accessible MIN/MAX params for JVM mem usage
+#
+if [ "x$JAVA_MIN_MEM" = "x" ]; then
+    JAVA_MIN_MEM=128M
+    export JAVA_MIN_MEM
+fi
+if [ "x$JAVA_MAX_MEM" = "x" ]; then
+    JAVA_MAX_MEM=512M
+    export JAVA_MAX_MEM
+fi
+
+warn() {
+    echo "${PROGNAME}: $*"
+}
+
+die() {
+    warn "$*"
+    exit 1
+}
+
+detectOS() {
+    # OS specific support (must be 'true' or 'false').
+    cygwin=false;
+    darwin=false;
+    aix=false;
+    os400=false;
+    case "`uname`" in
+        CYGWIN*)
+            cygwin=true
+            ;;
+        Darwin*)
+            darwin=true
+            ;;
+        AIX*)
+            aix=true
+            ;;
+        OS400*)
+            os400=true
+            ;;
+    esac
+    # For AIX, set an environment variable
+    if $aix; then
+         export LDR_CNTRL=MAXDATA=0xB0000000@DSA
+         echo $LDR_CNTRL
+    fi
+}
+
+unlimitFD() {
+    # Use the maximum available, or set MAX_FD != -1 to use that
+    if [ "x$MAX_FD" = "x" ]; then
+        MAX_FD="maximum"
+    fi
+
+    # Increase the maximum file descriptors if we can
+    if [ "$os400" = "false" ] && [ "$cygwin" = "false" ]; then
+        MAX_FD_LIMIT=`ulimit -H -n`
+        if [ "$MAX_FD_LIMIT" != 'unlimited' ]; then 
+            if [ $? -eq 0 ]; then
+                if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ]; then
+                    # use the system max
+                    MAX_FD="$MAX_FD_LIMIT"
+                fi
+
+                ulimit -n $MAX_FD > /dev/null
+                # echo "ulimit -n" `ulimit -n`
+                if [ $? -ne 0 ]; then
+                    warn "Could not set maximum file descriptor limit: $MAX_FD"
+                fi
+            else
+                warn "Could not query system maximum file descriptor limit: $MAX_FD_LIMIT"
+            fi
+        fi
+    fi
+}
+
+locateHome() {
+    if [ "x$KARAF_HOME" != "x" ]; then
+        warn "Ignoring predefined value for KARAF_HOME"
+    fi
+
+    # In POSIX shells, CDPATH may cause cd to write to stdout
+    (unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+    KARAF_HOME=`cd "$DIRNAME/.."; pwd`
+    if [ ! -d "$KARAF_HOME" ]; then
+        die "KARAF_HOME is not valid: $KARAF_HOME"
+    fi
+}
+
+locateBase() {
+    if [ "x$KARAF_BASE" != "x" ]; then
+        if [ ! -d "$KARAF_BASE" ]; then
+            die "KARAF_BASE is not valid: $KARAF_BASE"
+        fi
+    else
+        KARAF_BASE=$KARAF_HOME
+    fi
+}
+
+locateData() {
+    if [ "x$KARAF_DATA" != "x" ]; then
+        if [ ! -d "$KARAF_DATA" ]; then
+            die "KARAF_DATA is not valid: $KARAF_DATA"
+        fi
+    else
+        KARAF_DATA=$KARAF_BASE/data
+    fi
+}
+
+locateEtc() {
+    if [ "x$KARAF_ETC" != "x" ]; then
+        if [ ! -d "$KARAF_ETC" ]; then
+            die "KARAF_ETC is not valid: $KARAF_ETC"
+        fi
+    else
+        KARAF_ETC=$KARAF_BASE/etc
+    fi
+}
+
+setupNativePath() {
+    # Support for loading native libraries
+    LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:$KARAF_BASE/lib:$KARAF_HOME/lib"
+
+    # For Cygwin, set PATH from LD_LIBRARY_PATH
+    if $cygwin; then
+        LD_LIBRARY_PATH=`cygpath --path --windows "$LD_LIBRARY_PATH"`
+        PATH="$PATH;$LD_LIBRARY_PATH"
+        export PATH
+    fi
+    export LD_LIBRARY_PATH
+}
+
+pathCanonical() {
+    local dst="${1}"
+    while [ -h "${dst}" ] ; do
+        ls=`ls -ld "${dst}"`
+        link=`expr "$ls" : '.*-> \(.*\)$'`
+        if expr "$link" : '/.*' > /dev/null; then
+            dst="$link"
+        else
+            dst="`dirname "${dst}"`/$link"
+        fi
+    done
+    local bas=`basename "${dst}"`
+    local dir=`dirname "${dst}"`
+    if [ "$bas" != "$dir" ]; then
+        dst="`pathCanonical "$dir"`/$bas"
+    fi
+    echo "${dst}" | sed -e 's#//#/#g' -e 's#/./#/#g' -e 's#/[^/]*/../#/#g'
+}
+
+locateJava() {
+    # Setup the Java Virtual Machine
+    if $cygwin ; then
+        [ -n "$JAVA" ] && JAVA=`cygpath --unix "$JAVA"`
+        [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+    fi
+
+    if [ "x$JAVA_HOME" = "x" ] && [ "$darwin" = "true" ]; then
+        JAVA_HOME="$(/usr/libexec/java_home)"
+    fi
+    if [ "x$JAVA" = "x" ] && [ -r /etc/gentoo-release ] ; then
+        JAVA_HOME=`java-config --jre-home`
+    fi
+    if [ "x$JAVA" = "x" ]; then
+        if [ "x$JAVA_HOME" != "x" ]; then
+            if [ ! -d "$JAVA_HOME" ]; then
+                die "JAVA_HOME is not valid: $JAVA_HOME"
+            fi
+            JAVA="$JAVA_HOME/bin/java"
+        else
+            warn "JAVA_HOME not set; results may vary"
+            JAVA=`type java`
+            JAVA=`expr "$JAVA" : '.*is \(.*\)$'`
+            if [ "x$JAVA" = "x" ]; then
+                die "java command not found"
+            fi
+        fi
+    fi
+    if [ "x$JAVA_HOME" = "x" ]; then
+        JAVA_HOME="$(dirname $(dirname $(pathCanonical "$JAVA")))"
+    fi
+}
+
+detectJVM() {
+   #echo "`$JAVA -version`"
+   # This service should call `java -version`,
+   # read stdout, and look for hints
+   if $JAVA -version 2>&1 | grep "^IBM" ; then
+       JVM_VENDOR="IBM"
+   # on OS/400, java -version does not contain IBM explicitly
+   elif $os400; then
+       JVM_VENDOR="IBM"
+   else
+       JVM_VENDOR="SUN"
+   fi
+   # echo "JVM vendor is $JVM_VENDOR"
+}
+
+setupDebugOptions() {
+    if [ "x$JAVA_OPTS" = "x" ]; then
+        JAVA_OPTS="$DEFAULT_JAVA_OPTS"
+    fi
+    export JAVA_OPTS
+
+    # Set Debug options if enabled
+    if [ "x$KARAF_DEBUG" != "x" ]; then
+        # Use the defaults if JAVA_DEBUG_OPTS was not set
+        if [ "x$JAVA_DEBUG_OPTS" = "x" ]; then
+            JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
+        fi
+
+        JAVA_OPTS="$JAVA_DEBUG_OPTS $JAVA_OPTS"
+        warn "Enabling Java debug options: $JAVA_DEBUG_OPTS"
+    fi
+}
+
+setupDefaults() {
+    DEFAULT_JAVA_OPTS="-Xms$JAVA_MIN_MEM -Xmx$JAVA_MAX_MEM "
+
+    #Set the JVM_VENDOR specific JVM flags
+    if [ "$JVM_VENDOR" = "SUN" ]; then
+        #
+        # Check some easily accessible MIN/MAX params for JVM mem usage
+        #
+        if [ "x$JAVA_PERM_MEM" != "x" ]; then
+            DEFAULT_JAVA_OPTS="$DEFAULT_JAVA_OPTS -XX:PermSize=$JAVA_PERM_MEM"
+        fi
+        if [ "x$JAVA_MAX_PERM_MEM" != "x" ]; then
+            DEFAULT_JAVA_OPTS="$DEFAULT_JAVA_OPTS -XX:MaxPermSize=$JAVA_MAX_PERM_MEM"
+        fi
+        DEFAULT_JAVA_OPTS="-server $DEFAULT_JAVA_OPTS -Dcom.sun.management.jmxremote"
+    elif [ "$JVM_VENDOR" = "IBM" ]; then
+        if $os400; then
+            DEFAULT_JAVA_OPTS="$DEFAULT_JAVA_OPTS"
+        elif $aix; then
+            DEFAULT_JAVA_OPTS="-Xverify:none -Xdump:heap -Xlp $DEFAULT_JAVA_OPTS"
+        else
+            DEFAULT_JAVA_OPTS="-Xverify:none $DEFAULT_JAVA_OPTS"
+        fi
+    fi
+
+    # Add the jars in the lib dir
+    for file in "$KARAF_HOME"/lib/*.jar
+    do
+        if [ -z "$CLASSPATH" ]; then
+            CLASSPATH="$file"
+        else
+            CLASSPATH="$CLASSPATH:$file"
+        fi
+    done
+    DEFAULT_JAVA_DEBUG_OPTS="-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"
+
+    ##
+    ## TODO: Move to conf/profiler/yourkit.{sh|cmd}
+    ##
+    # Uncomment to enable YourKit profiling
+    #DEFAULT_JAVA_DEBUG_OPTS="-Xrunyjpagent"
+}
+
+init() {
+    # Determine if there is special OS handling we must perform
+    detectOS
+
+    # Unlimit the number of file descriptors if possible
+    unlimitFD
+
+    # Locate the Karaf home directory
+    locateHome
+
+    # Locate the Karaf base directory
+    locateBase
+
+    # Locate the Karaf data directory
+    locateData
+
+    # Locate the Karaf etc directory
+    locateEtc
+
+    # Setup the native library path
+    setupNativePath
+
+    # Locate the Java VM to execute
+    locateJava
+
+    # Determine the JVM vendor
+    detectJVM
+
+    # Setup default options
+    setupDefaults
+
+    # Install debug options
+    setupDebugOptions
+
+}
+
+run() {
+
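+    # Put the Karaf 3.0.1 instance command, shell console, Aries Blueprint,
+    # Pax Logging and Felix framework jars on the classpath so the instance
+    # command can run as a plain Java process outside the OSGi container.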
+    CLASSPATH="${KARAF_HOME}/system/org/apache/karaf/instance/org.apache.karaf.instance.command/3.0.1/org.apache.karaf.instance.command-3.0.1.jar:${KARAF_HOME}/system/org/apache/karaf/instance/org.apache.karaf.instance.core/3.0.1/org.apache.karaf.instance.core-3.0.1.jar:${KARAF_HOME}/system/org/apache/karaf/shell/org.apache.karaf.shell.console/3.0.1/org.apache.karaf.shell.console-3.0.1.jar:${KARAF_HOME}/system/org/apache/karaf/shell/org.apache.karaf.shell.table/3.0.1/org.apache.karaf.shell.table-3.0.1.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.api/1.0.0/org.apache.aries.blueprint.api-1.0.0.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.core/1.4.0/org.apache.aries.blueprint.core-1.4.0.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.cm/1.0.3/org.apache.aries.blueprint.cm-1.0.3.jar:${KARAF_HOME}/system/org/ops4j/pax/logging/pax-logging-api/1.7.2/pax-logging-api-1.7.2.jar:${KARAF_HOME}/system/org/apache/felix/org.apache.felix.framework/4.2.1/org.apache.felix.framework-4.2.1.jar:${KARAF_HOME}/system/jline/jline/2.11/jline-2.11.jar:$CLASSPATH"
+
+    if $cygwin; then
+        KARAF_HOME=`cygpath --path --windows "$KARAF_HOME"`
+        KARAF_BASE=`cygpath --path --windows "$KARAF_BASE"`
+        KARAF_DATA=`cygpath --path --windows "$KARAF_DATA"`
+        KARAF_ETC=`cygpath --path --windows "$KARAF_ETC"`
+        CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+    fi
+
+    exec "$JAVA" $JAVA_OPTS -Dkaraf.instances="${KARAF_HOME}/instances" -Dkaraf.home="$KARAF_HOME" -Dkaraf.base="$KARAF_BASE" -Dkaraf.etc="$KARAF_ETC" -Djava.io.tmpdir="$KARAF_DATA/tmp" -Djava.util.logging.config.file="$KARAF_BASE/etc/java.util.logging.properties" $KARAF_OPTS $OPTS -classpath "$CLASSPATH" org.apache.karaf.instance.main.Execute "$@"
+}
+
+main() {
+    init
+    run "$@"
+}
+
+main "$@"
+
diff --git a/opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/instance.bat b/opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/instance.bat
new file mode 100644 (file)
index 0000000..49c2c0f
--- /dev/null
@@ -0,0 +1,151 @@
+@echo off\r
+rem\r
+rem\r
+rem    Licensed to the Apache Software Foundation (ASF) under one or more\r
+rem    contributor license agreements.  See the NOTICE file distributed with\r
+rem    this work for additional information regarding copyright ownership.\r
+rem    The ASF licenses this file to You under the Apache License, Version 2.0\r
+rem    (the "License"); you may not use this file except in compliance with\r
+rem    the License.  You may obtain a copy of the License at\r
+rem\r
+rem       http://www.apache.org/licenses/LICENSE-2.0\r
+rem\r
+rem    Unless required by applicable law or agreed to in writing, software\r
+rem    distributed under the License is distributed on an "AS IS" BASIS,\r
+rem    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+rem    See the License for the specific language governing permissions and\r
+rem    limitations under the License.\r
+rem\r
+\r
+if not "%ECHO%" == "" echo %ECHO%\r
+\r
+setlocal\r
+set DIRNAME=%~dp0%\r
+set PROGNAME=%~nx0%\r
+set ARGS=%*\r
+\r
+rem Sourcing environment settings for Karaf similar to Tomcat's setenv\r
+SET KARAF_SCRIPT="instance.bat"\r
+if exist "%DIRNAME%setenv.bat" (\r
+  call "%DIRNAME%setenv.bat"\r
+)\r
+\r
+rem Check console window title. Set to Karaf by default\r
+if not "%KARAF_TITLE%" == "" (\r
+    title %KARAF_TITLE%\r
+) else (\r
+    title Karaf\r
+)\r
+\r
+rem Check/Set up some easily accessible MIN/MAX params for JVM mem usage\r
+if "%JAVA_MIN_MEM%" == "" (\r
+    set JAVA_MIN_MEM=128M\r
+)\r
+if "%JAVA_MAX_MEM%" == "" (\r
+    set JAVA_MAX_MEM=512M\r
+)\r
+\r
+goto BEGIN\r
+\r
+:warn\r
+    echo %PROGNAME%: %*\r
+goto :EOF\r
+\r
+:BEGIN\r
+\r
+rem # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r
+\r
+if not "%KARAF_HOME%" == "" (\r
+    call :warn Ignoring predefined value for KARAF_HOME\r
+)\r
+set KARAF_HOME=%DIRNAME%..\r
+if not exist "%KARAF_HOME%" (\r
+    call :warn KARAF_HOME is not valid: "%KARAF_HOME%"\r
+    goto END\r
+)\r
+\r
+if not "%KARAF_BASE%" == "" (\r
+    if not exist "%KARAF_BASE%" (\r
+       call :warn KARAF_BASE is not valid: "%KARAF_BASE%"\r
+       goto END\r
+    )\r
+)\r
+if "%KARAF_BASE%" == "" (\r
+  set "KARAF_BASE=%KARAF_HOME%"\r
+)\r
+\r
+if not "%KARAF_DATA%" == "" (\r
+    if not exist "%KARAF_DATA%" (\r
+        call :warn KARAF_DATA is not valid: "%KARAF_DATA%"\r
+        goto END\r
+    )\r
+)\r
+if "%KARAF_DATA%" == "" (\r
+    set "KARAF_DATA=%KARAF_BASE%\data"\r
+)\r
+\r
+if not "%KARAF_ETC%" == "" (\r
+    if not exist "%KARAF_ETC%" (\r
+        call :warn KARAF_ETC is not valid: "%KARAF_ETC%"\r
+        goto END\r
+    )\r
+)\r
+if "%KARAF_ETC%" == "" (\r
+    set "KARAF_ETC=%KARAF_BASE%\etc"\r
+)\r
+\r
+set DEFAULT_JAVA_OPTS=\r
+set DEFAULT_JAVA_DEBUG_OPTS=-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005\r
+\r
+rem Support for loading native libraries\r
+set PATH=%PATH%;%KARAF_BASE%\lib;%KARAF_HOME%\lib\r
+\r
+rem Setup the Java Virtual Machine\r
+if not "%JAVA%" == "" goto :Check_JAVA_END\r
+    set JAVA=java\r
+    if "%JAVA_HOME%" == "" call :warn JAVA_HOME not set; results may vary\r
+    if not "%JAVA_HOME%" == "" set JAVA=%JAVA_HOME%\bin\java\r
+    if not exist "%JAVA_HOME%" (\r
+        call :warn JAVA_HOME is not valid: "%JAVA_HOME%"\r
+        goto END\r
+    )\r
+:Check_JAVA_END\r
+\r
+if "%JAVA_OPTS%" == "" set JAVA_OPTS=%DEFAULT_JAVA_OPTS%\r
+\r
+if "%KARAF_DEBUG%" == "" goto :KARAF_DEBUG_END\r
+    rem Use the defaults if JAVA_DEBUG_OPTS was not set\r
+    if "%JAVA_DEBUG_OPTS%" == "" set JAVA_DEBUG_OPTS=%DEFAULT_JAVA_DEBUG_OPTS%\r
+\r
+    set "JAVA_OPTS=%JAVA_DEBUG_OPTS% %JAVA_OPTS%"\r
+    call :warn Enabling Java debug options: %JAVA_DEBUG_OPTS%\r
+:KARAF_DEBUG_END\r
+\r
+rem Setup the classpath\r
+pushd "%KARAF_HOME%\lib"\r
+for %%G in (karaf*.jar) do call:APPEND_TO_CLASSPATH %%G\r
+popd\r
+goto CLASSPATH_END\r
+\r
+:APPEND_TO_CLASSPATH\r
+set filename=%~1\r
+set suffix=%filename:~-4%\r
+if %suffix% equ .jar set CLASSPATH=%CLASSPATH%;%KARAF_HOME%\lib\%filename%\r
+goto :EOF\r
+\r
+:CLASSPATH_END\r
+\r
+set CLASSPATH=%KARAF_HOME%\system\org\apache\karaf\instance\org.apache.karaf.instance.command\3.0.1\org.apache.karaf.instance.command-3.0.1.jar;%KARAF_HOME%\system\org\apache\karaf\instance\org.apache.karaf.instance.core\3.0.1\org.apache.karaf.instance.core-3.0.1.jar;%KARAF_HOME%\system\org\apache\karaf\shell\org.apache.karaf.shell.console\3.0.1\org.apache.karaf.shell.console-3.0.1.jar;%KARAF_HOME%\system\org\apache\karaf\shell\org.apache.karaf.shell.table\3.0.1\org.apache.karaf.shell.table-3.0.1.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.api\1.0.0\org.apache.aries.blueprint.api-1.0.0.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.core\1.4.0\org.apache.aries.blueprint.core-1.4.0.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.cm\1.0.3\org.apache.aries.blueprint.cm-1.0.3.jar;%KARAF_HOME%\system\org\ops4j\pax\logging\pax-logging-api\1.7.2\pax-logging-api-1.7.2.jar;%KARAF_HOME%\system\org\apache\felix\org.apache.felix.framework\4.2.1\org.apache.felix.framework-4.2.1.jar;%KARAF_HOME%\system\jline\jline\2.11\jline-2.11.jar;%CLASSPATH%\r
+\r
+:EXECUTE\r
+    if "%SHIFT%" == "true" SET ARGS=%2 %3 %4 %5 %6 %7 %8\r
+    if not "%SHIFT%" == "true" SET ARGS=%1 %2 %3 %4 %5 %6 %7 %8\r
+    rem Execute the Java Virtual Machine\r
+    "%JAVA%" %JAVA_OPTS% %OPTS% -classpath "%CLASSPATH%" -Dkaraf.instances="%KARAF_HOME%\instances" -Dkaraf.home="%KARAF_HOME%" -Dkaraf.base="%KARAF_BASE%" -Dkaraf.etc="%KARAF_ETC%" -Djava.io.tmpdir="%KARAF_DATA%\tmp" -Djava.util.logging.config.file="%KARAF_BASE%\etc\java.util.logging.properties" %KARAF_OPTS% org.apache.karaf.instance.main.Execute %ARGS%\r
+\r
+rem # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r
+\r
+:END\r
+\r
+endlocal\r
+\r
diff --git a/opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/karaf b/opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/karaf
new file mode 100755 (executable)
index 0000000..cad052a
--- /dev/null
@@ -0,0 +1,418 @@
+#!/bin/sh
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+
+DIRNAME=`dirname $0`
+PROGNAME=`basename $0`
+
+#
+# Sourcing environment settings for Karaf similar to Tomcat's setenv
+#
+KARAF_SCRIPT="karaf"
+export KARAF_SCRIPT
+if [ -f "$DIRNAME/setenv" ]; then
+  . "$DIRNAME/setenv"
+fi
+
+#
+# Set up some easily accessible MIN/MAX params for JVM mem usage
+#
+if [ "x$JAVA_MIN_MEM" = "x" ]; then
+    JAVA_MIN_MEM=128M
+    export JAVA_MIN_MEM
+fi
+if [ "x$JAVA_MAX_MEM" = "x" ]; then
+    JAVA_MAX_MEM=512M
+    export JAVA_MAX_MEM
+fi
+
+#
+# Check the mode that initiated the script
+#
+if [ "x$1" != "x" ]; then
+    MODE=$1
+fi
+
+warn() {
+    echo "${PROGNAME}: $*"
+}
+
+die() {
+    warn "$*"
+    exit 1
+}
+
+detectOS() {
+    # OS specific support (must be 'true' or 'false').
+    cygwin=false;
+    darwin=false;
+    aix=false;
+    os400=false;
+    case "`uname`" in
+        CYGWIN*)
+            cygwin=true
+            ;;
+        Darwin*)
+            darwin=true
+            ;;
+        AIX*)
+            aix=true
+            ;;
+        OS400*)
+            os400=true
+            ;;
+    esac
+    # For AIX, set an environment variable
+    if $aix; then
+         export LDR_CNTRL=MAXDATA=0xB0000000@DSA
+         echo $LDR_CNTRL
+    fi
+}
+
+unlimitFD() {
+    # Use the maximum available, or set MAX_FD != -1 to use that
+    if [ "x$MAX_FD" = "x" ]; then
+        MAX_FD="maximum"
+    fi
+
+    # Increase the maximum file descriptors if we can
+    if [ "$os400" = "false" ] && [ "$cygwin" = "false" ]; then
+        MAX_FD_LIMIT=`ulimit -H -n`
+        MAX_FD_QUERY_STATUS=$?
+        if [ "$MAX_FD_LIMIT" != 'unlimited' ]; then
+            if [ "$MAX_FD_QUERY_STATUS" -eq 0 ]; then
+                if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ]; then
+                    # use the system max
+                    MAX_FD="$MAX_FD_LIMIT"
+                fi
+
+                ulimit -n $MAX_FD > /dev/null
+                # echo "ulimit -n" `ulimit -n`
+                if [ $? -ne 0 ]; then
+                    warn "Could not set maximum file descriptor limit: $MAX_FD"
+                fi
+            else
+                warn "Could not query system maximum file descriptor limit: $MAX_FD_LIMIT"
+            fi
+        fi
+    fi
+}
+
+locateHome() {
+    if [ "x$KARAF_HOME" != "x" ]; then
+        warn "Ignoring predefined value for KARAF_HOME"
+    fi
+
+    # In POSIX shells, CDPATH may cause cd to write to stdout
+    (unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+    KARAF_HOME=`cd "$DIRNAME/.."; pwd`
+    if [ ! -d "$KARAF_HOME" ]; then
+        die "KARAF_HOME is not valid: $KARAF_HOME"
+    fi
+}
+
+locateBase() {
+    if [ "x$KARAF_BASE" != "x" ]; then
+        if [ ! -d "$KARAF_BASE" ]; then
+            die "KARAF_BASE is not valid: $KARAF_BASE"
+        fi
+    else
+        KARAF_BASE=$KARAF_HOME
+    fi
+}
+
+locateData() {
+    if [ "x$KARAF_DATA" != "x" ]; then
+        if [ ! -d "$KARAF_DATA" ]; then
+            die "KARAF_DATA is not valid: $KARAF_DATA"
+        fi
+    else
+        KARAF_DATA=$KARAF_BASE/data
+    fi
+}
+
+locateEtc() {
+    if [ "x$KARAF_ETC" != "x" ]; then
+        if [ ! -d "$KARAF_ETC" ]; then
+            die "KARAF_ETC is not valid: $KARAF_ETC"
+        fi
+    else
+        KARAF_ETC=$KARAF_BASE/etc
+    fi
+}
+
+setupNativePath() {
+    # Support for loading native libraries
+    LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:$KARAF_BASE/lib:$KARAF_HOME/lib"
+
+    # For Cygwin, set PATH from LD_LIBRARY_PATH
+    if $cygwin; then
+        LD_LIBRARY_PATH=`cygpath --path --windows "$LD_LIBRARY_PATH"`
+        PATH="$PATH;$LD_LIBRARY_PATH"
+        export PATH
+    fi
+    export LD_LIBRARY_PATH
+}
+
+pathCanonical() {
+    local dst="${1}"
+    while [ -h "${dst}" ] ; do
+        ls=`ls -ld "${dst}"`
+        link=`expr "$ls" : '.*-> \(.*\)$'`
+        if expr "$link" : '/.*' > /dev/null; then
+            dst="$link"
+        else
+            dst="`dirname "${dst}"`/$link"
+        fi
+    done
+    local bas=`basename "${dst}"`
+    local dir=`dirname "${dst}"`
+    if [ "$bas" != "$dir" ]; then
+        dst="`pathCanonical "$dir"`/$bas"
+    fi
+    echo "${dst}" | sed -e 's#//#/#g' -e 's#/./#/#g' -e 's#/[^/]*/../#/#g'
+}
+
+locateJava() {
+    # Setup the Java Virtual Machine
+    if $cygwin ; then
+        [ -n "$JAVA" ] && JAVA=`cygpath --unix "$JAVA"`
+        [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
+    fi
+
+       if [ "x$JAVA_HOME" = "x" ] && [ "$darwin" = "true" ]; then
+               JAVA_HOME="$(/usr/libexec/java_home)"
+       fi
+    if [ "x$JAVA" = "x" ] && [ -r /etc/gentoo-release ] ; then
+        JAVA_HOME=`java-config --jre-home`
+    fi
+    if [ "x$JAVA" = "x" ]; then
+        if [ "x$JAVA_HOME" != "x" ]; then
+            if [ ! -d "$JAVA_HOME" ]; then
+                die "JAVA_HOME is not valid: $JAVA_HOME"
+            fi
+            JAVA="$JAVA_HOME/bin/java"
+        else
+            warn "JAVA_HOME not set; results may vary"
+            JAVA=`type java`
+            JAVA=`expr "$JAVA" : '.*is \(.*\)$'`
+            if [ "x$JAVA" = "x" ]; then
+                die "java command not found"
+            fi
+        fi
+    fi
+    if [ "x$JAVA_HOME" = "x" ]; then
+        JAVA_HOME="$(dirname $(dirname $(pathCanonical "$JAVA")))"
+    fi
+}
+
+detectJVM() {
+   #echo "`$JAVA -version`"
+   # This service should call `java -version`,
+   # read stdout, and look for hints
+   if $JAVA -version 2>&1 | grep "^IBM" ; then
+       JVM_VENDOR="IBM"
+   # on OS/400, java -version does not contain IBM explicitly
+   elif $os400; then
+       JVM_VENDOR="IBM"
+   else
+       JVM_VENDOR="SUN"
+   fi
+   # echo "JVM vendor is $JVM_VENDOR"
+}
+
+checkJvmVersion() {
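+   # Extract the numeric version from `java -version` (e.g. "1.7.0_55" -> 70)
+   # and refuse to start on anything older than Java 1.6.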
+   # echo "`$JAVA -version`"
+   VERSION=`$JAVA -version 2>&1 | egrep '"([0-9].[0-9]\..*[0-9])"' | awk '{print substr($3,2,length($3)-2)}' | awk '{print substr($1, 3, 3)}' | sed -e 's;\.;;g'`
+   # echo $VERSION
+   if [ "$VERSION" -lt "60" ]; then
+       echo "JVM must be greater than 1.6"
+       exit 1;
+   fi
+}
+
+setupDebugOptions() {
+    if [ "x$JAVA_OPTS" = "x" ]; then
+        JAVA_OPTS="$DEFAULT_JAVA_OPTS"
+    fi
+    export JAVA_OPTS
+
+    # Set Debug options if enabled
+    if [ "x$KARAF_DEBUG" != "x" ]; then
+        # Ignore DEBUG in case of stop or client mode
+        if [ "x$MODE" = "xstop" ]; then
+            return
+        fi
+        if [ "x$MODE" = "xclient" ]; then
+            return
+        fi
+        # Use the defaults if JAVA_DEBUG_OPTS was not set
+        if [ "x$JAVA_DEBUG_OPTS" = "x" ]; then
+            JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
+        fi
+
+        JAVA_OPTS="$JAVA_DEBUG_OPTS $JAVA_OPTS"
+        warn "Enabling Java debug options: $JAVA_DEBUG_OPTS"
+    fi
+}
+
+setupDefaults() {
+    DEFAULT_JAVA_OPTS="-Xms$JAVA_MIN_MEM -Xmx$JAVA_MAX_MEM -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass "
+
+    #Set the JVM_VENDOR specific JVM flags
+    if [ "$JVM_VENDOR" = "SUN" ]; then
+        #
+        # Check some easily accessible MIN/MAX params for JVM mem usage
+        #
+        if [ "x$JAVA_PERM_MEM" != "x" ]; then
+            DEFAULT_JAVA_OPTS="$DEFAULT_JAVA_OPTS -XX:PermSize=$JAVA_PERM_MEM"
+        fi
+        if [ "x$JAVA_MAX_PERM_MEM" != "x" ]; then
+            DEFAULT_JAVA_OPTS="$DEFAULT_JAVA_OPTS -XX:MaxPermSize=$JAVA_MAX_PERM_MEM"
+        fi
+        DEFAULT_JAVA_OPTS="-server $DEFAULT_JAVA_OPTS -Dcom.sun.management.jmxremote"
+    elif [ "$JVM_VENDOR" = "IBM" ]; then
+        if $os400; then
+            DEFAULT_JAVA_OPTS="$DEFAULT_JAVA_OPTS"
+        elif $aix; then
+            DEFAULT_JAVA_OPTS="-Xverify:none -Xdump:heap -Xlp $DEFAULT_JAVA_OPTS"
+        else
+            DEFAULT_JAVA_OPTS="-Xverify:none $DEFAULT_JAVA_OPTS"
+        fi
+    fi
+
+    # Add the jars in the lib dir
+    for file in "$KARAF_HOME"/lib/karaf*.jar
+    do
+        if [ -z "$CLASSPATH" ]; then
+            CLASSPATH="$file"
+        else
+            CLASSPATH="$CLASSPATH:$file"
+        fi
+    done
+    DEFAULT_JAVA_DEBUG_OPTS="-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"
+
+    ##
+    ## TODO: Move to conf/profiler/yourkit.{sh|cmd}
+    ##
+    # Uncomment to enable YourKit profiling
+    #DEFAULT_JAVA_DEBUG_OPTS="-Xrunyjpagent"
+}
+
+init() {
+    # Determine if there is special OS handling we must perform
+    detectOS
+
+    # Unlimit the number of file descriptors if possible
+    unlimitFD
+
+    # Locate the Karaf home directory
+    locateHome
+
+    # Locate the Karaf base directory
+    locateBase
+
+    # Locate the Karaf data directory
+    locateData
+
+    # Locate the Karaf etc directory
+    locateEtc
+
+    # Setup the native library path
+    setupNativePath
+
+    # Locate the Java VM to execute
+    locateJava
+
+    # Determine the JVM vendor
+    detectJVM
+
+    # Ensure the JVM version is at least 1.6
+    checkJvmVersion
+
+    # Setup default options
+    setupDefaults
+
+    # Install debug options
+    setupDebugOptions
+
+}
+
+run() {
+    OPTS="-Dkaraf.startLocalConsole=true -Dkaraf.startRemoteShell=true"
+    MAIN=org.apache.karaf.main.Main
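+    # Consume leading mode arguments (clean, debug, status, stop, console,
+    # server, client); anything unrecognized is passed through to $MAIN.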
+    while [ "$1" != "" ]; do
+        case $1 in
+            'clean')
+                rm -Rf "$KARAF_DATA"
+                shift
+                ;;
+            'debug')
+                if [ "x$JAVA_DEBUG_OPTS" = "x" ]; then
+                    JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
+                fi
+                JAVA_OPTS="$JAVA_DEBUG_OPTS $JAVA_OPTS"
+                shift
+                ;;
+            'status')
+                MAIN=org.apache.karaf.main.Status
+                shift
+                ;;
+            'stop')
+                MAIN=org.apache.karaf.main.Stop
+                shift
+                ;;
+            'console')
+                shift
+                ;;
+            'server')
+                OPTS="-Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true"
+                shift
+                ;;
+            'client')
+                OPTS="-Dkaraf.startLocalConsole=true -Dkaraf.startRemoteShell=false"
+                shift
+                ;;
+            *)
+                break
+                ;;
+        esac
+    done
+
+    JAVA_ENDORSED_DIRS="${JAVA_HOME}/jre/lib/endorsed:${JAVA_HOME}/lib/endorsed:${KARAF_HOME}/lib/endorsed"
+    JAVA_EXT_DIRS="${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext:${KARAF_HOME}/lib/ext"
+    if $cygwin; then
+        KARAF_HOME=`cygpath --path --windows "$KARAF_HOME"`
+        KARAF_BASE=`cygpath --path --windows "$KARAF_BASE"`
+        KARAF_DATA=`cygpath --path --windows "$KARAF_DATA"`
+        KARAF_ETC=`cygpath --path --windows "$KARAF_ETC"`
+        CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
+        JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
+        JAVA_ENDORSED_DIRS=`cygpath --path --windows "$JAVA_ENDORSED_DIRS"`
+        JAVA_EXT_DIRS=`cygpath --path --windows "$JAVA_EXT_DIRS"`
+    fi
+    cd "$KARAF_BASE"
+
+    exec "$JAVA" $JAVA_OPTS -Djava.endorsed.dirs="${JAVA_ENDORSED_DIRS}" -Djava.ext.dirs="${JAVA_EXT_DIRS}" -Dkaraf.instances="${KARAF_HOME}/instances" -Dkaraf.home="$KARAF_HOME" -Dkaraf.base="$KARAF_BASE" -Dkaraf.data="$KARAF_DATA" -Dkaraf.etc="$KARAF_ETC" -Djava.io.tmpdir="$KARAF_DATA/tmp" -Djava.util.logging.config.file="$KARAF_BASE/etc/java.util.logging.properties" $KARAF_OPTS $OPTS -classpath "$CLASSPATH" $MAIN "$@"
+}
+
+main() {
+    init
+    run "$@"
+}
+
+main "$@"
diff --git a/opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/karaf.bat b/opendaylight/distribution/opendaylight-karaf/src/main/resources/karaf/karaf.bat
new file mode 100644 (file)
index 0000000..a450877
--- /dev/null
@@ -0,0 +1,336 @@
+@echo off\r
+rem\r
+rem\r
+rem    Licensed to the Apache Software Foundation (ASF) under one or more\r
+rem    contributor license agreements.  See the NOTICE file distributed with\r
+rem    this work for additional information regarding copyright ownership.\r
+rem    The ASF licenses this file to You under the Apache License, Version 2.0\r
+rem    (the "License"); you may not use this file except in compliance with\r
+rem    the License.  You may obtain a copy of the License at\r
+rem\r
+rem       http://www.apache.org/licenses/LICENSE-2.0\r
+rem\r
+rem    Unless required by applicable law or agreed to in writing, software\r
+rem    distributed under the License is distributed on an "AS IS" BASIS,\r
+rem    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+rem    See the License for the specific language governing permissions and\r
+rem    limitations under the License.\r
+rem\r
+\r
+if not "%ECHO%" == "" echo %ECHO%\r
+\r
+setlocal\r
+set DIRNAME=%~dp0%\r
+set PROGNAME=%~nx0%\r
+set ARGS=%*\r
+\r
+rem Sourcing environment settings for Karaf similar to Tomcat's setenv\r
+SET KARAF_SCRIPT="karaf.bat"\r
+if exist "%DIRNAME%setenv.bat" (\r
+  call "%DIRNAME%setenv.bat"\r
+)\r
+\r
+rem Check console window title. Set to Karaf by default\r
+if not "%KARAF_TITLE%" == "" (\r
+    title %KARAF_TITLE%\r
+) else (\r
+    title Karaf\r
+)\r
+\r
+rem Check/Set up some easily accessible MIN/MAX params for JVM mem usage\r
+if "%JAVA_MIN_MEM%" == "" (\r
+    set JAVA_MIN_MEM=128M\r
+)\r
+if "%JAVA_MAX_MEM%" == "" (\r
+    set JAVA_MAX_MEM=512M\r
+)\r
+\r
+goto BEGIN\r
+\r
+:warn\r
+    echo %PROGNAME%: %*\r
+goto :EOF\r
+\r
+:BEGIN\r
+\r
+rem # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r
+\r
+if not "%KARAF_HOME%" == "" (\r
+    call :warn Ignoring predefined value for KARAF_HOME\r
+)\r
+set KARAF_HOME=%DIRNAME%..\r
+if not exist "%KARAF_HOME%" (\r
+    call :warn KARAF_HOME is not valid: "%KARAF_HOME%"\r
+    goto END\r
+)\r
+\r
+if not "%KARAF_BASE%" == "" (\r
+    if not exist "%KARAF_BASE%" (\r
+       call :warn KARAF_BASE is not valid: "%KARAF_BASE%"\r
+       goto END\r
+    )\r
+)\r
+if "%KARAF_BASE%" == "" (\r
+  set "KARAF_BASE=%KARAF_HOME%"\r
+)\r
+\r
+if not "%KARAF_DATA%" == "" (\r
+    if not exist "%KARAF_DATA%" (\r
+        call :warn KARAF_DATA is not valid: "%KARAF_DATA%"\r
+        goto END\r
+    )\r
+)\r
+if "%KARAF_DATA%" == "" (\r
+    set "KARAF_DATA=%KARAF_BASE%\data"\r
+)\r
+\r
+if not "%KARAF_ETC%" == "" (\r
+    if not exist "%KARAF_ETC%" (\r
+        call :warn KARAF_ETC is not valid: "%KARAF_ETC%"\r
+        goto END\r
+    )\r
+)\r
+if "%KARAF_ETC%" == "" (\r
+    set "KARAF_ETC=%KARAF_BASE%\etc"\r
+)\r
+\r
+set LOCAL_CLASSPATH=%CLASSPATH%\r
+set JAVA_MODE=-server\r
+if not exist "%JAVA_HOME%\bin\server\jvm.dll" (\r
+    if not exist "%JAVA_HOME%\jre\bin\server\jvm.dll" (\r
+        echo WARNING: Running Karaf on a Java HotSpot Client VM because server mode is not available.\r
+        echo Install a Java Development Kit to fix this.\r
+        echo For more details see http://java.sun.com/products/hotspot/whitepaper.html#client\r
+        set JAVA_MODE=-client\r
+    )\r
+)\r
+set DEFAULT_JAVA_OPTS=%JAVA_MODE% -Xms%JAVA_MIN_MEM% -Xmx%JAVA_MAX_MEM% -Dderby.system.home="%KARAF_DATA%\derby" -Dderby.storage.fileSyncTransactionLog=true -Dcom.sun.management.jmxremote  -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass\r
+\r
+rem Check some easily accessible MIN/MAX params for JVM mem usage\r
+if not "%JAVA_PERM_MEM%" == "" (\r
+    set DEFAULT_JAVA_OPTS=%DEFAULT_JAVA_OPTS% -XX:PermSize=%JAVA_PERM_MEM%\r
+)\r
+if not "%JAVA_MAX_PERM_MEM%" == "" (\r
+    set DEFAULT_JAVA_OPTS=%DEFAULT_JAVA_OPTS% -XX:MaxPermSize=%JAVA_MAX_PERM_MEM%\r
+)\r
+\r
+set CLASSPATH=%LOCAL_CLASSPATH%;%KARAF_BASE%\conf\r
+set DEFAULT_JAVA_DEBUG_OPTS=-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005\r
+\r
+if "%LOCAL_CLASSPATH%" == "" goto :KARAF_CLASSPATH_EMPTY\r
+    set CLASSPATH=%LOCAL_CLASSPATH%;%KARAF_BASE%\conf\r
+    goto :KARAF_CLASSPATH_END\r
+:KARAF_CLASSPATH_EMPTY\r
+    set CLASSPATH=%KARAF_BASE%\conf\r
+:KARAF_CLASSPATH_END\r
+\r
+rem Setup Karaf Home\r
+if exist "%KARAF_HOME%\conf\karaf-rc.cmd" call %KARAF_HOME%\conf\karaf-rc.cmd\r
+if exist "%HOME%\karaf-rc.cmd" call %HOME%\karaf-rc.cmd\r
+\r
+rem Support for loading native libraries\r
+set PATH=%PATH%;%KARAF_BASE%\lib;%KARAF_HOME%\lib\r
+\r
+rem Setup the Java Virtual Machine\r
+if not "%JAVA%" == "" goto :Check_JAVA_END\r
+    if not "%JAVA_HOME%" == "" goto :TryJDKEnd\r
+        call :warn JAVA_HOME not set; results may vary\r
+:TryJRE\r
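+    rem Look up the installed JRE version and its JavaHome in the Windows registry\r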
+    start /w regedit /e __reg1.txt "HKEY_LOCAL_MACHINE\SOFTWARE\JavaSoft\Java Runtime Environment"\r
+    if not exist __reg1.txt goto :TryJDK\r
+    type __reg1.txt | find "CurrentVersion" > __reg2.txt\r
+    if errorlevel 1 goto :TryJDK\r
+    for /f "tokens=2 delims==" %%x in (__reg2.txt) do set JavaTemp=%%~x\r
+    if errorlevel 1 goto :TryJDK\r
+    set JavaTemp=%JavaTemp%##\r
+    set JavaTemp=%JavaTemp:                ##=##%\r
+    set JavaTemp=%JavaTemp:        ##=##%\r
+    set JavaTemp=%JavaTemp:    ##=##%\r
+    set JavaTemp=%JavaTemp:  ##=##%\r
+    set JavaTemp=%JavaTemp: ##=##%\r
+    set JavaTemp=%JavaTemp:##=%\r
+    del __reg1.txt\r
+    del __reg2.txt\r
+    start /w regedit /e __reg1.txt "HKEY_LOCAL_MACHINE\SOFTWARE\JavaSoft\Java Runtime Environment\%JavaTemp%"\r
+    if not exist __reg1.txt goto :TryJDK\r
+    type __reg1.txt | find "JavaHome" > __reg2.txt\r
+    if errorlevel 1 goto :TryJDK\r
+    for /f "tokens=2 delims==" %%x in (__reg2.txt) do set JAVA_HOME=%%~x\r
+    if errorlevel 1 goto :TryJDK\r
+    del __reg1.txt\r
+    del __reg2.txt\r
+    goto TryJDKEnd\r
+:TryJDK\r
+    start /w regedit /e __reg1.txt "HKEY_LOCAL_MACHINE\SOFTWARE\JavaSoft\Java Development Kit"\r
+    if not exist __reg1.txt (\r
+        goto TryRegJRE\r
+    )\r
+    type __reg1.txt | find "CurrentVersion" > __reg2.txt\r
+    if errorlevel 1 (\r
+        goto TryRegJRE\r
+    )\r
+    for /f "tokens=2 delims==" %%x in (__reg2.txt) do set JavaTemp=%%~x\r
+    if errorlevel 1 (\r
+        goto TryRegJRE\r
+    )\r
+    set JavaTemp=%JavaTemp%##\r
+    set JavaTemp=%JavaTemp:                ##=##%\r
+    set JavaTemp=%JavaTemp:        ##=##%\r
+    set JavaTemp=%JavaTemp:    ##=##%\r
+    set JavaTemp=%JavaTemp:  ##=##%\r
+    set JavaTemp=%JavaTemp: ##=##%\r
+    set JavaTemp=%JavaTemp:##=%\r
+    del __reg1.txt\r
+    del __reg2.txt\r
+    start /w regedit /e __reg1.txt "HKEY_LOCAL_MACHINE\SOFTWARE\JavaSoft\Java Development Kit\%JavaTemp%"\r
+    if not exist __reg1.txt (\r
+        goto TryRegJRE\r
+    )\r
+    type __reg1.txt | find "JavaHome" > __reg2.txt\r
+    if errorlevel 1 (\r
+        goto TryRegJRE\r
+    )\r
+    for /f "tokens=2 delims==" %%x in (__reg2.txt) do set JAVA_HOME=%%~x\r
+    if errorlevel 1 (\r
+        goto TryRegJRE\r
+    )\r
+    del __reg1.txt\r
+    del __reg2.txt\r
+:TryRegJRE\r
+    rem try getting the JAVA_HOME from registry\r
+    FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKLM\Software\JavaSoft\Java Runtime Environment" /v CurrentVersion`) DO (\r
+       set JAVA_VERSION=%%A\r
+    )\r
+    FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKLM\Software\JavaSoft\Java Runtime Environment\%JAVA_VERSION%" /v JavaHome`) DO (\r
+       set JAVA_HOME=%%A %%B\r
+    )\r
+    if not exist "%JAVA_HOME%" (\r
+       goto TryRegJDK\r
+    )\r
+       goto TryJDKEnd\r
+:TryRegJDK\r
+    rem try getting the JAVA_HOME from registry\r
+    FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKLM\Software\JavaSoft\Java Development Kit" /v CurrentVersion`) DO (\r
+       set JAVA_VERSION=%%A\r
+    )\r
+    FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKLM\Software\JavaSoft\Java Development Kit\%JAVA_VERSION%" /v JavaHome`) DO (\r
+       set JAVA_HOME=%%A %%B\r
+    )\r
+    if not exist "%JAVA_HOME%" (\r
+       call :warn Unable to retrieve JAVA_HOME from Registry\r
+    )\r
+        goto TryJDKEnd\r
+:TryJDKEnd\r
+    if not exist "%JAVA_HOME%" (\r
+        call :warn JAVA_HOME is not valid: "%JAVA_HOME%"\r
+        goto END\r
+    )\r
+    set JAVA=%JAVA_HOME%\bin\java\r
+:Check_JAVA_END\r
+\r
+if "%JAVA_OPTS%" == "" set JAVA_OPTS=%DEFAULT_JAVA_OPTS%\r
+\r
+if "%KARAF_DEBUG%" == "" goto :KARAF_DEBUG_END\r
+    if "%1" == "stop" goto :KARAF_DEBUG_END\r
+    if "%1" == "client" goto :KARAF_DEBUG_END\r
+    rem Use the defaults if JAVA_DEBUG_OPTS was not set\r
+    if "%JAVA_DEBUG_OPTS%" == "" set JAVA_DEBUG_OPTS=%DEFAULT_JAVA_DEBUG_OPTS%\r
+\r
+    set "JAVA_OPTS=%JAVA_DEBUG_OPTS% %JAVA_OPTS%"\r
+    call :warn Enabling Java debug options: %JAVA_DEBUG_OPTS%\r
+:KARAF_DEBUG_END\r
+\r
+if "%KARAF_PROFILER%" == "" goto :KARAF_PROFILER_END\r
+    set KARAF_PROFILER_SCRIPT=%KARAF_HOME%\conf\profiler\%KARAF_PROFILER%.cmd\r
+\r
+    if exist "%KARAF_PROFILER_SCRIPT%" goto :KARAF_PROFILER_END\r
+    call :warn Missing configuration for profiler '%KARAF_PROFILER%': %KARAF_PROFILER_SCRIPT%\r
+    goto END\r
+:KARAF_PROFILER_END\r
+\r
+rem Setup the classpath\r
+pushd "%KARAF_HOME%\lib"\r
+for %%G in (karaf*.jar) do call:APPEND_TO_CLASSPATH %%G\r
+popd\r
+goto CLASSPATH_END\r
+\r
+:APPEND_TO_CLASSPATH\r
+set filename=%~1\r
+set suffix=%filename:~-4%\r
+if %suffix% equ .jar set CLASSPATH=%CLASSPATH%;%KARAF_HOME%\lib\%filename%\r
+goto :EOF\r
+\r
+:CLASSPATH_END\r
+\r
+rem Execute the JVM or the load the profiler\r
+if "%KARAF_PROFILER%" == "" goto :RUN\r
+    rem Execute the profiler if it has been configured\r
+    call :warn Loading profiler script: %KARAF_PROFILER_SCRIPT%\r
+    call %KARAF_PROFILER_SCRIPT%\r
+\r
+:RUN\r
+    SET OPTS=-Dkaraf.startLocalConsole=true -Dkaraf.startRemoteShell=true\r
+    SET MAIN=org.apache.karaf.main.Main\r
+    SET SHIFT=false\r
+\r
+:RUN_LOOP\r
+    if "%1" == "stop" goto :EXECUTE_STOP\r
+    if "%1" == "status" goto :EXECUTE_STATUS\r
+    if "%1" == "console" goto :EXECUTE_CONSOLE\r
+    if "%1" == "server" goto :EXECUTE_SERVER\r
+    if "%1" == "client" goto :EXECUTE_CLIENT\r
+    if "%1" == "clean" goto :EXECUTE_CLEAN\r
+    if "%1" == "debug" goto :EXECUTE_DEBUG\r
+    goto :EXECUTE\r
+\r
+:EXECUTE_STOP\r
+    SET MAIN=org.apache.karaf.main.Stop\r
+    shift\r
+    goto :RUN_LOOP\r
+\r
+:EXECUTE_STATUS\r
+    SET MAIN=org.apache.karaf.main.Status\r
+    shift\r
+    goto :RUN_LOOP\r
+\r
+:EXECUTE_CONSOLE\r
+    shift\r
+    goto :RUN_LOOP\r
+\r
+:EXECUTE_SERVER\r
+    SET OPTS=-Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true\r
+    shift\r
+    goto :RUN_LOOP\r
+\r
+:EXECUTE_CLIENT\r
+    SET OPTS=-Dkaraf.startLocalConsole=true -Dkaraf.startRemoteShell=false\r
+    shift\r
+    goto :RUN_LOOP\r
+\r
+:EXECUTE_CLEAN\r
+    rmdir /S /Q "%KARAF_DATA%"\r
+    shift\r
+    goto :RUN_LOOP\r
+\r
+:EXECUTE_DEBUG\r
+    if "%JAVA_DEBUG_OPTS%" == "" set JAVA_DEBUG_OPTS=%DEFAULT_JAVA_DEBUG_OPTS%\r
+    set "JAVA_OPTS=%JAVA_DEBUG_OPTS% %JAVA_OPTS%"\r
+    shift\r
+    goto :RUN_LOOP\r
+\r
+:EXECUTE\r
+    SET ARGS=%1 %2 %3 %4 %5 %6 %7 %8\r
+    rem Execute the Java Virtual Machine\r
+    cd "%KARAF_BASE%"\r
+    "%JAVA%" %JAVA_OPTS% %OPTS% -classpath "%CLASSPATH%" -Djava.endorsed.dirs="%JAVA_HOME%\jre\lib\endorsed;%JAVA_HOME%\lib\endorsed;%KARAF_HOME%\lib\endorsed" -Djava.ext.dirs="%JAVA_HOME%\jre\lib\ext;%JAVA_HOME%\lib\ext;%KARAF_HOME%\lib\ext" -Dkaraf.instances="%KARAF_HOME%\instances" -Dkaraf.home="%KARAF_HOME%" -Dkaraf.base="%KARAF_BASE%" -Dkaraf.etc="%KARAF_ETC%" -Djava.io.tmpdir="%KARAF_DATA%\tmp" -Dkaraf.data="%KARAF_DATA%" -Djava.util.logging.config.file="%KARAF_BASE%\etc\java.util.logging.properties" %KARAF_OPTS% %MAIN% %ARGS%\r
+\r
+rem # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r
+\r
+:END\r
+\r
+endlocal\r
+\r
+if not "%PAUSE%" == "" pause\r
+\r
+:END_NO_PAUSE\r
+\r
index c89cfc1b9e8914148bc58fc6f3a375baca026e53..e2b6642501edf8acefb9f47743c5166455c8e8db 100644 (file)
@@ -867,6 +867,7 @@ public class FlowConfig extends ConfigurationObject implements Serializable {
                     }
                     continue;
                 }
+
                 sstr = Pattern.compile(ActionType.SET_NEXT_HOP.toString() + "=(.*)").matcher(actiongrp);
                 if (sstr.matches()) {
                     if (!NetUtils.isIPAddressValid(sstr.group(1))) {
@@ -875,7 +876,57 @@ public class FlowConfig extends ConfigurationObject implements Serializable {
                     }
                     continue;
                 }
-            }
+
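+                // The remaining well-known action types either carry no payload
+                // or need no extra validation here, so just accept them.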
+                sstr = Pattern.compile(ActionType.OUTPUT + "=(.*)").matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                sstr = Pattern.compile(ActionType.DROP.toString()).matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                sstr = Pattern.compile(ActionType.LOOPBACK.toString()).matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                sstr = Pattern.compile(ActionType.FLOOD.toString()).matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                sstr = Pattern.compile(ActionType.FLOOD_ALL.toString()).matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                sstr = Pattern.compile(ActionType.SW_PATH.toString()).matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                sstr = Pattern.compile(ActionType.HW_PATH.toString()).matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                sstr = Pattern.compile(ActionType.CONTROLLER.toString()).matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                sstr = Pattern.compile(ActionType.POP_VLAN.toString()).matcher(actiongrp);
+                if (sstr.matches()) {
+                    continue;
+                }
+
+                // If execution reaches this point, the action group contains an
+                // action that the controller does not recognize; return an error
+
+                return new Status(StatusCode.BADREQUEST, String.format("%s is an unsupported action", actiongrp));
+            }
         } catch (NumberFormatException e) {
             return new Status(StatusCode.BADREQUEST, String.format("Invalid number format %s", e.getMessage()));
         }
index 685ccdb7c44ea78931de795bb21547c1bd116b36..3c367b9af45d267816bca893c257b35d8c42c8f8 100644 (file)
@@ -760,9 +760,9 @@ public class frmTest {
         actions.add(ActionType.SET_NW_SRC.toString() + "=1.1.1.1");
         actions.add(ActionType.SET_NW_DST.toString() + "=2.2.2.2");
         actions.add(ActionType.CONTROLLER.toString());
-        actions.add(ActionType.SET_NW_TOS.toString() + "1");
-        actions.add(ActionType.SET_TP_SRC.toString() + "60");
-        actions.add(ActionType.SET_TP_DST.toString() + "8080");
+        actions.add(ActionType.SET_NW_TOS.toString() + "=1");
+        actions.add(ActionType.SET_TP_SRC.toString() + "=60");
+        actions.add(ActionType.SET_TP_DST.toString() + "=8080");
         actions.add(ActionType.SET_NEXT_HOP.toString() + "=1.1.1.1");
 
         return actions;
index ff68176f1fed36f3b8c606421cee8790a73ec582..0653eeb2a63810d5b89e291facbb4c4f7e822e2f 100644 (file)
@@ -38,7 +38,7 @@ public class FRMRuntimeDataProvider implements RuntimeDataProvider, DataCommitHa
     private DataProviderService dataService;
     private IForwardingRulesManager manager;
 
-    public Registration<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> init() {
+    public Registration init() {
         return this.dataService.registerCommitHandler(FRMRuntimeDataProvider.FLOWS_PATH, this);
     }
 
index b6e611bf1155087e4faf965e148443e34692308b..e9f56f6a0301798b963fe9fb567c34076a640a44 100644 (file)
@@ -242,30 +242,29 @@ public class TestFromSalConversionsUtils {
 
     private void checkOdActions(
             List<org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action> actions) {
-        checkOdAction(actions, FloodActionCase.class, false);
-        checkOdAction(actions, FloodAllActionCase.class, false);
-        checkOdAction(actions, HwPathActionCase.class, false);
-        checkOdAction(actions, LoopbackActionCase.class, false);
-        checkOdAction(actions, PopVlanActionCase.class, false);
-        checkOdAction(actions, PushVlanActionCase.class, true);
-        checkOdAction(actions, SetDlDstActionCase.class, true);
-        checkOdAction(actions, SetDlSrcActionCase.class, true);
-        checkOdAction(actions, SetDlTypeActionCase.class, true);
-        checkOdAction(actions, SetNwTosActionCase.class, true);
-        checkOdAction(actions, SetNwDstActionCase.class, true);
-        checkOdAction(actions, SetNwSrcActionCase.class, true);
-        checkOdAction(actions, SetNextHopActionCase.class, true);
-        checkOdAction(actions, SetTpDstActionCase.class, true);
-        checkOdAction(actions, SetTpSrcActionCase.class, true);
-        checkOdAction(actions, SetVlanCfiActionCase.class, true);
-        checkOdAction(actions, SetVlanIdActionCase.class, true);
-        checkOdAction(actions, SetVlanPcpActionCase.class, true);
-        checkOdAction(actions, SwPathActionCase.class, false);
+        checkOdAction(actions, FloodActionCase.class);
+        checkOdAction(actions, FloodAllActionCase.class);
+        checkOdAction(actions, HwPathActionCase.class);
+        checkOdAction(actions, LoopbackActionCase.class);
+        checkOdAction(actions, PopVlanActionCase.class);
+        checkOdAction(actions, PushVlanActionCase.class);
+        checkOdAction(actions, SetDlDstActionCase.class);
+        checkOdAction(actions, SetDlSrcActionCase.class);
+        checkOdAction(actions, SetDlTypeActionCase.class);
+        checkOdAction(actions, SetNwTosActionCase.class);
+        checkOdAction(actions, SetNwDstActionCase.class);
+        checkOdAction(actions, SetNwSrcActionCase.class);
+        checkOdAction(actions, SetNextHopActionCase.class);
+        checkOdAction(actions, SetTpDstActionCase.class);
+        checkOdAction(actions, SetTpSrcActionCase.class);
+        checkOdAction(actions, SetVlanCfiActionCase.class);
+        checkOdAction(actions, SetVlanIdActionCase.class);
+        checkOdAction(actions, SetVlanPcpActionCase.class);
+        checkOdAction(actions, SwPathActionCase.class);
     }
 
     private void checkOdAction(
-            List<org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action> actions, Class<?> cl,
-            boolean b) {
+            List<org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action> actions, Class<?> cl) {
         int numOfFoundActions = 0;
         for (org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action action : actions) {
             org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.Action innerAction = action
index b3096e6478857aa1ca0286efda6c7a78912b4813..ed5e19219353e3265a5b7f55ef58dccd5f6b7ad7 100644 (file)
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>flow-management-compatibility</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-binding-api</artifactId>
index 6ed61e3024b9522dbc89d99a979f93fbcb147c7f..9724d31f9ae6cd3ab8eaef0c23212bf626cfca84 100644 (file)
@@ -15,7 +15,7 @@ import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
 import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
 import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
 import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -30,7 +30,7 @@ class FlowCapableInventoryProvider implements AutoCloseable, Runnable {
     private final BlockingQueue<InventoryOperation> queue = new LinkedBlockingDeque<>(QUEUE_DEPTH);
     private final NotificationProviderService notificationService;
     private final DataProviderService dataService;
-    private Registration<?> listenerRegistration;
+    private ListenerRegistration<?> listenerRegistration;
     private Thread thread;
 
     FlowCapableInventoryProvider(final DataProviderService dataService, final NotificationProviderService notificationService) {
diff --git a/opendaylight/md-sal/sal-akka-raft/pom.xml b/opendaylight/md-sal/sal-akka-raft/pom.xml
new file mode 100644 (file)
index 0000000..50442bd
--- /dev/null
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>sal-parent</artifactId>
+    <version>1.1-SNAPSHOT</version>
+  </parent>
+  <artifactId>sal-akka-raft</artifactId>
+  <packaging>bundle</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-actor_${scala.version}</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-cluster_${scala.version}</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-persistence-experimental_${scala.version}</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-remote_${scala.version}</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-testkit_${scala.version}</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.core</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.scala-lang</groupId>
+      <artifactId>scala-library</artifactId>
+    </dependency>
+
+    <!-- Test Dependencies -->
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-simple</artifactId>
+      <version>${slf4j.version}</version>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <plugins>
+
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <extensions>true</extensions>
+        <configuration>
+          <instructions>
+            <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+            <Export-package></Export-package>
+            <Private-Package></Private-Package>
+            <Import-Package></Import-Package>
+          </instructions>
+        </configuration>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.jacoco</groupId>
+        <artifactId>jacoco-maven-plugin</artifactId>
+        <configuration>
+          <includes>
+            <include>org.opendaylight.controller.*</include>
+          </includes>
+          <check>false</check>
+        </configuration>
+        <executions>
+          <execution>
+            <id>pre-test</id>
+            <goals>
+              <goal>prepare-agent</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>post-test</id>
+            <goals>
+              <goal>report</goal>
+            </goals>
+            <phase>test</phase>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <scm>
+    <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+    <tag>HEAD</tag>
+    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
+  </scm>
+</project>
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ClientActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ClientActor.java
new file mode 100644 (file)
index 0000000..2560f16
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.example;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.actor.UntypedActor;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.japi.Creator;
+import org.opendaylight.controller.cluster.example.messages.KeyValue;
+import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
+
+public class ClientActor extends UntypedActor {
+    protected final LoggingAdapter LOG =
+        Logging.getLogger(getContext().system(), this);
+
+    private final ActorRef target;
+
+    public ClientActor(ActorRef target){
+        this.target = target;
+    }
+
+    public static Props props(final ActorRef target){
+        return Props.create(new Creator<ClientActor>(){
+
+            @Override public ClientActor create() throws Exception {
+                return new ClientActor(target);
+            }
+        });
+    }
+
+    @Override public void onReceive(Object message) throws Exception {
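+        // Forward key-value writes to the target actor and log the
+        // acknowledgement once the entry has been saved.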
+        if(message instanceof KeyValue) {
+            target.tell(message, getSelf());
+        } else if(message instanceof KeyValueSaved){
+            LOG.info("KeyValue saved");
+        }
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java
new file mode 100644 (file)
index 0000000..8d4d5e4
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.example;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.japi.Creator;
+import org.opendaylight.controller.cluster.example.messages.KeyValue;
+import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
+import org.opendaylight.controller.cluster.example.messages.PrintRole;
+import org.opendaylight.controller.cluster.example.messages.PrintState;
+import org.opendaylight.controller.cluster.raft.RaftActor;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A sample actor showing how the RaftActor is to be extended
+ */
+public class ExampleActor extends RaftActor {
+
+    private final Map<String, String> state = new HashMap<>();
+
+    private long persistIdentifier = 1;
+
+
+    public ExampleActor(String id, Map<String, String> peerAddresses) {
+        super(id, peerAddresses);
+    }
+
+    public static Props props(final String id, final Map<String, String> peerAddresses){
+        return Props.create(new Creator<ExampleActor>(){
+
+            @Override public ExampleActor create() throws Exception {
+                return new ExampleActor(id, peerAddresses);
+            }
+        });
+    }
+
+    @Override public void onReceiveCommand(Object message){
+        if(message instanceof KeyValue){
+
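+            // Only the leader persists client writes; a follower forwards the
+            // message to the current leader when one is known.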
+            if(isLeader()) {
+                String persistId = Long.toString(persistIdentifier++);
+                persistData(getSender(), persistId, message);
+            } else {
+                if(getLeader() != null) {
+                    getLeader().forward(message, getContext());
+                }
+            }
+
+        } else if (message instanceof PrintState) {
+            LOG.debug("State of the node:"+getId() + " is="+state.size());
+
+        } else if (message instanceof PrintRole) {
+            LOG.debug(getId() + " = " + getRaftState());
+        }
+        super.onReceiveCommand(message);
+    }
+
+    @Override protected void applyState(ActorRef clientActor, String identifier,
+        Object data) {
+        if(data instanceof KeyValue){
+            KeyValue kv = (KeyValue) data;
+            state.put(kv.getKey(), kv.getValue());
+            if(clientActor != null) {
+                clientActor.tell(new KeyValueSaved(), getSelf());
+            }
+        }
+    }
+
+    @Override protected Object createSnapshot() {
+        return state;
+    }
+
+    @Override protected void applySnapshot(Object snapshot) {
+        state.clear();
+        state.putAll((HashMap) snapshot);
+    }
+
+    @Override public void onReceiveRecover(Object message) {
+        super.onReceiveRecover(message);
+    }
+
+    @Override public String persistenceId() {
+        return getId();
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java
new file mode 100644 (file)
index 0000000..fbe1447
--- /dev/null
@@ -0,0 +1,67 @@
+package org.opendaylight.controller.cluster.example;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.example.messages.KeyValue;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+/**
+ * Created by kramesha on 7/16/14.
+ */
+public class LogGenerator {
+    private Map<ActorRef, LoggingThread> clientToLoggingThread = new HashMap<ActorRef, LoggingThread>();
+
+    public void startLoggingForClient(ActorRef client) {
+        LoggingThread lt = new LoggingThread(client);
+        clientToLoggingThread.put(client, lt);
+        Thread t = new Thread(lt);
+        t.start();
+    }
+
+    public void stopLoggingForClient(ActorRef client) {
+        clientToLoggingThread.get(client).stopLogging();
+        clientToLoggingThread.remove(client);
+    }
+
+    public class LoggingThread implements Runnable {
+
+        private ActorRef clientActor;
+        private volatile boolean stopLogging = false;
+
+        public LoggingThread(ActorRef clientActor) {
+            this.clientActor = clientActor;
+        }
+
+        public void run() {
+            Random r = new Random();
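+            // Keep sending random key-value pairs to the client actor, pausing
+            // up to 9 seconds between messages, until stopLogging is set.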
+            while (true) {
+                if (stopLogging) {
+                    System.out.println("Logging stopped for client:" + clientActor.path());
+                    break;
+                }
+                String key = clientActor.path().name();
+                int random = r.nextInt(100);
+                clientActor.tell(new KeyValue(key+"-key-" + random, "value-" + random), null);
+                try {
+                    Thread.sleep((random%10) * 1000);
+                } catch (InterruptedException e) {
+                    e.printStackTrace();
+                }
+            }
+        }
+
+        public void stopLogging() {
+            stopLogging = true;
+        }
+
+        public void startLogging() {
+            stopLogging = false;
+        }
+
+
+    }
+
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/Main.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/Main.java
new file mode 100644 (file)
index 0000000..a148ed4
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.example;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
+import org.opendaylight.controller.cluster.example.messages.KeyValue;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class Main {
+    private static final ActorSystem actorSystem = ActorSystem.create();
+    // Create three example actors
+    private static Map<String, String> allPeers = new HashMap<>();
+
+    static {
+        allPeers.put("example-1", "akka://default/user/example-1");
+        allPeers.put("example-2", "akka://default/user/example-2");
+        allPeers.put("example-3", "akka://default/user/example-3");
+    }
+
+    public static void main(String[] args) throws Exception{
+        ActorRef example1Actor =
+            actorSystem.actorOf(ExampleActor.props("example-1",
+                withoutPeer("example-1")), "example-1");
+
+        ActorRef example2Actor =
+            actorSystem.actorOf(ExampleActor.props("example-2",
+                withoutPeer("example-2")), "example-2");
+
+        ActorRef example3Actor =
+            actorSystem.actorOf(ExampleActor.props("example-3",
+                withoutPeer("example-3")), "example-3");
+
+
+        List<ActorRef> examples = Arrays.asList(example1Actor, example2Actor, example3Actor);
+
+        ActorRef clientActor = actorSystem.actorOf(ClientActor.props(example1Actor));
+        BufferedReader br =
+            new BufferedReader(new InputStreamReader(System.in));
+
+        System.out.println("Usage :");
+        System.out.println("s <1-3> to start a peer");
+        System.out.println("k <1-3> to kill a peer");
+
+        while(true) {
+            System.out.print("Enter command (0 to exit):");
+            try {
+                String s = br.readLine();
+                String[] split = s.split(" ");
+                if(split.length > 1) {
+                    String command = split[0];
+                    String actor = split[1];
+
+                    if ("k".equals(command)) {
+                        int i = Integer.parseInt(actor);
+                        examples.get(i - 1)
+                            .tell(PoisonPill.getInstance(), null);
+                        continue;
+                    } else if ("s".equals(command)) {
+                        int i = Integer.parseInt(actor);
+                        String actorName = "example-" + i;
+                        // replace the killed actor's reference at the same index
+                        // so the example-<n> to list-position mapping stays stable
+                        examples.set(i - 1,
+                            actorSystem.actorOf(ExampleActor.props(actorName,
+                                withoutPeer(actorName)), actorName));
+                        System.out.println("Created actor : " + actorName);
+                        continue;
+                    }
+                }
+
+                int i = Integer.parseInt(s);
+                if(i == 0){
+                    System.exit(0);
+                }
+                clientActor.tell(new KeyValue("key " + i, "value " + i), null);
+            } catch (NumberFormatException nfe) {
+                System.err.println("Invalid Format!");
+            }
+        }
+    }
+
+    private static Map<String, String> withoutPeer(String peerId) {
+        Map<String, String> without = new HashMap<>(allPeers);
+        without.remove(peerId);
+        return without;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/TestDriver.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/TestDriver.java
new file mode 100644 (file)
index 0000000..c8a7835
--- /dev/null
@@ -0,0 +1,246 @@
+package org.opendaylight.controller.cluster.example;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import org.opendaylight.controller.cluster.example.messages.PrintRole;
+import org.opendaylight.controller.cluster.example.messages.PrintState;
+import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
+import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+
+
+public class TestDriver {
+
+    private static final ActorSystem actorSystem = ActorSystem.create();
+    private static Map<String, String> allPeers = new HashMap<>();
+    private static Map<String, ActorRef> clientActorRefs  = new HashMap<String, ActorRef>();
+    private static Map<String, ActorRef> actorRefs = new HashMap<String, ActorRef>();
+    private static LogGenerator logGenerator = new LogGenerator();
+
+    /**
+     * Create nodes, add clients and start logging.
+     * Commands:
+     *  bye
+     *  createNodes:{num}
+     *  addNodes:{num}
+     *  stopNode:{nodeName}
+     *  addClients:{num}
+     *  addClientsToNode:{nodeName, num}
+     *  startLogging
+     *  stopLogging
+     *  startLoggingForClient:{nodeName}
+     *  stopLoggingForClient:{nodeName}
+     *  printNodes
+     *  printState
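+     *
+     * A plausible session, assuming the commands above (the values are
+     * illustrative only):
+     * <pre>
+     *  createNodes:3
+     *  addClients:1
+     *  startLogging
+     *  printNodes
+     *  stopLogging
+     *  bye
+     * </pre>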
+     * @param args
+     * @throws Exception
+     */
+    public static void main(String[] args) throws Exception {
+        TestDriver td = new TestDriver();
+
+        System.out.println("Enter command (type bye to exit):");
+
+
+        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
+        while(true) {
+            String command = br.readLine();
+            if (command.startsWith("bye")) {
+                System.exit(0);
+
+            } else if (command.startsWith("createNodes")) {
+                String[] arr = command.split(":");
+                int n = Integer.parseInt(arr[1]);
+                td.createNodes(n);
+
+            } else if (command.startsWith("addNodes")) {
+                String[] arr = command.split(":");
+                int n = Integer.parseInt(arr[1]);
+                td.addNodes(n);
+
+            } else if (command.startsWith("addClients")) {
+                String[] arr = command.split(":");
+                int n = Integer.parseInt(arr[1]);
+                td.addClients(n);
+
+            } else if (command.startsWith("addClientsToNode")) {
+                String[] arr = command.split(":");
+                String nodeName = arr[1];
+                int n = Integer.parseInt(arr[1]);
+                td.addClientsToNode(nodeName, n);
+
+            } else if (command.startsWith("stopNode")) {
+                String[] arr = command.split(":");
+                td.stopNode(arr[1]);
+
+            } else if (command.startsWith("startLogging")) {
+                td.startAllLogging();
+
+            } else if (command.startsWith("startLoggingForClient")) {
+                String[] arr = command.split(":");
+                td.startLoggingForClient(clientActorRefs.get(arr[1]));
+
+            } else if (command.startsWith("stopLogging")) {
+                td.stopAllLogging();
+
+            } else if (command.startsWith("stopLoggingForClient")) {
+                String[] arr = command.split(":");
+                td.stopLoggingForClient(clientActorRefs.get(arr[1]));
+
+            } else if (command.startsWith("printState")) {
+                td.printState();
+            } else if (command.startsWith("printNodes")) {
+                td.printNodes();
+            }
+
+        }
+    }
+
+    public void createNodes(int num) {
+        for (int i=0; i < num; i++)  {
+            int rand = getUnusedRandom(num);
+            allPeers.put("example-"+rand, "akka://default/user/example-"+rand);
+        }
+
+        for (String s : allPeers.keySet())  {
+            ActorRef exampleActor = actorSystem.actorOf(
+                ExampleActor.props(s, withoutPeer(s)), s);
+            actorRefs.put(s, exampleActor);
+            System.out.println("Created node:"+s);
+
+        }
+    }
+
+    // add new nodes , pass in the count
+    public void addNodes(int num) {
+        Map<String, String> newPeers = new HashMap<>();
+        for (int i=0; i < num; i++)  {
+            int rand = getUnusedRandom(num);
+            newPeers.put("example-"+rand, "akka://default/user/example-"+rand);
+            allPeers.put("example-"+rand, "akka://default/user/example-"+rand);
+
+        }
+        Map<String, ActorRef> newActorRefs = new HashMap<String, ActorRef>(num);
+        for (Map.Entry<String, String> entry : newPeers.entrySet())  {
+            ActorRef exampleActor = actorSystem.actorOf(
+                ExampleActor.props(entry.getKey(), withoutPeer(entry.getKey())), entry.getKey());
+            newActorRefs.put(entry.getKey(), exampleActor);
+
+            //now also add these new nodes as peers from the previous nodes
+            for (ActorRef actor : actorRefs.values()) {
+                actor.tell(new AddRaftPeer(entry.getKey(), entry.getValue()), null);
+            }
+
+            System.out.println("Added node:" + entry);
+        }
+
+        actorRefs.putAll(newActorRefs);
+    }
+
+
+    // add num clients to all nodes in the system
+    public void addClients(int num) {
+        for(Map.Entry<String,ActorRef> actorRefEntry : actorRefs.entrySet()) {
+            for (int i=0; i < num; i++) {
+                String clientName = "client-" + i + "-" + actorRefEntry.getKey();
+                ActorRef clientActor = actorSystem.actorOf(
+                    ClientActor.props(actorRefEntry.getValue()), clientName);
+                clientActorRefs.put(clientName, clientActor);
+                System.out.println("Created client-node:" + clientName);
+            }
+        }
+    }
+
+    // add num clients to a node
+    public void addClientsToNode(String actorName, int num) {
+        ActorRef actorRef = actorRefs.get(actorName);
+        for (int i=0; i < num; i++) {
+            String clientName = "client-" + i + "-" + actorRef;
+            clientActorRefs.put(clientName,
+                actorSystem.actorOf(ClientActor.props(actorRef), clientName));
+            System.out.println("Added client-node:" + clientName);
+        }
+    }
+
+    public void stopNode(String actorName) {
+        ActorRef actorRef = actorRefs.get(actorName);
+        String clientName = "client-"+actorName;
+        if(clientActorRefs.containsKey(clientName)) {
+            actorSystem.stop(clientActorRefs.get(clientName));
+            clientActorRefs.remove(clientName);
+        }
+        actorSystem.stop(actorRef);
+        actorRefs.remove(actorName);
+
+        for (ActorRef actor : actorRefs.values()) {
+            actor.tell(new RemoveRaftPeer(actorName), null);
+        }
+
+        allPeers.remove(actorName);
+
+    }
+
+    public void startAllLogging() {
+        if(!clientActorRefs.isEmpty()) {
+            for(Map.Entry<String,ActorRef> client : clientActorRefs.entrySet()) {
+                logGenerator.startLoggingForClient(client.getValue());
+                System.out.println("Started logging for client:"+client.getKey());
+            }
+        } else {
+            System.out.println("There are no clients for any nodes. First create clients using commands- addClients:<num> or addClientsToNode:<nodename>:<num>");
+        }
+
+    }
+
+    public void startLoggingForClient(ActorRef client) {
+        logGenerator.startLoggingForClient(client);
+    }
+
+    public void stopAllLogging() {
+        for(Map.Entry<String,ActorRef> client : clientActorRefs.entrySet()) {
+            logGenerator.stopLoggingForClient(client.getValue());
+        }
+    }
+
+    public void stopLoggingForClient(ActorRef client) {
+        logGenerator.stopLoggingForClient(client);
+    }
+
+    public void printState() {
+        for (ActorRef ref : actorRefs.values()) {
+            ref.tell(new PrintState(), null);
+        }
+    }
+
+    public void printNodes() {
+        for (ActorRef ref : actorRefs.values()) {
+            ref.tell(new PrintRole(), null);
+        }
+    }
+
+    public ActorRef getLeader() {
+        return null;
+    }
+
+    private int getUnusedRandom(int num) {
+        int rand = -1;
+        do {
+            rand = (new Random()).nextInt(num * num);
+        } while (allPeers.keySet().contains("example-"+rand));
+
+        return rand;
+    }
+
+    private static Map<String, String> withoutPeer(String peerId) {
+        Map<String, String> without = new ConcurrentHashMap<>(allPeers);
+        without.remove(peerId);
+
+        return without;
+    }
+}
+
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java
new file mode 100644 (file)
index 0000000..00cc09a
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.example.messages;
+
+import java.io.Serializable;
+
+public class KeyValue implements Serializable{
+    private final String key;
+    private final String value;
+
+    public KeyValue(String key, String value){
+        this.key = key;
+        this.value = value;
+    }
+
+    public String getKey() {
+        return key;
+    }
+
+    public String getValue() {
+        return value;
+    }
+
+    @Override public String toString() {
+        return "KeyValue{" +
+            "key='" + key + '\'' +
+            ", value='" + value + '\'' +
+            '}';
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValueSaved.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValueSaved.java
new file mode 100644 (file)
index 0000000..e10e5a7
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.example.messages;
+
+public class KeyValueSaved {
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/PrintRole.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/PrintRole.java
new file mode 100644 (file)
index 0000000..c9d4bfa
--- /dev/null
@@ -0,0 +1,7 @@
+package org.opendaylight.controller.cluster.example.messages;
+
+/**
+ * Created by kramesha on 7/17/14.
+ */
+public class PrintRole {
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/PrintState.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/messages/PrintState.java
new file mode 100644 (file)
index 0000000..dbf863d
--- /dev/null
@@ -0,0 +1,7 @@
+package org.opendaylight.controller.cluster.example.messages;
+
+/**
+ * Created by kramesha on 7/17/14.
+ */
+public class PrintState {
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java
new file mode 100644 (file)
index 0000000..4972b34
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+
+public interface ClientRequestTracker {
+    /**
+     * The client actor that is waiting for a response
+     *
+     * @return the client actor that submitted the request
+     */
+    ActorRef getClientActor();
+
+    /**
+     * The identifier of the client request
+     *
+     * @return the identifier that the derived RaftActor passed to persistData
+     */
+    String getIdentifier();
+
+    /**
+     * The index of the log entry which needs to be replicated
+     *
+     * @return
+     */
+    long getIndex();
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java
new file mode 100644 (file)
index 0000000..15de6d0
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+
+public class ClientRequestTrackerImpl implements ClientRequestTracker {
+
+    private final ActorRef clientActor;
+    private final String identifier;
+    private final long logIndex;
+
+    public ClientRequestTrackerImpl(ActorRef clientActor, String identifier,
+        long logIndex) {
+
+        this.clientActor = clientActor;
+
+        this.identifier = identifier;
+
+        this.logIndex = logIndex;
+    }
+
+    @Override public ActorRef getClientActor() {
+        return clientActor;
+    }
+
+    @Override public long getIndex() {
+        return logIndex;
+    }
+
+    @Override public String getIdentifier() {
+        return identifier;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ElectionTerm.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ElectionTerm.java
new file mode 100644 (file)
index 0000000..9f0d02e
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+/**
+ * ElectionTerm contains information about a RaftActor's election term.
+ * <p>
+ * This information includes the last known current term of the RaftActor
+ * and which peer was voted for by the RaftActor in that term
+ * <p>
+ * This class ensures that election term information is persisted
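+ * <p>
+ * A sketch of a typical call when a higher term is observed or a vote is cast
+ * (the variable names are illustrative):
+ * <pre>{@code
+ * termInformation.updateAndPersist(newTerm, candidateId);
+ * }</pre>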
+ */
+public interface ElectionTerm {
+    /**
+     * latest term server has seen (initialized to 0
+     * on first boot, increases monotonically)
+     */
+    long getCurrentTerm();
+
+    /**
+     * candidateId that received vote in current
+     * term (or null if none)
+     */
+    String getVotedFor();
+
+    /**
+     * To be called mainly when we are recovering in-memory election state from
+     * persistent storage
+     *
+     * @param currentTerm
+     * @param votedFor
+     */
+    void update(long currentTerm, String votedFor);
+
+    /**
+     * To be called when we need to update the current term either because we
+     * received a message from someone with a more up-to-date term or because we
+     * just voted for someone
+     * <p>
+     * This information needs to be persisted so that on recovery the replica
+     * can start itself in the right term and know if it has already voted in
+     * that term or not
+     *
+     * @param currentTerm
+     * @param votedFor
+     */
+    void updateAndPersist(long currentTerm, String votedFor);
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java
new file mode 100644 (file)
index 0000000..f3de983
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * The state of the follower's log as known by the Leader
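+ * <p>
+ * A sketch of how a leader might maintain these counters (names are
+ * illustrative):
+ * <pre>{@code
+ * FollowerLogInformation info = new FollowerLogInformationImpl(
+ *     followerId, new AtomicLong(lastLogIndex + 1), new AtomicLong(0));
+ * info.incrMatchIndex();  // after a successful AppendEntries reply
+ * info.decrNextIndex();   // after a rejected AppendEntries reply
+ * }</pre>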
+ */
+public interface FollowerLogInformation {
+
+    /**
+     * Increment the value of the nextIndex
+     * @return
+     */
+    public long incrNextIndex();
+
+    /**
+     * Decrement the value of the nextIndex
+     * @return
+     */
+    public long decrNextIndex();
+
+    /**
+     *
+     * @param nextIndex
+     */
+    void setNextIndex(long nextIndex);
+
+    /**
+     * Increment the value of the matchIndex
+     * @return
+     */
+    public long incrMatchIndex();
+
+    public void setMatchIndex(long matchIndex);
+
+    /**
+     * The identifier of the follower
+     * This could simply be the url of the remote actor
+     */
+    public String getId();
+
+    /**
+     * for each server, index of the next log entry
+     * to send to that server (initialized to leader
+     *    last log index + 1)
+     */
+    public AtomicLong getNextIndex();
+
+    /**
+     * for each server, index of highest log entry
+     * known to be replicated on server
+     *    (initialized to 0, increases monotonically)
+     */
+    public AtomicLong getMatchIndex();
+
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformationImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformationImpl.java
new file mode 100644 (file)
index 0000000..94f9a53
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+public class FollowerLogInformationImpl implements FollowerLogInformation{
+
+    private final String id;
+
+    private final AtomicLong nextIndex;
+
+    private final AtomicLong matchIndex;
+
+    public FollowerLogInformationImpl(String id, AtomicLong nextIndex,
+        AtomicLong matchIndex) {
+        this.id = id;
+        this.nextIndex = nextIndex;
+        this.matchIndex = matchIndex;
+    }
+
+    public long incrNextIndex(){
+        return nextIndex.incrementAndGet();
+    }
+
+    @Override public long decrNextIndex() {
+        return nextIndex.decrementAndGet();
+    }
+
+    @Override public void setNextIndex(long nextIndex) {
+        this.nextIndex.set(nextIndex);
+    }
+
+    public long incrMatchIndex(){
+        return matchIndex.incrementAndGet();
+    }
+
+    @Override public void setMatchIndex(long matchIndex) {
+        this.matchIndex.set(matchIndex);
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public AtomicLong getNextIndex() {
+        return nextIndex;
+    }
+
+    public AtomicLong getMatchIndex() {
+        return matchIndex;
+    }
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
new file mode 100644 (file)
index 0000000..0ff2341
--- /dev/null
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.japi.Procedure;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SaveSnapshotFailure;
+import akka.persistence.SaveSnapshotSuccess;
+import akka.persistence.SnapshotOffer;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.UntypedPersistentActor;
+import org.opendaylight.controller.cluster.raft.behaviors.Candidate;
+import org.opendaylight.controller.cluster.raft.behaviors.Follower;
+import org.opendaylight.controller.cluster.raft.behaviors.Leader;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.internal.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
+import org.opendaylight.controller.cluster.raft.internal.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.internal.messages.Replicate;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * RaftActor encapsulates a state machine that needs to be kept synchronized
+ * in a cluster. It implements the RAFT algorithm as described in the paper
+ * <a href='https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf'>
+ * In Search of an Understandable Consensus Algorithm</a>
+ * <p/>
+ * RaftActor has 3 states and each state has a certain behavior associated
+ * with it. A Raft actor can behave as,
+ * <ul>
+ * <li> A Leader </li>
+ * <li> A Follower (or) </li>
+ * <li> A Candidate </li>
+ * </ul>
+ * <p/>
+ * <p/>
+ * A RaftActor MUST be a Leader in order to accept requests from clients to
+ * change the state of its encapsulated state machine. Once a RaftActor becomes
+ * a Leader it is also responsible for ensuring that all followers ultimately
+ * have the same log and therefore the same state machine as itself.
+ * <p/>
+ * <p/>
+ * The current behavior of a RaftActor determines how election for leadership
+ * is initiated and how peer RaftActors react to request for votes.
+ * <p/>
+ * <p/>
+ * Each RaftActor also needs to know the current election term. It uses this
+ * information for a couple of things. One is to simply figure out who it
+ * voted for in the last election. Another is to figure out if the message
+ * it received to update its state is stale.
+ * <p/>
+ * <p/>
+ * The RaftActor uses akka-persistence to store its replicated log.
+ * Furthermore, through its behaviors a RaftActor determines
+ * <p/>
+ * <ul>
+ * <li> when a log entry should be persisted </li>
+ * <li> when a log entry should be applied to the state machine (and) </li>
+ * <li> when a snapshot should be saved </li>
+ * </ul>
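+ * <p/>
+ * A minimal sketch of a derived actor (the class, field and message types here
+ * are hypothetical and for illustration only):
+ * <pre>{@code
+ * public class CounterActor extends RaftActor {
+ *     private long count = 0;
+ *
+ *     public CounterActor(String id, Map<String, String> peerAddresses) {
+ *         super(id, peerAddresses);
+ *     }
+ *
+ *     protected void applyState(ActorRef clientActor, String identifier, Object data) {
+ *         // called once the entry is committed; apply it to the local state
+ *         count += (Long) data;
+ *     }
+ *
+ *     protected Object createSnapshot() {
+ *         return count;
+ *     }
+ *
+ *     protected void applySnapshot(Object snapshot) {
+ *         count = (Long) snapshot;
+ *     }
+ *
+ *     public String persistenceId() {
+ *         return getId();
+ *     }
+ * }
+ * }</pre>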
+ */
+public abstract class RaftActor extends UntypedPersistentActor {
+    protected final LoggingAdapter LOG =
+        Logging.getLogger(getContext().system(), this);
+
+    /**
+     * The current state determines the current behavior of a RaftActor
+     * A Raft Actor always starts off in the Follower State
+     */
+    private RaftActorBehavior currentBehavior;
+
+    /**
+     * This context should NOT be passed directly to any other actor. It is
+     * only to be consumed by the RaftActorBehaviors.
+     */
+    private RaftActorContext context;
+
+    /**
+     * The in-memory journal
+     */
+    private ReplicatedLogImpl replicatedLog = new ReplicatedLogImpl();
+
+
+    public RaftActor(String id, Map<String, String> peerAddresses) {
+        context = new RaftActorContextImpl(this.getSelf(),
+            this.getContext(),
+            id, new ElectionTermImpl(),
+            -1, -1, replicatedLog, peerAddresses, LOG);
+    }
+
+    @Override public void onReceiveRecover(Object message) {
+        if (message instanceof SnapshotOffer) {
+            SnapshotOffer offer = (SnapshotOffer) message;
+            Snapshot snapshot = (Snapshot) offer.snapshot();
+
+            // Create a replicated log with the snapshot information
+            // The replicated log can be used later on to retrieve this snapshot
+            // when we need to install it on a peer
+            replicatedLog = new ReplicatedLogImpl(snapshot);
+
+            // Apply the snapshot to the actors state
+            applySnapshot(snapshot.getState());
+
+        } else if (message instanceof ReplicatedLogEntry) {
+            replicatedLog.append((ReplicatedLogEntry) message);
+        } else if (message instanceof DeleteEntries) {
+            replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+        } else if (message instanceof UpdateElectionTerm) {
+            context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), ((UpdateElectionTerm) message).getVotedFor());
+        } else if (message instanceof RecoveryCompleted) {
+            LOG.debug(
+                "Last index in log : " + replicatedLog.lastIndex());
+            currentBehavior = switchBehavior(RaftState.Follower);
+        }
+    }
+
+    @Override public void onReceiveCommand(Object message) {
+        if (message instanceof ApplyState){
+            ApplyState applyState = (ApplyState) message;
+
+            LOG.debug("Applying state for log index {} data {}",
+                applyState.getReplicatedLogEntry().getIndex(),
+                applyState.getReplicatedLogEntry().getData());
+
+            applyState(applyState.getClientActor(), applyState.getIdentifier(),
+                applyState.getReplicatedLogEntry().getData());
+        } else if(message instanceof ApplySnapshot ) {
+            applySnapshot(((ApplySnapshot) message).getSnapshot());
+        } else if (message instanceof FindLeader) {
+            getSender().tell(
+                new FindLeaderReply(
+                    context.getPeerAddress(currentBehavior.getLeaderId())),
+                getSelf()
+            );
+        } else if (message instanceof SaveSnapshotSuccess) {
+            SaveSnapshotSuccess success = (SaveSnapshotSuccess) message;
+
+            // TODO: Not sure if we want to be this aggressive with trimming stuff
+            trimPersistentData(success.metadata().sequenceNr());
+
+        } else if (message instanceof SaveSnapshotFailure) {
+
+            // TODO: Handle failure in saving the snapshot
+            // Maybe do retries on failure
+
+        } else if (message instanceof AddRaftPeer){
+
+            // FIXME : Do not add raft peers like this.
+            // When adding a new Peer we have to ensure that a majority of
+            // the peers know about the new Peer. Doing it this way may cause
+            // a situation where multiple Leaders may emerge
+            AddRaftPeer arp = (AddRaftPeer)message;
+            context.addToPeers(arp.getName(), arp.getAddress());
+
+        } else if (message instanceof RemoveRaftPeer){
+
+            RemoveRaftPeer rrp = (RemoveRaftPeer)message;
+            context.removePeer(rrp.getName());
+
+        } else {
+
+            RaftState state =
+                currentBehavior.handleMessage(getSender(), message);
+            currentBehavior = switchBehavior(state);
+        }
+    }
+
+
+
+    /**
+     * When a derived RaftActor needs to persist something it must call
+     * persistData.
+     *
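+     * <p>
+     * For example, a leader handling a client command might call the following
+     * (a sketch; requestId and command are placeholder names):
+     * <pre>{@code
+     * persistData(getSender(), requestId, command);
+     * }</pre>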
+     * @param clientActor
+     * @param identifier
+     * @param data
+     */
+    protected void persistData(ActorRef clientActor, String identifier,
+        Object data) {
+
+        ReplicatedLogEntry replicatedLogEntry = new ReplicatedLogImplEntry(
+            context.getReplicatedLog().lastIndex() + 1,
+            context.getTermInformation().getCurrentTerm(), data);
+
+        LOG.debug("Persist data {}", replicatedLogEntry);
+
+        replicatedLog
+            .appendAndPersist(clientActor, identifier, replicatedLogEntry);
+    }
+
+    protected String getId() {
+        return context.getId();
+    }
+
+    /**
+     * Derived actors can call the isLeader method to check if the current
+     * RaftActor is the Leader or not
+     *
+     * @return true if this RaftActor is a Leader, false otherwise
+     */
+    protected boolean isLeader() {
+        return context.getId().equals(currentBehavior.getLeaderId());
+    }
+
+    /**
+     * Derived actors can call getLeader if they need a reference to the Leader.
+     * This would be useful for example in forwarding a request to an actor
+     * which is the leader
+     *
+     * @return A reference to the leader if known, null otherwise
+     */
+    protected ActorSelection getLeader(){
+        String leaderId = currentBehavior.getLeaderId();
+        if (leaderId == null) {
+            return null;
+        }
+        String peerAddress = context.getPeerAddress(leaderId);
+        LOG.debug("getLeader leaderId = " + leaderId + " peerAddress = "
+            + peerAddress);
+        return context.actorSelection(peerAddress);
+    }
+
+    protected RaftState getRaftState() {
+        return currentBehavior.state();
+    }
+
+
+
+    /**
+     * The applyState method will be called by the RaftActor when some data
+     * needs to be applied to the actor's state
+     *
+     * @param clientActor A reference to the client who sent this message. This
+     *                    is the same reference that was passed to persistData
+     *                    by the derived actor. clientActor may be null when
+     *                    the RaftActor is behaving as a follower or during
+     *                    recovery.
+     * @param identifier  The identifier of the persisted data. This is also
+     *                    the same identifier that was passed to persistData by
+     *                    the derived actor. identifier may be null when
+     *                    the RaftActor is behaving as a follower or during
+     *                    recovery
+     * @param data        A piece of data that was persisted by the persistData call.
+     *                    This should NEVER be null.
+     */
+    protected abstract void applyState(ActorRef clientActor, String identifier,
+        Object data);
+
+    /**
+     * This method will be called by the RaftActor when a snapshot needs to be
+     * created. The derived actor should respond with its current state.
+     * <p/>
+     * During recovery the state that is returned by the derived actor will
+     * be passed back to it by calling the applySnapshot method
+     *
+     * @return The current state of the actor
+     */
+    protected abstract Object createSnapshot();
+
+    /**
+     * This method will be called by the RaftActor during recovery to
+     * reconstruct the state of the actor.
+     * <p/>
+     * This method may also be called at any other point during normal
+     * operations when the derived actor is out of sync with its peers
+     * and the only way to bring it in sync is by applying a snapshot
+     *
+     * @param snapshot A snapshot of the state of the actor
+     */
+    protected abstract void applySnapshot(Object snapshot);
+
+    private RaftActorBehavior switchBehavior(RaftState state) {
+        if (currentBehavior != null) {
+            if (currentBehavior.state() == state) {
+                return currentBehavior;
+            }
+            LOG.info("Switching from state " + currentBehavior.state() + " to "
+                + state);
+
+            try {
+                currentBehavior.close();
+            } catch (Exception e) {
+                LOG.error(e,
+                    "Failed to close behavior : " + currentBehavior.state());
+            }
+
+        } else {
+            LOG.info("Switching behavior to " + state);
+        }
+        RaftActorBehavior behavior = null;
+        if (state == RaftState.Candidate) {
+            behavior = new Candidate(context);
+        } else if (state == RaftState.Follower) {
+            behavior = new Follower(context);
+        } else {
+            behavior = new Leader(context);
+        }
+        return behavior;
+    }
+
+    private void trimPersistentData(long sequenceNumber) {
+        // Trim snapshots
+        // FIXME : Not sure how exactly the SnapshotSelectionCriteria is applied
+        // For now guessing that it is ANDed.
+        deleteSnapshots(new SnapshotSelectionCriteria(
+            sequenceNumber - 100000, 43200000));
+
+        // Trim journal
+        deleteMessages(sequenceNumber);
+    }
+
+
+    private class ReplicatedLogImpl implements ReplicatedLog {
+        private final List<ReplicatedLogEntry> journal;
+        private final Object snapshot;
+        private long snapshotIndex = -1;
+        private long snapshotTerm = -1;
+
+        public ReplicatedLogImpl(Snapshot snapshot) {
+            this.snapshot = snapshot.getState();
+            this.snapshotIndex = snapshot.getLastAppliedIndex();
+            this.snapshotTerm = snapshot.getLastAppliedTerm();
+
+            this.journal = new ArrayList<>(snapshot.getUnAppliedEntries());
+        }
+
+        public ReplicatedLogImpl() {
+            this.snapshot = null;
+            this.journal = new ArrayList<>();
+        }
+
+        @Override public ReplicatedLogEntry get(long index) {
+            int adjustedIndex = adjustedIndex(index);
+
+            if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
+                return null;
+            }
+
+            return journal.get(adjustedIndex);
+        }
+
+        @Override public ReplicatedLogEntry last() {
+            if (journal.size() == 0) {
+                return null;
+            }
+            return get(journal.size() - 1);
+        }
+
+        @Override public long lastIndex() {
+            if (journal.size() == 0) {
+                return -1;
+            }
+
+            return last().getIndex();
+        }
+
+        @Override public long lastTerm() {
+            if (journal.size() == 0) {
+                return -1;
+            }
+
+            return last().getTerm();
+        }
+
+
+        @Override public void removeFrom(long index) {
+            int adjustedIndex = adjustedIndex(index);
+
+            if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
+                return;
+            }
+
+            journal.subList(adjustedIndex , journal.size()).clear();
+        }
+
+
+        @Override public void removeFromAndPersist(long index) {
+            int adjustedIndex = adjustedIndex(index);
+
+            if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
+                return;
+            }
+
+            // FIXME: Maybe this should be done after the command is saved
+            journal.subList(adjustedIndex , journal.size()).clear();
+
+            persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
+
+                @Override public void apply(DeleteEntries param)
+                    throws Exception {
+                    //FIXME : Doing nothing for now
+                }
+            });
+
+
+        }
+
+        @Override public void append(
+            final ReplicatedLogEntry replicatedLogEntry) {
+            journal.add(replicatedLogEntry);
+        }
+
+        @Override public List<ReplicatedLogEntry> getFrom(long index) {
+            int adjustedIndex = adjustedIndex(index);
+
+            List<ReplicatedLogEntry> entries = new ArrayList<>(100);
+            if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
+                return entries;
+            }
+
+
+            for (int i = adjustedIndex;
+                 i < journal.size(); i++) {
+                entries.add(journal.get(i));
+            }
+            return entries;
+        }
+
+        @Override public void appendAndPersist(
+            final ReplicatedLogEntry replicatedLogEntry) {
+            appendAndPersist(null, null, replicatedLogEntry);
+        }
+
+        public void appendAndPersist(final ActorRef clientActor,
+            final String identifier,
+            final ReplicatedLogEntry replicatedLogEntry) {
+            context.getLogger().debug(
+                "Append log entry and persist " + replicatedLogEntry);
+            // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
+            journal.add(replicatedLogEntry);
+
+            // When persisting events with persist it is guaranteed that the
+            // persistent actor will not receive further commands between the
+            // persist call and the execution(s) of the associated event
+            // handler. This also holds for multiple persist calls in context
+            // of a single command.
+            persist(replicatedLogEntry,
+                new Procedure<ReplicatedLogEntry>() {
+                    public void apply(ReplicatedLogEntry evt) throws Exception {
+                        // FIXME : Tentatively create a snapshot every hundred thousand entries. To be tuned.
+                        if (size() > 100000) {
+                            ReplicatedLogEntry lastAppliedEntry =
+                                get(context.getLastApplied());
+                            long lastAppliedIndex = -1;
+                            long lastAppliedTerm = -1;
+                            if (lastAppliedEntry != null) {
+                                lastAppliedIndex = lastAppliedEntry.getIndex();
+                                lastAppliedTerm = lastAppliedEntry.getTerm();
+                            }
+
+                            saveSnapshot(Snapshot.create(createSnapshot(),
+                                getFrom(context.getLastApplied() + 1),
+                                lastIndex(), lastTerm(), lastAppliedIndex,
+                                lastAppliedTerm));
+                        }
+                        // Send message for replication
+                        if (clientActor != null) {
+                            currentBehavior.handleMessage(getSelf(),
+                                new Replicate(clientActor, identifier,
+                                    replicatedLogEntry)
+                            );
+                        }
+                    }
+                }
+            );
+        }
+
+        @Override public long size() {
+            return journal.size() + snapshotIndex + 1;
+        }
+
+        @Override public boolean isPresent(long index) {
+            int adjustedIndex = adjustedIndex(index);
+
+            if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
+                return false;
+            }
+            return true;
+        }
+
+        @Override public boolean isInSnapshot(long index) {
+            return index <= snapshotIndex;
+        }
+
+        @Override public Object getSnapshot() {
+            return snapshot;
+        }
+
+        @Override public long getSnapshotIndex() {
+            return snapshotIndex;
+        }
+
+        @Override public long getSnapshotTerm() {
+            return snapshotTerm;
+        }
+
+        private int adjustedIndex(long index) {
+            if(snapshotIndex < 0){
+                return (int) index;
+            }
+            return (int) (index - snapshotIndex);
+        }
+    }
+
+
+    private static class ReplicatedLogImplEntry implements ReplicatedLogEntry,
+        Serializable {
+
+        private final long index;
+        private final long term;
+        private final Object payload;
+
+        public ReplicatedLogImplEntry(long index, long term, Object payload) {
+
+            this.index = index;
+            this.term = term;
+            this.payload = payload;
+        }
+
+        @Override public Object getData() {
+            return payload;
+        }
+
+        @Override public long getTerm() {
+            return term;
+        }
+
+        @Override public long getIndex() {
+            return index;
+        }
+
+        @Override public String toString() {
+            return "Entry{" +
+                "index=" + index +
+                ", term=" + term +
+                '}';
+        }
+    }
+
+    private static class DeleteEntries implements Serializable {
+        private final int fromIndex;
+
+
+        public DeleteEntries(int fromIndex) {
+            this.fromIndex = fromIndex;
+        }
+
+        public int getFromIndex() {
+            return fromIndex;
+        }
+    }
+
+
+    private static class Snapshot implements Serializable {
+        private final Object state;
+        private final List<ReplicatedLogEntry> unAppliedEntries;
+        private final long lastIndex;
+        private final long lastTerm;
+        private final long lastAppliedIndex;
+        private final long lastAppliedTerm;
+
+        private Snapshot(Object state,
+            List<ReplicatedLogEntry> unAppliedEntries, long lastIndex,
+            long lastTerm, long lastAppliedIndex, long lastAppliedTerm) {
+            this.state = state;
+            this.unAppliedEntries = unAppliedEntries;
+            this.lastIndex = lastIndex;
+            this.lastTerm = lastTerm;
+            this.lastAppliedIndex = lastAppliedIndex;
+            this.lastAppliedTerm = lastAppliedTerm;
+        }
+
+
+        public static Snapshot create(Object state,
+            List<ReplicatedLogEntry> entries, long lastIndex, long lastTerm,
+            long lastAppliedIndex, long lastAppliedTerm) {
+            return new Snapshot(state, entries, lastIndex, lastTerm,
+                lastAppliedIndex, lastAppliedTerm);
+        }
+
+        public Object getState() {
+            return state;
+        }
+
+        public List<ReplicatedLogEntry> getUnAppliedEntries() {
+            return unAppliedEntries;
+        }
+
+        public long getLastTerm() {
+            return lastTerm;
+        }
+
+        public long getLastAppliedIndex() {
+            return lastAppliedIndex;
+        }
+
+        public long getLastAppliedTerm() {
+            return lastAppliedTerm;
+        }
+    }
+
+    private class ElectionTermImpl implements ElectionTerm {
+        /**
+         * The latest term this actor has seen and the candidate it voted for
+         * in that term; persisted via updateAndPersist
+         */
+        private long currentTerm = 0;
+        private String votedFor = null;
+
+        public long getCurrentTerm() {
+            return currentTerm;
+        }
+
+        public String getVotedFor() {
+            return votedFor;
+        }
+
+        @Override public void update(long currentTerm, String votedFor) {
+            LOG.info("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
+
+            this.currentTerm = currentTerm;
+            this.votedFor = votedFor;
+        }
+
+        @Override
+        public void updateAndPersist(long currentTerm, String votedFor){
+            update(currentTerm, votedFor);
+            // FIXME : Maybe first persist then update the state
+            persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
+
+                @Override public void apply(UpdateElectionTerm param)
+                    throws Exception {
+
+                }
+            });
+        }
+    }
+
+    private static class UpdateElectionTerm implements Serializable {
+        private final long currentTerm;
+        private final String votedFor;
+
+        public UpdateElectionTerm(long currentTerm, String votedFor) {
+            this.currentTerm = currentTerm;
+            this.votedFor = votedFor;
+        }
+
+        public long getCurrentTerm() {
+            return currentTerm;
+        }
+
+        public String getVotedFor() {
+            return votedFor;
+        }
+    }
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java
new file mode 100644 (file)
index 0000000..7150ec0
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.event.LoggingAdapter;
+
+import java.util.Map;
+
+/**
+ * The RaftActorContext contains that portion of the RaftActor's state that
+ * needs to be shared with its behaviors. A RaftActorContext should NEVER be
+ * used in any actor context outside the RaftActor that constructed it.
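+ * <p>
+ * A behavior typically reads and updates shared state through the context, for
+ * example (sketch; the message and index values are illustrative):
+ * <pre>{@code
+ * long lastIndex = context.getReplicatedLog().lastIndex();
+ * context.setCommitIndex(newCommitIndex);
+ * context.getActor().tell(internalMessage, context.getActor());
+ * }</pre>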
+ */
+public interface RaftActorContext {
+    /**
+     * Create a new local actor
+      * @param props
+     * @return
+     */
+    ActorRef actorOf(Props props);
+
+    /**
+     * Create an actor selection
+     * @param path
+     * @return
+     */
+    ActorSelection actorSelection(String path);
+
+    /**
+     * Get the identifier for the RaftActor. This identifier represents the
+     * name of the actor whose common state is being shared. For example the
+     * id could be 'inventory'
+     * @return the identifier
+     */
+    String getId();
+
+    /**
+     * A reference to the RaftActor itself. This could be used to send messages
+     * to the RaftActor
+     * @return
+     */
+    ActorRef getActor();
+
+    /**
+     * Get the ElectionTerm information
+     * @return
+     */
+    ElectionTerm getTermInformation();
+
+    /**
+     * index of highest log entry known to be
+     * committed (initialized to 0, increases
+     *    monotonically)
+     * @return
+     */
+    long getCommitIndex();
+
+
+    /**
+     *
+     */
+    void setCommitIndex(long commitIndex);
+
+    /**
+     * index of highest log entry applied to state
+     * machine (initialized to 0, increases
+     *    monotonically)
+     * @return
+     */
+    long getLastApplied();
+
+
+    /**
+     *
+     */
+    void setLastApplied(long lastApplied);
+
+    /**
+     * @return A representation of the log
+     */
+    ReplicatedLog getReplicatedLog();
+
+    /**
+     * @return The ActorSystem associated with this context
+     */
+    ActorSystem getActorSystem();
+
+    /**
+     *
+     * @return
+     */
+    LoggingAdapter getLogger();
+
+    /**
+     * Get a mapping of peer ids to their addresses
+     * @return
+     */
+    Map<String, String> getPeerAddresses();
+
+    /**
+     *
+     * @param peerId
+     * @return
+     */
+    String getPeerAddress(String peerId);
+
+    /**
+     * Add to actor peers
+     * @param name
+     * @param address
+     */
+    void addToPeers(String name, String address);
+
+    /**
+     *
+     * @param name
+     */
+    public void removePeer(String name);
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
new file mode 100644 (file)
index 0000000..a0f1328
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.actor.UntypedActorContext;
+import akka.event.LoggingAdapter;
+
+import java.util.Map;
+
+public class RaftActorContextImpl implements RaftActorContext{
+
+    private final ActorRef actor;
+
+    private final UntypedActorContext context;
+
+    private final String id;
+
+    private final ElectionTerm termInformation;
+
+    private long commitIndex;
+
+    private long lastApplied;
+
+    private final ReplicatedLog replicatedLog;
+
+    private final Map<String, String> peerAddresses;
+
+    private final LoggingAdapter LOG;
+
+
+    public RaftActorContextImpl(ActorRef actor, UntypedActorContext context,
+        String id,
+        ElectionTerm termInformation, long commitIndex,
+        long lastApplied, ReplicatedLog replicatedLog, Map<String, String> peerAddresses, LoggingAdapter logger) {
+        this.actor = actor;
+        this.context = context;
+        this.id = id;
+        this.termInformation = termInformation;
+        this.commitIndex = commitIndex;
+        this.lastApplied = lastApplied;
+        this.replicatedLog = replicatedLog;
+        this.peerAddresses = peerAddresses;
+        this.LOG = logger;
+    }
+
+    public ActorRef actorOf(Props props){
+        return context.actorOf(props);
+    }
+
+    public ActorSelection actorSelection(String path){
+        return context.actorSelection(path);
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public ActorRef getActor() {
+        return actor;
+    }
+
+    public ElectionTerm getTermInformation() {
+        return termInformation;
+    }
+
+    public long getCommitIndex() {
+        return commitIndex;
+    }
+
+    @Override public void setCommitIndex(long commitIndex) {
+        this.commitIndex = commitIndex;
+    }
+
+    public long getLastApplied() {
+        return lastApplied;
+    }
+
+    @Override public void setLastApplied(long lastApplied) {
+        this.lastApplied = lastApplied;
+    }
+
+    @Override public ReplicatedLog getReplicatedLog() {
+        return replicatedLog;
+    }
+
+    @Override public ActorSystem getActorSystem() {
+        return context.system();
+    }
+
+    @Override public LoggingAdapter getLogger() {
+        return this.LOG;
+    }
+
+    @Override public Map<String, String> getPeerAddresses() {
+        return peerAddresses;
+    }
+
+    @Override public String getPeerAddress(String peerId) {
+        return peerAddresses.get(peerId);
+    }
+
+    @Override public void addToPeers(String name, String address) {
+        LOG.debug("Kamal--> addToPeer for:"+name);
+        peerAddresses.put(name, address);
+    }
+
+    @Override public void removePeer(String name) {
+        LOG.debug("Kamal--> removePeer for:"+name);
+        peerAddresses.remove(name);
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftState.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftState.java
new file mode 100644 (file)
index 0000000..65114eb
--- /dev/null
@@ -0,0 +1,7 @@
+package org.opendaylight.controller.cluster.raft;
+
+public enum RaftState {
+    Candidate,
+    Follower,
+    Leader
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLog.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLog.java
new file mode 100644 (file)
index 0000000..b7c8955
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import java.util.List;
+
+/**
+ * Represents the ReplicatedLog that needs to be kept in sync by the RaftActor
+ */
+public interface ReplicatedLog {
+    /**
+     * Get a replicated log entry at the specified index
+     *
+     * @param index the index of the log entry
+     * @return the ReplicatedLogEntry at the specified index, or null if the
+     * index is less than 0 or greater than the size of the in-memory journal.
+     */
+    ReplicatedLogEntry get(long index);
+
+
+    /**
+     * Get the last replicated log entry
+     *
+     * @return the last entry in the log
+     */
+    ReplicatedLogEntry last();
+
+    /**
+     * Get the index of the last entry in the log
+     *
+     * @return the index of the last log entry
+     */
+    long lastIndex();
+
+    /**
+     * Get the term of the last entry in the log
+     *
+     * @return the term of the last log entry
+     */
+    long lastTerm();
+
+    /**
+     * To be called when we need to remove entries from the in-memory log.
+     * This method will remove all entries >= index. This method should be used
+     * during recovery to appropriately trim the log based on persisted
+     * information
+     *
+     * @param index the index of the log entry
+     */
+    void removeFrom(long index);
+
+
+    /**
+     * To be called when we need to remove entries from the in-memory log and we
+     * need that information persisted to disk. This method will remove all
+     * entries >= index.
+     * <p>
+     * The persisted information would then be used during recovery to properly
+     * reconstruct the state of the in-memory replicated log
+     *
+     * @param index the index of the log entry
+     */
+    void removeFromAndPersist(long index);
+
+    /**
+     * Append an entry to the log
+     *
+     * @param replicatedLogEntry the entry to append
+     */
+    void append(ReplicatedLogEntry replicatedLogEntry);
+
+    /**
+     * Append an entry to the log and persist it so that it can be used to
+     * reconstruct the log during recovery
+     *
+     * @param replicatedLogEntry the entry to append and persist
+     */
+    void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry);
+
+    /**
+     * Get all log entries starting at the specified index
+     *
+     * @param index the index of the first log entry to return
+     * @return the list of entries from the specified index to the end of the log
+     */
+    List<ReplicatedLogEntry> getFrom(long index);
+
+
+    /**
+     * Get the number of entries in the in-memory journal
+     *
+     * @return the size of the in-memory journal
+     */
+    long size();
+
+    /**
+     * Checks if the entry at the specified index is present or not
+     *
+     * @param index the index of the log entry
+     * @return true if the entry is present in the in-memory journal
+     */
+    boolean isPresent(long index);
+
+    /**
+     * Checks if the entry is present in a snapshot
+     *
+     * @param index the index of the log entry
+     * @return true if the entry is in the snapshot. false if the entry is not
+     * in the snapshot even if the entry may be present in the replicated log
+     */
+    boolean isInSnapshot(long index);
+
+    /**
+     * Get the snapshot
+     *
+     * @return an object representing the snapshot if it exists. null otherwise
+     */
+    Object getSnapshot();
+
+    /**
+     * Get the index of the snapshot
+     *
+     * @return the index from which the snapshot was created. -1 otherwise.
+     */
+    long getSnapshotIndex();
+
+    /**
+     * Get the term of the snapshot
+     *
+     * @return the term of the index from which the snapshot was created. -1
+     * otherwise
+     */
+    long getSnapshotTerm();
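+
+    // Index layout (a sketch inferred from the methods above): entries at or
+    // below getSnapshotIndex() are expected to live only in the snapshot
+    // (isInSnapshot(index) == true), while later entries remain in the
+    // in-memory journal (isPresent(index) == true).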
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java
new file mode 100644 (file)
index 0000000..3bbaa22
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+/**
+ * Represents one entry in the replicated log
+ */
+public interface ReplicatedLogEntry {
+    /**
+     * The data stored in this entry
+     *
+     * @return the data payload carried by this entry
+     */
+    Object getData();
+
+    /**
+     * The term stored in this entry
+     *
+     * @return the term in which this entry was created
+     */
+    long getTerm();
+
+    /**
+     * The index of this entry
+     *
+     * @return the index of this entry in the replicated log
+     */
+    long getIndex();
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
new file mode 100644 (file)
index 0000000..1d78bb0
--- /dev/null
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.Cancellable;
+import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.internal.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.internal.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Abstract class that represents the behavior of a RaftActor
+ * <p/>
+ * All Servers:
+ * <ul>
+ * <li> If commitIndex > lastApplied: increment lastApplied, apply
+ * log[lastApplied] to state machine (§5.3)
+ * <li> If RPC request or response contains term T > currentTerm:
+ * set currentTerm = T, convert to follower (§5.1)
+ */
+public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
+
+    /**
+     * Information about the RaftActor whose behavior this class represents
+     */
+    protected final RaftActorContext context;
+
+    /**
+     * The maximum election time variance
+     */
+    private static final int ELECTION_TIME_MAX_VARIANCE = 100;
+
+    /**
+     * The interval at which a heart beat message will be sent to the remote
+     * RaftActor
+     * <p/>
+     * Since this is set to 100 milliseconds the Election timeout should be
+     * at least 200 milliseconds
+     */
+    protected static final FiniteDuration HEART_BEAT_INTERVAL =
+        new FiniteDuration(100, TimeUnit.MILLISECONDS);
+
+    /**
+     * The interval in which a new election would get triggered if no leader is found
+     */
+    private static final long ELECTION_TIME_INTERVAL =
+        HEART_BEAT_INTERVAL.toMillis() * 2;
+
+    /**
+     * The currently scheduled election, if any
+     */
+    private Cancellable electionCancel = null;
+
+    /**
+     * The id of the RaftActor that is currently believed to be the leader
+     */
+    protected String leaderId = null;
+
+
+    protected AbstractRaftActorBehavior(RaftActorContext context) {
+        this.context = context;
+    }
+
+    /**
+     * Derived classes should not handle AppendEntries messages directly; they
+     * should let the base class handle them first. Once the base class has
+     * handled the AppendEntries message and performed the common actions that
+     * apply in all RaftStates, it delegates further state-specific handling
+     * to the derived class by calling this method
+     *
+     * @param sender         The actor that sent this message
+     * @param appendEntries  The AppendEntries message
+     * @return the new RaftState, or the current state if no change is needed
+     */
+    protected abstract RaftState handleAppendEntries(ActorRef sender,
+        AppendEntries appendEntries);
+
+
+    /**
+     * appendEntries first processes the AppendEntries message and then
+     * delegates handling to a specific behavior
+     *
+     * @param sender        The actor that sent this message
+     * @param appendEntries The AppendEntries message
+     * @return the RaftState returned by the behavior-specific handler
+     */
+    protected RaftState appendEntries(ActorRef sender,
+        AppendEntries appendEntries) {
+
+        // 1. Reply false if term < currentTerm (§5.1)
+        if (appendEntries.getTerm() < currentTerm()) {
+            context.getLogger().debug(
+                "Cannot append entries because sender term " + appendEntries
+                    .getTerm() + " is less than " + currentTerm());
+            sender.tell(
+                new AppendEntriesReply(context.getId(), currentTerm(), false,
+                    lastIndex(), lastTerm()), actor()
+            );
+            return state();
+        }
+
+
+        return handleAppendEntries(sender, appendEntries);
+    }
+
+    /**
+     * Derived classes should not handle AppendEntriesReply messages directly;
+     * they should let the base class handle them first. Once the base class
+     * has handled the AppendEntriesReply message and performed the common
+     * actions that apply in all RaftStates, it delegates further
+     * state-specific handling to the derived class by calling this method
+     *
+     * @param sender             The actor that sent this message
+     * @param appendEntriesReply The AppendEntriesReply message
+     * @return the new RaftState, or the current state if no change is needed
+     */
+    protected abstract RaftState handleAppendEntriesReply(ActorRef sender,
+        AppendEntriesReply appendEntriesReply);
+
+    /**
+     * requestVote handles the RequestVote message. This logic is common
+     * for all behaviors
+     *
+     * @param sender      The actor that sent this message
+     * @param requestVote The RequestVote message
+     * @return the current RaftState
+     */
+    protected RaftState requestVote(ActorRef sender,
+        RequestVote requestVote) {
+
+        boolean grantVote = false;
+
+        //  Reply false if term < currentTerm (§5.1)
+        if (requestVote.getTerm() < currentTerm()) {
+            grantVote = false;
+
+            // If votedFor is null or candidateId, and candidate’s log is at
+            // least as up-to-date as receiver’s log, grant vote (§5.2, §5.4)
+        } else if (votedFor() == null || votedFor()
+            .equals(requestVote.getCandidateId())) {
+
+            boolean candidateLatest = false;
+
+            // From §5.4.1
+            // Raft determines which of two logs is more up-to-date
+            // by comparing the index and term of the last entries in the
+            // logs. If the logs have last entries with different terms, then
+            // the log with the later term is more up-to-date. If the logs
+            // end with the same term, then whichever log is longer is
+            // more up-to-date.
+            if (requestVote.getLastLogTerm() > lastTerm()) {
+                candidateLatest = true;
+            } else if ((requestVote.getLastLogTerm() == lastTerm())
+                && requestVote.getLastLogIndex() >= lastIndex()) {
+                candidateLatest = true;
+            }
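+
+            // Example (a sketch): a candidate whose last entry is
+            // (term=3, index=7) is at least as up-to-date as a receiver whose
+            // last entry is (term=3, index=5), so candidateLatest becomes
+            // true; a candidate whose last entry has term 2 would not be,
+            // regardless of its index.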
+
+            if (candidateLatest) {
+                grantVote = true;
+                context.getTermInformation().updateAndPersist(requestVote.getTerm(),
+                    requestVote.getCandidateId());
+            }
+        }
+
+        sender.tell(new RequestVoteReply(currentTerm(), grantVote), actor());
+
+        return state();
+    }
+
+    /**
+     * Derived classes should not handle RequestVoteReply messages directly;
+     * they should let the base class handle them first. Once the base class
+     * has handled the RequestVoteReply message and performed the common
+     * actions that apply in all RaftStates, it delegates further
+     * state-specific handling to the derived class by calling this method
+     *
+     * @param sender           The actor that sent this message
+     * @param requestVoteReply The RequestVoteReply message
+     * @return the new RaftState, or the current state if no change is needed
+     */
+    protected abstract RaftState handleRequestVoteReply(ActorRef sender,
+        RequestVoteReply requestVoteReply);
+
+    /**
+     * Creates a randomized election duration
+     *
+     * @return a FiniteDuration between ELECTION_TIME_INTERVAL and
+     * ELECTION_TIME_INTERVAL + ELECTION_TIME_MAX_VARIANCE milliseconds
+     */
+    protected FiniteDuration electionDuration() {
+        long variance = new Random().nextInt(ELECTION_TIME_MAX_VARIANCE);
+        return new FiniteDuration(ELECTION_TIME_INTERVAL + variance,
+            TimeUnit.MILLISECONDS);
+    }
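+
+    // Worked example (a sketch): with HEART_BEAT_INTERVAL = 100 ms,
+    // ELECTION_TIME_INTERVAL = 200 ms and ELECTION_TIME_MAX_VARIANCE = 100,
+    // electionDuration() returns a value in the range [200 ms, 300 ms),
+    // e.g. 200 + nextInt(100) = 257 ms.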
+
+    /**
+     * stop the scheduled election
+     */
+    protected void stopElection() {
+        if (electionCancel != null && !electionCancel.isCancelled()) {
+            electionCancel.cancel();
+        }
+    }
+
+    /**
+     * Schedule a new election
+     *
+     * @param interval the duration after which an ElectionTimeout message is
+     *                 sent to this actor
+     */
+    protected void scheduleElection(FiniteDuration interval) {
+        stopElection();
+
+        // Schedule an election. When the scheduler triggers an ElectionTimeout
+        // message is sent to itself
+        electionCancel =
+            context.getActorSystem().scheduler().scheduleOnce(interval,
+                context.getActor(), new ElectionTimeout(),
+                context.getActorSystem().dispatcher(), context.getActor());
+    }
+
+    /**
+     * Get the current term
+     *
+     * @return the current term
+     */
+    protected long currentTerm() {
+        return context.getTermInformation().getCurrentTerm();
+    }
+
+    /**
+     * Get the candidate for whom we voted in the current term
+     *
+     * @return the id of the candidate we voted for, or null if we have not
+     * voted in the current term
+     */
+    protected String votedFor() {
+        return context.getTermInformation().getVotedFor();
+    }
+
+    /**
+     * Get the actor associated with this behavior
+     *
+     * @return the ActorRef of the RaftActor whose behavior this is
+     */
+    protected ActorRef actor() {
+        return context.getActor();
+    }
+
+    /**
+     * Get the term from the last entry in the log
+     *
+     * @return the term of the last log entry
+     */
+    protected long lastTerm() {
+        return context.getReplicatedLog().lastTerm();
+    }
+
+    /**
+     * Get the index from the last entry in the log
+     *
+     * @return the index of the last log entry
+     */
+    protected long lastIndex() {
+        return context.getReplicatedLog().lastIndex();
+    }
+
+    /**
+     * Find the client request tracker for a specific logIndex
+     *
+     * @param logIndex the index of the log entry being tracked
+     * @return the ClientRequestTracker for the given logIndex, or null if
+     * none exists
+     */
+    protected ClientRequestTracker findClientRequestTracker(long logIndex) {
+        return null;
+    }
+
+    /**
+     * Find the index of the log entry that precedes the entry at the
+     * specified index
+     *
+     * @param index the index of the log entry
+     * @return the index of the preceding entry, or -1 if there is none
+     */
+    protected long prevLogIndex(long index){
+        ReplicatedLogEntry prevEntry =
+            context.getReplicatedLog().get(index - 1);
+        if (prevEntry != null) {
+            return prevEntry.getIndex();
+        }
+        return -1;
+    }
+
+    /**
+     * Find the term of the log entry that precedes the entry at the
+     * specified index
+     *
+     * @param index the index of the log entry
+     * @return the term of the preceding entry, or -1 if there is none
+     */
+    protected long prevLogTerm(long index){
+        ReplicatedLogEntry prevEntry =
+            context.getReplicatedLog().get(index - 1);
+        if (prevEntry != null) {
+            return prevEntry.getTerm();
+        }
+        return -1;
+    }
+
+    /**
+     * Apply the provided index to the state machine
+     *
+     * @param index a log index that is known to be committed
+     */
+    protected void applyLogToStateMachine(long index) {
+        // Now maybe we apply to the state machine
+        for (long i = context.getLastApplied() + 1;
+             i < index + 1; i++) {
+            ActorRef clientActor = null;
+            String identifier = null;
+            ClientRequestTracker tracker = findClientRequestTracker(i);
+
+            if (tracker != null) {
+                clientActor = tracker.getClientActor();
+                identifier = tracker.getIdentifier();
+            }
+            ReplicatedLogEntry replicatedLogEntry =
+                context.getReplicatedLog().get(i);
+
+            if (replicatedLogEntry != null) {
+                actor().tell(new ApplyState(clientActor, identifier,
+                    replicatedLogEntry), actor());
+            } else {
+                context.getLogger().error(
+                    "Missing index " + i + " from log. Cannot apply state.");
+            }
+        }
+        // The ApplyState messages sent above ask the local RaftActor (its
+        // derived class, to be specific) to apply the entries to its state
+        // machine; record how far we have applied
+        context.setLastApplied(index);
+    }
+
+    @Override
+    public RaftState handleMessage(ActorRef sender, Object message) {
+        if (message instanceof AppendEntries) {
+            return appendEntries(sender, (AppendEntries) message);
+        } else if (message instanceof AppendEntriesReply) {
+            return handleAppendEntriesReply(sender, (AppendEntriesReply) message);
+        } else if (message instanceof RequestVote) {
+            return requestVote(sender, (RequestVote) message);
+        } else if (message instanceof RequestVoteReply) {
+            return handleRequestVoteReply(sender, (RequestVoteReply) message);
+        }
+        return state();
+    }
+
+    @Override public String getLeaderId() {
+        return leaderId;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java
new file mode 100644 (file)
index 0000000..ecd4901
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.internal.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * The behavior of a RaftActor when it is in the Candidate state
+ * <p/>
+ * Candidates (§5.2):
+ * <ul>
+ * <li> On conversion to candidate, start election:
+ * <ul>
+ * <li> Increment currentTerm
+ * <li> Vote for self
+ * <li> Reset election timer
+ * <li> Send RequestVote RPCs to all other servers
+ * </ul>
+ * <li> If votes received from majority of servers: become leader
+ * <li> If AppendEntries RPC received from new leader: convert to
+ * follower
+ * <li> If election timeout elapses: start new election
+ * </ul>
+ */
+public class Candidate extends AbstractRaftActorBehavior {
+
+    private final Map<String, ActorSelection> peerToActor = new HashMap<>();
+
+    private int voteCount;
+
+    private final int votesRequired;
+
+    public Candidate(RaftActorContext context) {
+        super(context);
+
+        Collection<String> peerPaths = context.getPeerAddresses().values();
+
+        for (String peerPath : peerPaths) {
+            peerToActor.put(peerPath,
+                context.actorSelection(peerPath));
+        }
+
+        context.getLogger().debug("Election:Candidate has following peers:"+peerToActor.keySet());
+        if(peerPaths.size() > 0) {
+            // Votes are required from a majority of the peers including self.
+            // The votesRequired field therefore stores a calculated value
+            // of the number of votes required for this candidate to win an
+            // election based on its known peers.
+            // If a peer was added during normal operation and raft replicas
+            // came to know about them then the new peer would also need to be
+            // taken into consideration when calculating this value.
+            // Here are some examples for what the votesRequired would be for n
+            // peers
+            // 0 peers = 1 votesRequired (0 + 1) / 2 + 1 = 1
+            // 2 peers = 2 votesRequired (2 + 1) / 2 + 1 = 2
+            // 4 peers = 3 votesRequired (4 + 1) / 2 + 1 = 3
+            int noOfPeers = peerPaths.size();
+            int self = 1;
+            votesRequired = (noOfPeers + self) / 2 + 1;
+        } else {
+            votesRequired = 0;
+        }
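+
+        // Example (a sketch): in a three node cluster each candidate sees two
+        // peers, so votesRequired = (2 + 1) / 2 + 1 = 2; since startNewTerm()
+        // counts the candidate's own vote, one granted RequestVoteReply from
+        // a peer is enough to become Leader.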
+
+        startNewTerm();
+        scheduleElection(electionDuration());
+    }
+
+    @Override protected RaftState handleAppendEntries(ActorRef sender,
+        AppendEntries appendEntries) {
+
+        return state();
+    }
+
+    @Override protected RaftState handleAppendEntriesReply(ActorRef sender,
+        AppendEntriesReply appendEntriesReply) {
+
+        return state();
+    }
+
+    @Override protected RaftState handleRequestVoteReply(ActorRef sender,
+        RequestVoteReply requestVoteReply) {
+
+        if (requestVoteReply.isVoteGranted()) {
+            voteCount++;
+        }
+
+        if (voteCount >= votesRequired) {
+            return RaftState.Leader;
+        }
+
+        return state();
+    }
+
+    @Override public RaftState state() {
+        return RaftState.Candidate;
+    }
+
+    @Override
+    public RaftState handleMessage(ActorRef sender, Object message) {
+
+        if (message instanceof RaftRPC) {
+            RaftRPC rpc = (RaftRPC) message;
+            // If RPC request or response contains term T > currentTerm:
+            // set currentTerm = T, convert to follower (§5.1)
+            // This applies to all RPC messages and responses
+            if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+                context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+                return RaftState.Follower;
+            }
+        }
+
+        if (message instanceof ElectionTimeout) {
+            if (votesRequired == 0) {
+                // If there are no peers then we should be a Leader
+                // We wait for the election timeout to occur before declaring
+                // ourselves the leader. This gives enough time for a leader
+                // that we do not know about (as a peer) to send a message to
+                // the candidate
+                return RaftState.Leader;
+            }
+            startNewTerm();
+            scheduleElection(electionDuration());
+            return state();
+        }
+        return super.handleMessage(sender, message);
+    }
+
+
+    private void startNewTerm() {
+
+
+        // set voteCount back to 1 (that is voting for self)
+        voteCount = 1;
+
+        // Increment the election term and vote for self
+        long currentTerm = context.getTermInformation().getCurrentTerm();
+        context.getTermInformation().updateAndPersist(currentTerm + 1, context.getId());
+
+        context.getLogger().debug("Starting new term " + (currentTerm+1));
+
+        // Request for a vote
+        // TODO: Retry request for vote if replies do not arrive in a reasonable
+        // amount of time TBD
+        for (ActorSelection peerActor : peerToActor.values()) {
+            peerActor.tell(new RequestVote(
+                    context.getTermInformation().getCurrentTerm(),
+                    context.getId(),
+                    context.getReplicatedLog().lastIndex(),
+                    context.getReplicatedLog().lastTerm()),
+                context.getActor()
+            );
+        }
+
+
+    }
+
+    @Override public void close() throws Exception {
+        stopElection();
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
new file mode 100644 (file)
index 0000000..532201b
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.internal.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.internal.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+
+/**
+ * The behavior of a RaftActor in the Follower state
+ * <p/>
+ * <ul>
+ * <li> Respond to RPCs from candidates and leaders
+ * <li> If election timeout elapses without receiving AppendEntries
+ * RPC from current leader or granting vote to candidate:
+ * convert to candidate
+ * </ul>
+ */
+public class Follower extends AbstractRaftActorBehavior {
+    public Follower(RaftActorContext context) {
+        super(context);
+
+        scheduleElection(electionDuration());
+    }
+
+    @Override protected RaftState handleAppendEntries(ActorRef sender,
+        AppendEntries appendEntries) {
+
+        // TODO : Refactor this method into a bunch of smaller methods
+        // to make it easier to read. Before refactoring ensure tests
+        // cover the code properly
+
+        // 1. Reply false if term < currentTerm (§5.1)
+        // This is handled in the appendEntries method of the base class
+
+        // If we got here then we do appear to be talking to the leader
+        leaderId = appendEntries.getLeaderId();
+
+        // 2. Reply false if log doesn’t contain an entry at prevLogIndex
+        // whose term matches prevLogTerm (§5.3)
+
+        ReplicatedLogEntry previousEntry = context.getReplicatedLog()
+            .get(appendEntries.getPrevLogIndex());
+
+
+        boolean outOfSync = true;
+
+        // First check if the logs are in sync or not
+        if (lastIndex() == -1
+            && appendEntries.getPrevLogIndex() != -1) {
+
+            // The follower's log is out of sync because the leader does have
+            // an entry at prevLogIndex and this follower has no entries in
+            // its log.
+
+            context.getLogger().debug(
+                "The followers log is empty and the senders prevLogIndex is {}",
+                appendEntries.getPrevLogIndex());
+
+        } else if (lastIndex() > -1
+            && appendEntries.getPrevLogIndex() != -1
+            && previousEntry == null) {
+
+            // The follower's log is out of sync because the Leader's
+            // prevLogIndex entry was not found in its log
+
+            context.getLogger().debug(
+                "The log is not empty but the prevLogIndex {} was not found in it",
+                appendEntries.getPrevLogIndex());
+
+        } else if (lastIndex() > -1
+            && previousEntry != null
+            && previousEntry.getTerm()!= appendEntries.getPrevLogTerm()) {
+
+            // The follower's log is out of sync because the Leader's
+            // prevLogIndex entry does exist in the follower's log but it has
+            // a different term in it
+
+            context.getLogger().debug(
+                "Cannot append entries because previous entry term {}  is not equal to append entries prevLogTerm {}"
+                , previousEntry.getTerm()
+                , appendEntries.getPrevLogTerm());
+        } else {
+            outOfSync = false;
+        }
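+
+        // Example (a sketch): if the leader sent prevLogIndex = 4 and
+        // prevLogTerm = 2 but this follower's entry at index 4 has term 1,
+        // the third branch above leaves outOfSync as true and a negative
+        // AppendEntriesReply is sent below.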
+
+        if (outOfSync) {
+            // We found that the log was out of sync so just send a negative
+            // reply and return
+            sender.tell(
+                new AppendEntriesReply(context.getId(), currentTerm(), false,
+                    lastIndex(), lastTerm()), actor()
+            );
+            return state();
+        }
+
+        if (appendEntries.getEntries() != null
+            && appendEntries.getEntries().size() > 0) {
+            context.getLogger().debug(
+                "Number of entries to be appended = " + appendEntries
+                    .getEntries().size()
+            );
+
+            // 3. If an existing entry conflicts with a new one (same index
+            // but different terms), delete the existing entry and all that
+            // follow it (§5.3)
+            int addEntriesFrom = 0;
+            if (context.getReplicatedLog().size() > 0) {
+
+                // Find the first entry in the AppendEntries message that is
+                // not already present in the follower's log
+                for (int i = 0;
+                     i < appendEntries.getEntries()
+                         .size(); i++, addEntriesFrom++) {
+                    ReplicatedLogEntry matchEntry =
+                        appendEntries.getEntries().get(i);
+                    ReplicatedLogEntry newEntry = context.getReplicatedLog()
+                        .get(matchEntry.getIndex());
+
+                    if (newEntry == null) {
+                        //newEntry not found in the log
+                        break;
+                    }
+
+                    if (newEntry.getTerm() == matchEntry
+                        .getTerm()) {
+                        continue;
+                    }
+
+                    context.getLogger().debug(
+                        "Removing entries from log starting at "
+                            + matchEntry.getIndex()
+                    );
+
+                    // Entries do not match so remove all subsequent entries
+                    context.getReplicatedLog()
+                        .removeFromAndPersist(matchEntry.getIndex());
+                    break;
+                }
+            }
+
+            context.getLogger().debug(
+                "After cleanup entries to be added from = " + (addEntriesFrom
+                    + lastIndex())
+            );
+
+            // 4. Append any new entries not already in the log
+            for (int i = addEntriesFrom;
+                 i < appendEntries.getEntries().size(); i++) {
+
+                context.getLogger().debug(
+                    "Append entry to log " + appendEntries.getEntries().get(i).getData()
+                        .toString()
+                );
+                context.getReplicatedLog()
+                    .appendAndPersist(appendEntries.getEntries().get(i));
+            }
+
+            context.getLogger().debug(
+                "Log size is now " + context.getReplicatedLog().size());
+        }
+
+
+        // 5. If leaderCommit > commitIndex, set commitIndex =
+        // min(leaderCommit, index of last new entry)
+
+        long prevCommitIndex = context.getCommitIndex();
+
+        context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(),
+            context.getReplicatedLog().lastIndex()));
+
+        if (prevCommitIndex != context.getCommitIndex()) {
+            context.getLogger()
+                .debug("Commit index set to " + context.getCommitIndex());
+        }
+
+        // If commitIndex > lastApplied: increment lastApplied, apply
+        // log[lastApplied] to state machine (§5.3)
+        if (appendEntries.getLeaderCommit() > context.getLastApplied()) {
+            applyLogToStateMachine(appendEntries.getLeaderCommit());
+        }
+
+        sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), true,
+            lastIndex(), lastTerm()), actor());
+
+        return state();
+    }
+
+    @Override protected RaftState handleAppendEntriesReply(ActorRef sender,
+        AppendEntriesReply appendEntriesReply) {
+        return state();
+    }
+
+    @Override protected RaftState handleRequestVoteReply(ActorRef sender,
+        RequestVoteReply requestVoteReply) {
+        return state();
+    }
+
+    @Override public RaftState state() {
+        return RaftState.Follower;
+    }
+
+    @Override public RaftState handleMessage(ActorRef sender, Object message) {
+        if (message instanceof RaftRPC) {
+            RaftRPC rpc = (RaftRPC) message;
+            // If RPC request or response contains term T > currentTerm:
+            // set currentTerm = T, convert to follower (§5.1)
+            // This applies to all RPC messages and responses
+            if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+                context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+            }
+        }
+
+        if (message instanceof ElectionTimeout) {
+            return RaftState.Candidate;
+        } else if (message instanceof InstallSnapshot) {
+            InstallSnapshot snapshot = (InstallSnapshot) message;
+            actor().tell(new ApplySnapshot(snapshot), actor());
+        }
+
+        scheduleElection(electionDuration());
+
+        return super.handleMessage(sender, message);
+    }
+
+    @Override public void close() throws Exception {
+        stopElection();
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java
new file mode 100644 (file)
index 0000000..fb8be8b
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.Cancellable;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
+import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
+import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
+import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.internal.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.internal.messages.Replicate;
+import org.opendaylight.controller.cluster.raft.internal.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.internal.messages.SendInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * The behavior of a RaftActor when it is in the Leader state
+ * <p/>
+ * Leaders:
+ * <ul>
+ * <li> Upon election: send initial empty AppendEntries RPCs
+ * (heartbeat) to each server; repeat during idle periods to
+ * prevent election timeouts (§5.2)
+ * <li> If command received from client: append entry to local log,
+ * respond after entry applied to state machine (§5.3)
+ * <li> If last log index ≥ nextIndex for a follower: send
+ * AppendEntries RPC with log entries starting at nextIndex
+ * <ul>
+ * <li> If successful: update nextIndex and matchIndex for
+ * follower (§5.3)
+ * <li> If AppendEntries fails because of log inconsistency:
+ * decrement nextIndex and retry (§5.3)
+ * </ul>
+ * <li> If there exists an N such that N > commitIndex, a majority
+ * of matchIndex[i] ≥ N, and log[N].term == currentTerm:
+ * set commitIndex = N (§5.3, §5.4).
+ */
+public class Leader extends AbstractRaftActorBehavior {
+
+
+    private final Map<String, FollowerLogInformation> followerToLog =
+        new HashMap<>();
+
+    private final Map<String, ActorSelection> followerToActor = new HashMap<>();
+
+    private Cancellable heartbeatSchedule = null;
+    private Cancellable appendEntriesSchedule = null;
+    private Cancellable installSnapshotSchedule = null;
+
+    private List<ClientRequestTracker> trackerList = new ArrayList<>();
+
+    private final int minReplicationCount;
+
+    public Leader(RaftActorContext context) {
+        super(context);
+
+        if (lastIndex() >= 0) {
+            context.setCommitIndex(lastIndex());
+        }
+
+        for (String followerId : context.getPeerAddresses().keySet()) {
+            FollowerLogInformation followerLogInformation =
+                new FollowerLogInformationImpl(followerId,
+                    new AtomicLong(lastIndex()),
+                    new AtomicLong(-1));
+
+            followerToActor.put(followerId,
+                context.actorSelection(context.getPeerAddress(followerId)));
+
+            followerToLog.put(followerId, followerLogInformation);
+        }
+
+        context.getLogger().debug("Election:Leader has following peers:"+followerToActor.keySet());
+
+        if (followerToActor.size() > 0) {
+            minReplicationCount = (followerToActor.size() + 1) / 2 + 1;
+        } else {
+            minReplicationCount = 0;
+        }
+
+
+        // Immediately schedule a heartbeat
+        // Upon election: send initial empty AppendEntries RPCs
+        // (heartbeat) to each server; repeat during idle periods to
+        // prevent election timeouts (§5.2)
+        scheduleHeartBeat(new FiniteDuration(0, TimeUnit.SECONDS));
+
+        scheduleInstallSnapshotCheck(
+            new FiniteDuration(HEART_BEAT_INTERVAL.length() * 1000,
+                HEART_BEAT_INTERVAL.unit())
+        );
+
+    }
+
+    @Override protected RaftState handleAppendEntries(ActorRef sender,
+        AppendEntries appendEntries) {
+
+        return state();
+    }
+
+    @Override protected RaftState handleAppendEntriesReply(ActorRef sender,
+        AppendEntriesReply appendEntriesReply) {
+
+        // Update the FollowerLogInformation
+        String followerId = appendEntriesReply.getFollowerId();
+        FollowerLogInformation followerLogInformation =
+            followerToLog.get(followerId);
+        if (appendEntriesReply.isSuccess()) {
+            followerLogInformation
+                .setMatchIndex(appendEntriesReply.getLogLastIndex());
+            followerLogInformation
+                .setNextIndex(appendEntriesReply.getLogLastIndex() + 1);
+        } else {
+
+            // TODO: When we find that the follower is out of sync with the
+            // Leader we simply decrement that follower's next index by 1.
+            // Would it be possible to do better than this? The RAFT spec
+            // does not explicitly deal with it but may be something for us to
+            // think about
+
+            followerLogInformation.decrNextIndex();
+        }
+
+        // Now figure out if this reply warrants a change in the commitIndex
+        // If there exists an N such that N > commitIndex, a majority
+        // of matchIndex[i] ≥ N, and log[N].term == currentTerm:
+        // set commitIndex = N (§5.3, §5.4).
+        for (long N = context.getCommitIndex() + 1; ; N++) {
+            int replicatedCount = 1;
+
+            for (FollowerLogInformation info : followerToLog.values()) {
+                if (info.getMatchIndex().get() >= N) {
+                    replicatedCount++;
+                }
+            }
+
+            if (replicatedCount >= minReplicationCount) {
+                ReplicatedLogEntry replicatedLogEntry =
+                    context.getReplicatedLog().get(N);
+                if (replicatedLogEntry != null
+                    && replicatedLogEntry.getTerm()
+                    == currentTerm()) {
+                    context.setCommitIndex(N);
+                }
+            } else {
+                break;
+            }
+        }
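+
+        // Worked example (a sketch): with two followers,
+        // minReplicationCount = 2, follower matchIndex values {5, 3} and
+        // commitIndex = 2, the loop sees N = 3, 4 and 5 replicated on a
+        // majority (leader plus at least one follower) and, provided each
+        // entry's term equals currentTerm, advances commitIndex to 5; at
+        // N = 6 only the leader has the entry, so the loop stops.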
+
+        // Apply the change to the state machine
+        if (context.getCommitIndex() > context.getLastApplied()) {
+            applyLogToStateMachine(context.getCommitIndex());
+        }
+
+        return state();
+    }
+
+    @Override protected ClientRequestTracker findClientRequestTracker(long logIndex) {
+        for (ClientRequestTracker tracker : trackerList) {
+            if (tracker.getIndex() == logIndex) {
+                return tracker;
+            }
+        }
+
+        return null;
+    }
+
+    @Override protected RaftState handleRequestVoteReply(ActorRef sender,
+        RequestVoteReply requestVoteReply) {
+        return state();
+    }
+
+    @Override public RaftState state() {
+        return RaftState.Leader;
+    }
+
+    @Override public RaftState handleMessage(ActorRef sender, Object message) {
+        Preconditions.checkNotNull(sender, "sender should not be null");
+
+        if (message instanceof RaftRPC) {
+            RaftRPC rpc = (RaftRPC) message;
+            // If RPC request or response contains term T > currentTerm:
+            // set currentTerm = T, convert to follower (§5.1)
+            // This applies to all RPC messages and responses
+            if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+                context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+                return RaftState.Follower;
+            }
+        }
+
+        try {
+            if (message instanceof SendHeartBeat) {
+                return sendHeartBeat();
+            } else if(message instanceof SendInstallSnapshot) {
+                installSnapshotIfNeeded();
+            } else if (message instanceof Replicate) {
+                replicate((Replicate) message);
+            } else if (message instanceof InstallSnapshotReply){
+                handleInstallSnapshotReply(
+                    (InstallSnapshotReply) message);
+            }
+        } finally {
+            scheduleHeartBeat(HEART_BEAT_INTERVAL);
+        }
+
+        return super.handleMessage(sender, message);
+    }
+
+    private void handleInstallSnapshotReply(InstallSnapshotReply message) {
+        String followerId = message.getFollowerId();
+        FollowerLogInformation followerLogInformation =
+            followerToLog.get(followerId);
+
+        followerLogInformation
+            .setMatchIndex(context.getReplicatedLog().getSnapshotIndex());
+        followerLogInformation
+            .setNextIndex(context.getReplicatedLog().getSnapshotIndex() + 1);
+    }
+
+    private void replicate(Replicate replicate) {
+        long logIndex = replicate.getReplicatedLogEntry().getIndex();
+
+        context.getLogger().debug("Replicate message " + logIndex);
+
+        if (followerToActor.size() == 0) {
+            context.setCommitIndex(
+                replicate.getReplicatedLogEntry().getIndex());
+
+            context.getActor()
+                .tell(new ApplyState(replicate.getClientActor(),
+                        replicate.getIdentifier(),
+                        replicate.getReplicatedLogEntry()),
+                    context.getActor()
+                );
+        } else {
+
+            // Create a tracker entry; we will use it later to notify the
+            // client actor
+            trackerList.add(
+                new ClientRequestTrackerImpl(replicate.getClientActor(),
+                    replicate.getIdentifier(),
+                    logIndex)
+            );
+
+            sendAppendEntries();
+        }
+    }
+
+    private void sendAppendEntries() {
+        // Send an AppendEntries to all followers
+        for (String followerId : followerToActor.keySet()) {
+            ActorSelection followerActor =
+                followerToActor.get(followerId);
+
+            FollowerLogInformation followerLogInformation =
+                followerToLog.get(followerId);
+
+            long nextIndex = followerLogInformation.getNextIndex().get();
+
+            List<ReplicatedLogEntry> entries = Collections.emptyList();
+
+            if(context.getReplicatedLog().isPresent(nextIndex)){
+                // TODO: Instead of sending all entries from nextIndex
+                // only send a fixed number of entries to each follower
+                // This is to avoid the situation where there are a lot of
+                // entries to install for a fresh follower or to a follower
+                // that has fallen too far behind with the log but yet is not
+                // eligible to receive a snapshot
+                entries =
+                    context.getReplicatedLog().getFrom(nextIndex);
+            }
+
+            followerActor.tell(
+                new AppendEntries(currentTerm(), context.getId(),
+                    prevLogIndex(nextIndex), prevLogTerm(nextIndex),
+                    entries, context.getCommitIndex()
+                ),
+                actor()
+            );
+        }
+    }
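+
+    // Note (a sketch): when a follower's nextIndex is not present in the
+    // replicated log, entries stays empty and the AppendEntries sent above
+    // acts as a pure heartbeat carrying only the term, prevLog information
+    // and the leader's commit index.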
+
+    /**
+     * An install snapshot check is scheduled at an interval that is a
+     * multiple of the HEART_BEAT_INTERVAL. This avoids the need to check for
+     * installing snapshots at every heartbeat.
+     */
+    private void installSnapshotIfNeeded(){
+        for (String followerId : followerToActor.keySet()) {
+            ActorSelection followerActor =
+                followerToActor.get(followerId);
+
+            FollowerLogInformation followerLogInformation =
+                followerToLog.get(followerId);
+
+            long nextIndex = followerLogInformation.getNextIndex().get();
+
+            if(!context.getReplicatedLog().isPresent(nextIndex) && context.getReplicatedLog().isInSnapshot(nextIndex)){
+                followerActor.tell(
+                    new InstallSnapshot(currentTerm(), context.getId(),
+                        context.getReplicatedLog().getSnapshotIndex(),
+                        context.getReplicatedLog().getSnapshotTerm(),
+                        context.getReplicatedLog().getSnapshot()
+                    ),
+                    actor()
+                );
+            }
+        }
+    }
+
+    private RaftState sendHeartBeat() {
+        if (followerToActor.size() > 0) {
+            sendAppendEntries();
+        }
+        return state();
+    }
+
+    private void stopHeartBeat() {
+        if (heartbeatSchedule != null && !heartbeatSchedule.isCancelled()) {
+            heartbeatSchedule.cancel();
+        }
+    }
+
+    private void stopInstallSnapshotSchedule() {
+        if (installSnapshotSchedule != null && !installSnapshotSchedule.isCancelled()) {
+            installSnapshotSchedule.cancel();
+        }
+    }
+
+    private void scheduleHeartBeat(FiniteDuration interval) {
+        if(followerToActor.keySet().size() == 0){
+            // Optimization - do not bother scheduling a heartbeat as there are
+            // no followers
+            return;
+        }
+
+        stopHeartBeat();
+
+        // Schedule a heartbeat. When the scheduler triggers a SendHeartbeat
+        // message is sent to itself.
+        // Scheduling the heartbeat only once here because heartbeats do not
+        // need to be sent if there are other messages being sent to the remote
+        // actor.
+        heartbeatSchedule =
+            context.getActorSystem().scheduler().scheduleOnce(
+                interval,
+                context.getActor(), new SendHeartBeat(),
+                context.getActorSystem().dispatcher(), context.getActor());
+    }
+
+
+    private void scheduleInstallSnapshotCheck(FiniteDuration interval) {
+        if(followerToActor.keySet().size() == 0){
+            // Optimization - do not bother scheduling an install snapshot
+            // check as there are no followers
+            return;
+        }
+
+        stopInstallSnapshotSchedule();
+
+        // Schedule a SendInstallSnapshot message which triggers a check for
+        // followers whose next entry is only available in the snapshot
+        installSnapshotSchedule =
+            context.getActorSystem().scheduler().scheduleOnce(
+                interval,
+                context.getActor(), new SendInstallSnapshot(),
+                context.getActorSystem().dispatcher(), context.getActor());
+    }
+
+
+
+    @Override public void close() throws Exception {
+        stopHeartBeat();
+        stopInstallSnapshotSchedule();
+    }
+
+    @Override public String getLeaderId() {
+        return context.getId();
+    }
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/RaftActorBehavior.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/RaftActorBehavior.java
new file mode 100644 (file)
index 0000000..ca2d916
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.raft.RaftState;
+
+/**
+ * A RaftActorBehavior represents the specific behavior of a RaftActor
+ * <p>
+ * A RaftActor can behave as one of the following,
+ * <ul>
+ *     <li> Follower </li>
+ *     <li> Candidate </li>
+ *     <li> Leader </li>
+ * </ul>
+ * <p>
+ * In each of these behaviors the Raft Actor handles the same Raft messages
+ * differently.
+ */
+public interface RaftActorBehavior extends AutoCloseable {
+    /**
+     * Handle a message. If the processing of the message warrants a state
+     * change then a new state should be returned otherwise this method should
+     * return the state for the current behavior.
+     *
+     * @param sender The sender of the message
+     * @param message A message that needs to be processed
+     *
+     * @return The new state or self (this)
+     */
+    RaftState handleMessage(ActorRef sender, Object message);
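+
+    // Usage sketch (assumes an owning RaftActor driving the behavior): the
+    // actor passes each incoming Raft message to its current behavior and,
+    // if the returned RaftState differs from behavior.state(), closes the
+    // current behavior and switches to the Follower, Candidate or Leader
+    // behavior matching the returned state.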
+
+    /**
+     * The state associated with a given behavior
+     *
+     * @return the RaftState associated with this behavior
+     */
+    RaftState state();
+
+    /**
+     * Get the id of the leader, if one is known to this behavior
+     *
+     * @return the id of the known leader, or null if no leader is known
+     */
+    String getLeaderId();
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/AddRaftPeer.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/AddRaftPeer.java
new file mode 100644 (file)
index 0000000..d1f4c43
--- /dev/null
@@ -0,0 +1,23 @@
+package org.opendaylight.controller.cluster.raft.client.messages;
+
+/**
+ * Created by kramesha on 7/17/14.
+ */
+public class AddRaftPeer {
+
+    private String name;
+    private String address;
+
+    public AddRaftPeer(String name, String address) {
+        this.name = name;
+        this.address = address;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public String getAddress() {
+        return address;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/FindLeader.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/FindLeader.java
new file mode 100644 (file)
index 0000000..a60aea4
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.client.messages;
+
+public class FindLeader {
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/FindLeaderReply.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/FindLeaderReply.java
new file mode 100644 (file)
index 0000000..b36ef11
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.client.messages;
+
+public class FindLeaderReply {
+    private final String leaderActor;
+
+    public FindLeaderReply(String leaderActor) {
+        this.leaderActor = leaderActor;
+    }
+
+    public String getLeaderActor() {
+        return leaderActor;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/RemoveRaftPeer.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/RemoveRaftPeer.java
new file mode 100644 (file)
index 0000000..4b766e0
--- /dev/null
@@ -0,0 +1,16 @@
+package org.opendaylight.controller.cluster.raft.client.messages;
+
+/**
+ * Created by kramesha on 7/17/14.
+ */
+public class RemoveRaftPeer {
+    private String name;
+
+    public RemoveRaftPeer(String name) {
+        this.name = name;
+    }
+
+    public String getName() {
+        return name;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ApplySnapshot.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ApplySnapshot.java
new file mode 100644 (file)
index 0000000..a7172e2
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+public class ApplySnapshot {
+    private final Object snapshot;
+
+    public ApplySnapshot(Object snapshot) {
+        this.snapshot = snapshot;
+    }
+
+    public Object getSnapshot() {
+        return snapshot;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ApplyState.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ApplyState.java
new file mode 100644 (file)
index 0000000..c9ba26e
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+
+public class ApplyState {
+    private final ActorRef clientActor;
+    private final String identifier;
+    private final ReplicatedLogEntry replicatedLogEntry;
+
+    public ApplyState(ActorRef clientActor, String identifier,
+        ReplicatedLogEntry replicatedLogEntry) {
+        this.clientActor = clientActor;
+        this.identifier = identifier;
+        this.replicatedLogEntry = replicatedLogEntry;
+    }
+
+    public ActorRef getClientActor() {
+        return clientActor;
+    }
+
+    public String getIdentifier() {
+        return identifier;
+    }
+
+    public ReplicatedLogEntry getReplicatedLogEntry() {
+        return replicatedLogEntry;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/CommitEntry.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/CommitEntry.java
new file mode 100644 (file)
index 0000000..5afd492
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+/**
+ * Message sent to commit an entry to the log
+ */
+public class CommitEntry {
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ElectionTimeout.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/ElectionTimeout.java
new file mode 100644 (file)
index 0000000..0a4b8fa
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+public class ElectionTimeout {
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/PersistEntry.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/PersistEntry.java
new file mode 100644 (file)
index 0000000..7afe0b5
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+/**
+ * Message sent to persist an entry to the transaction journal
+ */
+public class PersistEntry {
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/Replicate.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/Replicate.java
new file mode 100644 (file)
index 0000000..6ff7cfc
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+
+public class Replicate {
+    private final ActorRef clientActor;
+    private final String identifier;
+    private final ReplicatedLogEntry replicatedLogEntry;
+
+    public Replicate(ActorRef clientActor, String identifier,
+        ReplicatedLogEntry replicatedLogEntry) {
+
+        this.clientActor = clientActor;
+        this.identifier = identifier;
+        this.replicatedLogEntry = replicatedLogEntry;
+    }
+
+    public ActorRef getClientActor() {
+        return clientActor;
+    }
+
+    public String getIdentifier() {
+        return identifier;
+    }
+
+    public ReplicatedLogEntry getReplicatedLogEntry() {
+        return replicatedLogEntry;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SaveSnapshot.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SaveSnapshot.java
new file mode 100644 (file)
index 0000000..20e5927
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+/**
+ * This message is sent by a RaftActor to itself so that a subclass can process
+ * it and use it to save its state.
+ */
+public class SaveSnapshot {
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SendHeartBeat.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SendHeartBeat.java
new file mode 100644 (file)
index 0000000..5048cbb
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+/**
+ * This message is sent to the Leader to prompt it to send a heartbeat
+ * to its followers.
+ *
+ * Typically the Leader sends this message to itself on a schedule.
+ */
+public class SendHeartBeat {
+}
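The comment above implies that the Leader arranges these prompts itself. A minimal sketch of such scheduling with the Akka scheduler, assuming a RaftActorContext and a purely illustrative 100 ms interval:

import java.util.concurrent.TimeUnit;
import scala.concurrent.duration.FiniteDuration;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.internal.messages.SendHeartBeat;

public class HeartbeatScheduler {
    // Illustrative interval; a real value would come from configuration.
    private static final FiniteDuration HEARTBEAT_INTERVAL =
        new FiniteDuration(100, TimeUnit.MILLISECONDS);

    // Schedule a single SendHeartBeat back to the owning RaftActor.
    public static void scheduleHeartbeat(RaftActorContext context) {
        context.getActorSystem().scheduler().scheduleOnce(
            HEARTBEAT_INTERVAL,
            context.getActor(),                     // receiver: the RaftActor itself
            new SendHeartBeat(),
            context.getActorSystem().dispatcher(),  // execution context
            context.getActor());                    // sender
    }
}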
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SendInstallSnapshot.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/internal/messages/SendInstallSnapshot.java
new file mode 100644 (file)
index 0000000..0c370aa
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.internal.messages;
+
+public class SendInstallSnapshot {
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java
new file mode 100644 (file)
index 0000000..3cafda9
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.messages;
+
+public abstract class AbstractRaftRPC implements RaftRPC {
+    // term of the sender of this RPC
+    protected long term;
+
+    protected AbstractRaftRPC(long term){
+        this.term = term;
+    }
+
+    public long getTerm() {
+        return term;
+    }
+
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java
new file mode 100644 (file)
index 0000000..9bb5029
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.messages;
+
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+
+import java.util.List;
+
+/**
+ * Invoked by leader to replicate log entries (§5.3); also used as
+ * heartbeat (§5.2).
+ */
+public class AppendEntries extends AbstractRaftRPC {
+    // So that follower can redirect clients
+    private final String leaderId;
+
+    // Index of log entry immediately preceding new ones
+    private final long prevLogIndex;
+
+    // term of prevLogIndex entry
+    private final long prevLogTerm;
+
+    // log entries to store (empty for heartbeat;
+    // may send more than one for efficiency)
+    private final List<ReplicatedLogEntry> entries;
+
+    // leader's commitIndex
+    private final long leaderCommit;
+
+    public AppendEntries(long term, String leaderId, long prevLogIndex,
+        long prevLogTerm, List<ReplicatedLogEntry> entries, long leaderCommit) {
+        super(term);
+        this.leaderId = leaderId;
+        this.prevLogIndex = prevLogIndex;
+        this.prevLogTerm = prevLogTerm;
+        this.entries = entries;
+        this.leaderCommit = leaderCommit;
+    }
+
+    public String getLeaderId() {
+        return leaderId;
+    }
+
+    public long getPrevLogIndex() {
+        return prevLogIndex;
+    }
+
+    public long getPrevLogTerm() {
+        return prevLogTerm;
+    }
+
+    public List<ReplicatedLogEntry> getEntries() {
+        return entries;
+    }
+
+    public long getLeaderCommit() {
+        return leaderCommit;
+    }
+
+    @Override public String toString() {
+        return "AppendEntries{" +
+            "leaderId='" + leaderId + '\'' +
+            ", prevLogIndex=" + prevLogIndex +
+            ", prevLogTerm=" + prevLogTerm +
+            ", entries=" + entries +
+            ", leaderCommit=" + leaderCommit +
+            '}';
+    }
+}
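A heartbeat is simply an AppendEntries that carries no entries. A minimal sketch with purely illustrative values (term 5, previous entry at index 9 / term 5, commit index 9), assuming the classes above are available on the classpath:

import java.util.Collections;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;

public class HeartbeatExample {
    // An empty entry list turns the message into a pure heartbeat.
    public static AppendEntries createHeartbeat() {
        return new AppendEntries(5, "leader-1", 9, 5,
            Collections.<ReplicatedLogEntry>emptyList(), 9);
    }
}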
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java
new file mode 100644 (file)
index 0000000..7524d8f
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.messages;
+
+/**
+ * Reply for the AppendEntries message
+ */
+public class AppendEntriesReply extends AbstractRaftRPC {
+
+    // true if follower contained entry matching
+    // prevLogIndex and prevLogTerm
+    private final boolean success;
+
+    // The index of the last entry in the follower's log
+    // This will be used to set the matchIndex for the follower on the
+    // Leader
+    private final long logLastIndex;
+
+    private final long logLastTerm;
+
+    // The followerId - this will be used to figure out which follower is
+    // responding
+    private final String followerId;
+
+    public AppendEntriesReply(String followerId, long term, boolean success, long logLastIndex, long logLastTerm) {
+        super(term);
+
+        this.followerId = followerId;
+        this.success = success;
+        this.logLastIndex = logLastIndex;
+        this.logLastTerm = logLastTerm;
+    }
+
+    public long getTerm() {
+        return term;
+    }
+
+    public boolean isSuccess() {
+        return success;
+    }
+
+    public long getLogLastIndex() {
+        return logLastIndex;
+    }
+
+    public long getLogLastTerm() {
+        return logLastTerm;
+    }
+
+    public String getFollowerId() {
+        return followerId;
+    }
+}
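A follower answers each AppendEntries with one of these replies, reporting its own id, term and last log position so the leader can update the follower's matchIndex. A minimal sketch with illustrative values only:

import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;

public class AppendEntriesReplyExample {
    // Illustrative: follower-1 accepted entries up to index 10 in term 2.
    public static AppendEntriesReply successReply() {
        return new AppendEntriesReply("follower-1", 2, true, 10, 2);
    }
}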
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java
new file mode 100644 (file)
index 0000000..888854f
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.messages;
+
+public class InstallSnapshot extends AbstractRaftRPC {
+
+    private final String leaderId;
+    private final long lastIncludedIndex;
+    private final long lastIncludedTerm;
+    private final Object data;
+
+    public InstallSnapshot(long term, String leaderId, long lastIncludedIndex, long lastIncludedTerm, Object data) {
+        super(term);
+        this.leaderId = leaderId;
+        this.lastIncludedIndex = lastIncludedIndex;
+        this.lastIncludedTerm = lastIncludedTerm;
+        this.data = data;
+    }
+
+    public String getLeaderId() {
+        return leaderId;
+    }
+
+    public long getLastIncludedIndex() {
+        return lastIncludedIndex;
+    }
+
+    public long getLastIncludedTerm() {
+        return lastIncludedTerm;
+    }
+
+    public Object getData() {
+        return data;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java
new file mode 100644 (file)
index 0000000..85b89b7
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.messages;
+
+public class InstallSnapshotReply extends AbstractRaftRPC {
+
+    // The followerId - this will be used to figure out which follower is
+    // responding
+    private final String followerId;
+
+    public InstallSnapshotReply(long term, String followerId) {
+        super(term);
+        this.followerId = followerId;
+    }
+
+    public String getFollowerId() {
+        return followerId;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RaftRPC.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RaftRPC.java
new file mode 100644 (file)
index 0000000..a770e54
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.messages;
+
+public interface RaftRPC {
+    long getTerm();
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java
new file mode 100644 (file)
index 0000000..981da17
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.messages;
+
+/**
+ * Invoked by candidates to gather votes (§5.2).
+ */
+public class RequestVote extends AbstractRaftRPC {
+
+    // candidate requesting vote
+    private final String candidateId;
+
+    // index of candidate’s last log entry (§5.4)
+    private final long lastLogIndex;
+
+    // term of candidate’s last log entry (§5.4)
+    private final long lastLogTerm;
+
+    public RequestVote(long term, String candidateId, long lastLogIndex,
+        long lastLogTerm) {
+        super(term);
+        this.candidateId = candidateId;
+        this.lastLogIndex = lastLogIndex;
+        this.lastLogTerm = lastLogTerm;
+    }
+
+    public long getTerm() {
+        return term;
+    }
+
+    public String getCandidateId() {
+        return candidateId;
+    }
+
+    public long getLastLogIndex() {
+        return lastLogIndex;
+    }
+
+    public long getLastLogTerm() {
+        return lastLogTerm;
+    }
+}
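A candidate fills RequestVote from its current term and the last entry of its log. A minimal sketch using the RaftActorContext and ReplicatedLog interfaces referenced elsewhere in this patch; the helper class itself is illustrative:

import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.ReplicatedLog;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;

public class RequestVoteFactory {
    // Assumes the candidate has already incremented its term for the new election.
    public static RequestVote create(RaftActorContext context) {
        ReplicatedLog log = context.getReplicatedLog();
        return new RequestVote(
            context.getTermInformation().getCurrentTerm(),
            context.getId(),      // candidateId
            log.lastIndex(),      // index of the candidate's last log entry
            log.lastTerm());      // term of the candidate's last log entry
    }
}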
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java
new file mode 100644 (file)
index 0000000..816120c
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.messages;
+
+public class RequestVoteReply extends AbstractRaftRPC {
+
+    // true means the candidate received the vote
+    private final boolean voteGranted;
+
+    public RequestVoteReply(long term, boolean voteGranted) {
+        super(term);
+        this.voteGranted = voteGranted;
+    }
+
+    public long getTerm() {
+        return term;
+    }
+
+    public boolean isVoteGranted() {
+        return voteGranted;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf b/opendaylight/md-sal/sal-akka-raft/src/main/resources/application.conf
new file mode 100644 (file)
index 0000000..494a99e
--- /dev/null
@@ -0,0 +1,12 @@
+akka {
+    loglevel = "DEBUG"
+    actor {
+        serializers {
+          java = "akka.serialization.JavaSerializer"
+        }
+
+        serialization-bindings {
+            "org.opendaylight.controller.cluster.raft.RaftActor$ReplicatedLogImplEntry" = java
+        }
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractActorTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractActorTest.java
new file mode 100644 (file)
index 0000000..1971432
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorSystem;
+import akka.testkit.JavaTestKit;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+public abstract class AbstractActorTest {
+    private static ActorSystem system;
+
+    @BeforeClass
+    public static void setUpClass() {
+        System.setProperty("shard.persistent", "false");
+        system = ActorSystem.create("test");
+    }
+
+    @AfterClass
+    public static void tearDownClass() {
+        JavaTestKit.shutdownActorSystem(system);
+        system = null;
+    }
+
+    protected ActorSystem getSystem() {
+        return system;
+    }
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java
new file mode 100644 (file)
index 0000000..77d0071
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class MockRaftActorContext implements RaftActorContext {
+
+    private String id;
+    private ActorSystem system;
+    private ActorRef actor;
+    private long index = 0;
+    private long lastApplied = 0;
+    private final ElectionTerm electionTerm;
+    private ReplicatedLog replicatedLog;
+    private Map<String, String> peerAddresses = new HashMap<>();
+
+    public MockRaftActorContext(){
+        electionTerm = null;
+
+        initReplicatedLog();
+    }
+
+    public MockRaftActorContext(String id, ActorSystem system, ActorRef actor){
+        this.id = id;
+        this.system = system;
+        this.actor = actor;
+
+        final String id1 = id;
+        electionTerm = new ElectionTerm() {
+            /**
+             * Identifier of the actor whose election term information this is
+             */
+            private final String id = id1;
+            private long currentTerm = 0;
+            private String votedFor = "";
+
+            public long getCurrentTerm() {
+                return currentTerm;
+            }
+
+            public String getVotedFor() {
+                return votedFor;
+            }
+
+            public void update(long currentTerm, String votedFor){
+                this.currentTerm = currentTerm;
+                this.votedFor = votedFor;
+
+                // TODO : Write to some persistent state
+            }
+
+            @Override public void updateAndPersist(long currentTerm,
+                String votedFor) {
+                update(currentTerm, votedFor);
+            }
+        };
+
+        initReplicatedLog();
+    }
+
+
+    public void initReplicatedLog(){
+        this.replicatedLog = new SimpleReplicatedLog();
+        this.replicatedLog.append(new MockReplicatedLogEntry(1, 1, ""));
+    }
+
+    @Override public ActorRef actorOf(Props props) {
+        return system.actorOf(props);
+    }
+
+    @Override public ActorSelection actorSelection(String path) {
+        return system.actorSelection(path);
+    }
+
+    @Override public String getId() {
+        return id;
+    }
+
+    @Override public ActorRef getActor() {
+        return actor;
+    }
+
+    @Override public ElectionTerm getTermInformation() {
+        return electionTerm;
+    }
+
+    public void setIndex(long index){
+        this.index = index;
+    }
+
+    @Override public long getCommitIndex() {
+        return index;
+    }
+
+    @Override public void setCommitIndex(long commitIndex) {
+        this.index = commitIndex;
+    }
+
+    @Override public void setLastApplied(long lastApplied){
+        this.lastApplied = lastApplied;
+    }
+
+    @Override public long getLastApplied() {
+        return lastApplied;
+    }
+
+    public void setReplicatedLog(ReplicatedLog replicatedLog) {
+        this.replicatedLog = replicatedLog;
+    }
+
+    @Override public ReplicatedLog getReplicatedLog() {
+        return replicatedLog;
+    }
+
+    @Override public ActorSystem getActorSystem() {
+        return this.system;
+    }
+
+    @Override public LoggingAdapter getLogger() {
+        return Logging.getLogger(system, this);
+    }
+
+    @Override public Map<String, String> getPeerAddresses() {
+        return peerAddresses;
+    }
+
+    @Override public String getPeerAddress(String peerId) {
+        return peerAddresses.get(peerId);
+    }
+
+    @Override public void addToPeers(String name, String address) {
+        peerAddresses.put(name, address);
+    }
+
+    @Override public void removePeer(String name) {
+        peerAddresses.remove(name);
+    }
+
+    public void setPeerAddresses(Map<String, String> peerAddresses) {
+        this.peerAddresses = peerAddresses;
+    }
+
+
+
+    public static class SimpleReplicatedLog implements ReplicatedLog {
+        private final List<ReplicatedLogEntry> log = new ArrayList<>();
+
+        @Override public ReplicatedLogEntry get(long index) {
+            if(index >= log.size() || index < 0){
+                return null;
+            }
+            return log.get((int) index);
+        }
+
+        @Override public ReplicatedLogEntry last() {
+            if(log.size() == 0){
+                return null;
+            }
+            return log.get(log.size()-1);
+        }
+
+        @Override public long lastIndex() {
+            if(log.size() == 0){
+                return -1;
+            }
+
+            return last().getIndex();
+        }
+
+        @Override public long lastTerm() {
+            if(log.size() == 0){
+                return -1;
+            }
+
+            return last().getTerm();
+        }
+
+        @Override public void removeFrom(long index) {
+            if(index >= log.size() || index < 0){
+                return;
+            }
+
+            log.subList((int) index, log.size()).clear();
+            //log.remove((int) index);
+        }
+
+        @Override public void removeFromAndPersist(long index) {
+            removeFrom(index);
+        }
+
+        @Override public void append(ReplicatedLogEntry replicatedLogEntry) {
+            log.add(replicatedLogEntry);
+        }
+
+        @Override public void appendAndPersist(
+            ReplicatedLogEntry replicatedLogEntry) {
+            append(replicatedLogEntry);
+        }
+
+        @Override public List<ReplicatedLogEntry> getFrom(long index) {
+            if(index >= log.size() || index < 0){
+                return Collections.emptyList();
+            }
+            List<ReplicatedLogEntry> entries = new ArrayList<>();
+            for(int i=(int) index ; i < log.size() ; i++) {
+                entries.add(get(i));
+            }
+            return entries;
+        }
+
+        @Override public long size() {
+            return log.size();
+        }
+
+        @Override public boolean isPresent(long index) {
+            if(index >= log.size() || index < 0){
+                return false;
+            }
+
+            return true;
+        }
+
+        @Override public boolean isInSnapshot(long index) {
+            return false;
+        }
+
+        @Override public Object getSnapshot() {
+            return null;
+        }
+
+        @Override public long getSnapshotIndex() {
+            return -1;
+        }
+
+        @Override public long getSnapshotTerm() {
+            return -1;
+        }
+    }
+
+    public static class MockReplicatedLogEntry implements ReplicatedLogEntry {
+
+        private final long term;
+        private final long index;
+        private final Object data;
+
+        public MockReplicatedLogEntry(long term, long index, Object data){
+
+            this.term = term;
+            this.index = index;
+            this.data = data;
+        }
+
+        @Override public Object getData() {
+            return data;
+        }
+
+        @Override public long getTerm() {
+            return term;
+        }
+
+        @Override public long getIndex() {
+            return index;
+        }
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java
new file mode 100644 (file)
index 0000000..1a37b92
--- /dev/null
@@ -0,0 +1,364 @@
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.AbstractActorTest;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+public abstract class AbstractRaftActorBehaviorTest extends AbstractActorTest {
+
+    private final ActorRef behaviorActor = getSystem().actorOf(Props.create(
+        DoNothingActor.class));
+
+    /**
+     * This test checks that when a new Raft RPC message is received with a newer
+     * term, the RaftActor transitions to the Follower state.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testHandleRaftRPCWithNewerTerm() throws Exception {
+        new JavaTestKit(getSystem()) {{
+
+            assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(getTestActor(),
+                createAppendEntriesWithNewerTerm());
+
+            assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(getTestActor(),
+                createAppendEntriesReplyWithNewerTerm());
+
+            assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(getTestActor(),
+                createRequestVoteWithNewerTerm());
+
+            assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(getTestActor(),
+                createRequestVoteReplyWithNewerTerm());
+
+
+        }};
+    }
+
+
+    /**
+     * This test verifies that when an AppendEntries is received with a term that
+     * is less than the currentTerm of the RaftActor, the RaftActor does not
+     * change its state and responds with a failure.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testHandleAppendEntriesSenderTermLessThanReceiverTerm()
+        throws Exception {
+        new JavaTestKit(getSystem()) {{
+
+            MockRaftActorContext context = (MockRaftActorContext)
+                createActorContext();
+
+            // First set the receiver's term to a high number (1000)
+            context.getTermInformation().update(1000, "test");
+
+            AppendEntries appendEntries =
+                new AppendEntries(100, "leader-1", 0, 0, null, 101);
+
+            RaftActorBehavior behavior = createBehavior(context);
+
+            // Send an unknown message so that the state of the RaftActor remains unchanged
+            RaftState expected = behavior.handleMessage(getRef(), "unknown");
+
+            RaftState raftState =
+                behavior.handleMessage(getRef(), appendEntries);
+
+            assertEquals(expected, raftState);
+
+            // Also expect an AppendEntriesReply to be sent where success is false
+            final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
+                "AppendEntriesReply") {
+                // do not put code outside this method, will run afterwards
+                protected Boolean match(Object in) {
+                    if (in instanceof AppendEntriesReply) {
+                        AppendEntriesReply reply = (AppendEntriesReply) in;
+                        return reply.isSuccess();
+                    } else {
+                        throw noMatch();
+                    }
+                }
+            }.get();
+
+            assertEquals(false, out);
+
+
+        }};
+    }
+
+
+    @Test
+    public void testHandleAppendEntriesAddSameEntryToLog(){
+        new JavaTestKit(getSystem()) {
+            {
+
+                MockRaftActorContext context = (MockRaftActorContext)
+                    createActorContext();
+
+                // First set the receiver's term to a lower number
+                context.getTermInformation().update(2, "test");
+
+                // Prepare the receivers log
+                MockRaftActorContext.SimpleReplicatedLog log =
+                    new MockRaftActorContext.SimpleReplicatedLog();
+                log.append(
+                    new MockRaftActorContext.MockReplicatedLogEntry(1, 0, "zero"));
+
+                context.setReplicatedLog(log);
+
+                List<ReplicatedLogEntry> entries = new ArrayList<>();
+                entries.add(
+                    new MockRaftActorContext.MockReplicatedLogEntry(1, 0, "zero"));
+
+                AppendEntries appendEntries =
+                    new AppendEntries(2, "leader-1", -1, 1, entries, 0);
+
+                RaftActorBehavior behavior = createBehavior(context);
+
+                if (AbstractRaftActorBehaviorTest.this instanceof CandidateTest) {
+                    // Reset the Candidate's term to make sure it matches
+                    // the term sent by AppendEntries. If this were not done,
+                    // the test would fail because the Candidate would assume
+                    // that the message was sent by a lower-term peer and would
+                    // thus respond with a failure
+                    context.getTermInformation().update(2, "test");
+                }
+
+                // Send an unknown message so that the state of the RaftActor remains unchanged
+                RaftState expected = behavior.handleMessage(getRef(), "unknown");
+
+                RaftState raftState =
+                    behavior.handleMessage(getRef(), appendEntries);
+
+                assertEquals(expected, raftState);
+
+                assertEquals(1, log.size());
+
+
+            }};
+    }
+
+    /**
+     * This test verifies that when a RequestVote is received by the RaftActor
+     * with a term which is greater than the RaftActor's currentTerm and the
+     * sender's log is more up-to-date than the receiver's, the receiver grants
+     * the vote to the sender.
+     */
+    @Test
+    public void testHandleRequestVoteWhenSenderTermGreaterThanCurrentTermAndSenderLogMoreUpToDate() {
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    RaftActorBehavior behavior = createBehavior(
+                        createActorContext(behaviorActor));
+
+                    RaftState raftState = behavior.handleMessage(getTestActor(),
+                        new RequestVote(1000, "test", 10000, 999));
+
+                    if(behavior.state() != RaftState.Follower){
+                        assertEquals(RaftState.Follower, raftState);
+                    } else {
+
+                        final Boolean out =
+                            new ExpectMsg<Boolean>(duration("1 seconds"),
+                                "RequestVoteReply") {
+                                // do not put code outside this method, will run afterwards
+                                protected Boolean match(Object in) {
+                                    if (in instanceof RequestVoteReply) {
+                                        RequestVoteReply reply =
+                                            (RequestVoteReply) in;
+                                        return reply.isVoteGranted();
+                                    } else {
+                                        throw noMatch();
+                                    }
+                                }
+                            }.get();
+
+                        assertEquals(true, out);
+                    }
+                }
+            };
+        }};
+    }
+
+    /**
+     * This test verifies that when a RaftActor receives a RequestVote message
+     * with a term that is greater than its currentTerm but a less up-to-date
+     * log, the receiving RaftActor will not grant the vote to the sender.
+     */
+    @Test
+    public void testHandleRequestVoteWhenSenderTermGreaterThanCurrentTermButSenderLogLessUptoDate() {
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    RaftActorContext actorContext =
+                        createActorContext(behaviorActor);
+
+                    MockRaftActorContext.SimpleReplicatedLog
+                        log = new MockRaftActorContext.SimpleReplicatedLog();
+                    log.append(
+                        new MockRaftActorContext.MockReplicatedLogEntry(20000,
+                            1000000, ""));
+
+                    ((MockRaftActorContext) actorContext).setReplicatedLog(log);
+
+                    RaftActorBehavior behavior = createBehavior(actorContext);
+
+                    RaftState raftState = behavior.handleMessage(getTestActor(),
+                        new RequestVote(1000, "test", 10000, 999));
+
+                    if(behavior.state() != RaftState.Follower){
+                        assertEquals(RaftState.Follower, raftState);
+                    } else {
+                        final Boolean out =
+                            new ExpectMsg<Boolean>(duration("1 seconds"),
+                                "RequestVoteReply") {
+                                // do not put code outside this method, will run afterwards
+                                protected Boolean match(Object in) {
+                                    if (in instanceof RequestVoteReply) {
+                                        RequestVoteReply reply =
+                                            (RequestVoteReply) in;
+                                        return reply.isVoteGranted();
+                                    } else {
+                                        throw noMatch();
+                                    }
+                                }
+                            }.get();
+
+                        assertEquals(false, out);
+                    }
+                }
+            };
+        }};
+    }
+
+
+
+    /**
+     * This test verifies that the receiving RaftActor will not grant a vote
+     * to a sender if the sender's term is less than the currentTerm of the
+     * recipient RaftActor.
+     */
+    @Test
+    public void testHandleRequestVoteWhenSenderTermLessThanCurrentTerm() {
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    RaftActorContext context =
+                        createActorContext(behaviorActor);
+
+                    context.getTermInformation().update(1000, null);
+
+                    RaftActorBehavior follower = createBehavior(context);
+
+                    follower.handleMessage(getTestActor(),
+                        new RequestVote(999, "test", 10000, 999));
+
+                    final Boolean out =
+                        new ExpectMsg<Boolean>(duration("1 seconds"),
+                            "RequestVoteReply") {
+                            // do not put code outside this method, will run afterwards
+                            protected Boolean match(Object in) {
+                                if (in instanceof RequestVoteReply) {
+                                    RequestVoteReply reply =
+                                        (RequestVoteReply) in;
+                                    return reply.isVoteGranted();
+                                } else {
+                                    throw noMatch();
+                                }
+                            }
+                        }.get();
+
+                    assertEquals(false, out);
+                }
+            };
+        }};
+    }
+
+    protected void assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(
+        ActorRef actorRef, RaftRPC rpc) {
+
+        RaftActorContext actorContext = createActorContext();
+        setLastLogEntry(
+            (MockRaftActorContext) actorContext, 0, 0, "");
+
+        RaftState raftState = createBehavior(actorContext)
+            .handleMessage(actorRef, rpc);
+
+        assertEquals(RaftState.Follower, raftState);
+    }
+
+    protected MockRaftActorContext.SimpleReplicatedLog setLastLogEntry(
+        MockRaftActorContext actorContext, long term, long index, Object data) {
+        return setLastLogEntry(actorContext,
+            new MockRaftActorContext.MockReplicatedLogEntry(term, index, data));
+    }
+
+    protected MockRaftActorContext.SimpleReplicatedLog setLastLogEntry(
+        MockRaftActorContext actorContext, ReplicatedLogEntry logEntry) {
+        MockRaftActorContext.SimpleReplicatedLog
+            log = new MockRaftActorContext.SimpleReplicatedLog();
+        log.append(logEntry);
+        actorContext.setReplicatedLog(log);
+
+        return log;
+    }
+
+    protected abstract RaftActorBehavior createBehavior(
+        RaftActorContext actorContext);
+
+    protected RaftActorBehavior createBehavior() {
+        return createBehavior(createActorContext());
+    }
+
+    protected RaftActorContext createActorContext() {
+        return new MockRaftActorContext();
+    }
+
+    protected RaftActorContext createActorContext(ActorRef actor) {
+        return new MockRaftActorContext("test", getSystem(), actor);
+    }
+
+    protected AppendEntries createAppendEntriesWithNewerTerm() {
+        return new AppendEntries(100, "leader-1", 0, 0, null, 1);
+    }
+
+    protected AppendEntriesReply createAppendEntriesReplyWithNewerTerm() {
+        return new AppendEntriesReply("follower-1", 100, false, 100, 100);
+    }
+
+    protected RequestVote createRequestVoteWithNewerTerm() {
+        return new RequestVote(100, "candidate-1", 10, 100);
+    }
+
+    protected RequestVoteReply createRequestVoteReplyWithNewerTerm() {
+        return new RequestVoteReply(100, false);
+    }
+
+
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/CandidateTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/CandidateTest.java
new file mode 100644 (file)
index 0000000..8bcee58
--- /dev/null
@@ -0,0 +1,299 @@
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.internal.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+public class CandidateTest extends AbstractRaftActorBehaviorTest {
+
+    private final ActorRef candidateActor = getSystem().actorOf(Props.create(
+        DoNothingActor.class));
+
+    private final ActorRef peerActor1 = getSystem().actorOf(Props.create(
+        DoNothingActor.class));
+
+    private final ActorRef peerActor2 = getSystem().actorOf(Props.create(
+        DoNothingActor.class));
+
+    private final ActorRef peerActor3 = getSystem().actorOf(Props.create(
+        DoNothingActor.class));
+
+    private final ActorRef peerActor4 = getSystem().actorOf(Props.create(
+        DoNothingActor.class));
+
+    private final Map<String, String> onePeer = new HashMap<>();
+    private final Map<String, String> twoPeers = new HashMap<>();
+    private final Map<String, String> fourPeers = new HashMap<>();
+
+    @Before
+    public void setUp(){
+        onePeer.put(peerActor1.path().toString(),
+            peerActor1.path().toString());
+
+        twoPeers.put(peerActor1.path().toString(),
+            peerActor1.path().toString());
+        twoPeers.put(peerActor2.path().toString(),
+            peerActor2.path().toString());
+
+        fourPeers.put(peerActor1.path().toString(),
+            peerActor1.path().toString());
+        fourPeers.put(peerActor2.path().toString(),
+            peerActor2.path().toString());
+        fourPeers.put(peerActor3.path().toString(),
+            peerActor3.path().toString());
+        fourPeers.put(peerActor4.path().toString(),
+            peerActor4.path().toString());
+
+
+    }
+
+    @Test
+    public void testWhenACandidateIsCreatedItIncrementsTheCurrentTermAndVotesForItself(){
+        RaftActorContext raftActorContext = createActorContext();
+        long expectedTerm = raftActorContext.getTermInformation().getCurrentTerm();
+
+        new Candidate(raftActorContext);
+
+        assertEquals(expectedTerm+1, raftActorContext.getTermInformation().getCurrentTerm());
+        assertEquals(raftActorContext.getId(), raftActorContext.getTermInformation().getVotedFor());
+    }
+
+    @Test
+    public void testThatAnElectionTimeoutIsTriggered(){
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    Candidate candidate = new Candidate(createActorContext(getTestActor()));
+
+                    final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "ElectionTimeout") {
+                        // do not put code outside this method, will run afterwards
+                        protected Boolean match(Object in) {
+                            if (in instanceof ElectionTimeout) {
+                                 return true;
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get();
+
+                    assertEquals(true, out);
+                }
+            };
+        }};
+    }
+
+    @Test
+    public void testHandleElectionTimeoutWhenThereAreZeroPeers(){
+        RaftActorContext raftActorContext = createActorContext();
+        Candidate candidate =
+            new Candidate(raftActorContext);
+
+        RaftState raftState =
+            candidate.handleMessage(candidateActor, new ElectionTimeout());
+
+        Assert.assertEquals(RaftState.Leader, raftState);
+    }
+
+    @Test
+    public void testHandleElectionTimeoutWhenThereAreTwoNodesInCluster(){
+        MockRaftActorContext raftActorContext =
+            (MockRaftActorContext) createActorContext();
+        raftActorContext.setPeerAddresses(onePeer);
+        Candidate candidate =
+            new Candidate(raftActorContext);
+
+        RaftState raftState =
+            candidate.handleMessage(candidateActor, new ElectionTimeout());
+
+        Assert.assertEquals(RaftState.Candidate, raftState);
+    }
+
+    @Test
+    public void testBecomeLeaderOnReceivingMajorityVotesInThreeNodesInCluster(){
+        MockRaftActorContext raftActorContext =
+            (MockRaftActorContext) createActorContext();
+        raftActorContext.setPeerAddresses(twoPeers);
+        Candidate candidate =
+            new Candidate(raftActorContext);
+
+        RaftState stateOnFirstVote = candidate.handleMessage(peerActor1, new RequestVoteReply(0, true));
+
+        Assert.assertEquals(RaftState.Leader, stateOnFirstVote);
+
+    }
+
+    @Test
+    public void testBecomeLeaderOnReceivingMajorityVotesInFiveNodesInCluster(){
+        MockRaftActorContext raftActorContext =
+            (MockRaftActorContext) createActorContext();
+        raftActorContext.setPeerAddresses(fourPeers);
+        Candidate candidate =
+            new Candidate(raftActorContext);
+
+        RaftState stateOnFirstVote = candidate.handleMessage(peerActor1, new RequestVoteReply(0, true));
+
+        RaftState stateOnSecondVote = candidate.handleMessage(peerActor2, new RequestVoteReply(0, true));
+
+        Assert.assertEquals(RaftState.Candidate, stateOnFirstVote);
+        Assert.assertEquals(RaftState.Leader, stateOnSecondVote);
+
+    }
+
+    @Test
+    public void testResponseToAppendEntriesWithLowerTerm(){
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    Candidate candidate = new Candidate(createActorContext(getTestActor()));
+
+                    candidate.handleMessage(getTestActor(), new AppendEntries(0, "test", 0,0,Collections.EMPTY_LIST, 0));
+
+                    final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "AppendEntriesReply") {
+                        // do not put code outside this method, will run afterwards
+                        protected Boolean match(Object in) {
+                            if (in instanceof AppendEntriesReply) {
+                                AppendEntriesReply reply = (AppendEntriesReply) in;
+                                return reply.isSuccess();
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get();
+
+                    assertEquals(false, out);
+                }
+            };
+        }};
+    }
+
+    @Test
+    public void testResponseToRequestVoteWithLowerTerm(){
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    Candidate candidate = new Candidate(createActorContext(getTestActor()));
+
+                    candidate.handleMessage(getTestActor(), new RequestVote(0, "test", 0, 0));
+
+                    final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
+                        // do not put code outside this method, will run afterwards
+                        protected Boolean match(Object in) {
+                            if (in instanceof RequestVoteReply) {
+                                RequestVoteReply reply = (RequestVoteReply) in;
+                                return reply.isVoteGranted();
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get();
+
+                    assertEquals(false, out);
+                }
+            };
+        }};
+    }
+
+    @Test
+    public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNull(){
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    RaftActorContext context = createActorContext(getTestActor());
+
+                    context.getTermInformation().update(1000, null);
+
+                    // Once a candidate is created it immediately increments the current term,
+                    // so after construction the currentTerm should be 1001
+                    RaftActorBehavior follower = createBehavior(context);
+
+                    follower.handleMessage(getTestActor(), new RequestVote(1001, "test", 10000, 999));
+
+                    final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
+                        // do not put code outside this method, will run afterwards
+                        protected Boolean match(Object in) {
+                            if (in instanceof RequestVoteReply) {
+                                RequestVoteReply reply = (RequestVoteReply) in;
+                                return reply.isVoteGranted();
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get();
+
+                    assertEquals(true, out);
+                }
+            };
+        }};
+    }
+
+    @Test
+    public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNotTheSameAsCandidateId(){
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    RaftActorContext context = createActorContext(getTestActor());
+
+                    context.getTermInformation().update(1000, "test");
+
+                    RaftActorBehavior follower = createBehavior(context);
+
+                    follower.handleMessage(getTestActor(), new RequestVote(1001, "candidate", 10000, 999));
+
+                    final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
+                        // do not put code outside this method, will run afterwards
+                        protected Boolean match(Object in) {
+                            if (in instanceof RequestVoteReply) {
+                                RequestVoteReply reply = (RequestVoteReply) in;
+                                return reply.isVoteGranted();
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get();
+
+                    assertEquals(false, out);
+                }
+            };
+        }};
+    }
+
+
+
+    @Override protected RaftActorBehavior createBehavior(RaftActorContext actorContext) {
+        return new Candidate(actorContext);
+    }
+
+    @Override protected RaftActorContext createActorContext() {
+        return new MockRaftActorContext("test", getSystem(), candidateActor);
+    }
+
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java
new file mode 100644 (file)
index 0000000..b7c371d
--- /dev/null
@@ -0,0 +1,410 @@
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.internal.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class FollowerTest extends AbstractRaftActorBehaviorTest {
+
+    private final ActorRef followerActor = getSystem().actorOf(Props.create(
+        DoNothingActor.class));
+
+
+    @Override protected RaftActorBehavior createBehavior(RaftActorContext actorContext) {
+        return new Follower(actorContext);
+    }
+
+    @Override protected RaftActorContext createActorContext() {
+        return new MockRaftActorContext("test", getSystem(), followerActor);
+    }
+
+    @Test
+    public void testThatAnElectionTimeoutIsTriggered(){
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    Follower follower = new Follower(createActorContext(getTestActor()));
+
+                    final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "ElectionTimeout") {
+                        // do not put code outside this method, will run afterwards
+                        protected Boolean match(Object in) {
+                            if (in instanceof ElectionTimeout) {
+                                return true;
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get();
+
+                    assertEquals(true, out);
+                }
+            };
+        }};
+    }
+
+    @Test
+    public void testHandleElectionTimeout(){
+        RaftActorContext raftActorContext = createActorContext();
+        Follower follower =
+            new Follower(raftActorContext);
+
+        RaftState raftState =
+            follower.handleMessage(followerActor, new ElectionTimeout());
+
+        Assert.assertEquals(RaftState.Candidate, raftState);
+    }
+
+    @Test
+    public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNull(){
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    RaftActorContext context = createActorContext(getTestActor());
+
+                    context.getTermInformation().update(1000, null);
+
+                    RaftActorBehavior follower = createBehavior(context);
+
+                    follower.handleMessage(getTestActor(), new RequestVote(1000, "test", 10000, 999));
+
+                    final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
+                        // do not put code outside this method, will run afterwards
+                        protected Boolean match(Object in) {
+                            if (in instanceof RequestVoteReply) {
+                                RequestVoteReply reply = (RequestVoteReply) in;
+                                return reply.isVoteGranted();
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get();
+
+                    assertEquals(true, out);
+                }
+            };
+        }};
+    }
+
+    @Test
+    public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNotTheSameAsCandidateId(){
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    RaftActorContext context = createActorContext(getTestActor());
+
+                    context.getTermInformation().update(1000, "test");
+
+                    RaftActorBehavior follower = createBehavior(context);
+
+                    follower.handleMessage(getTestActor(), new RequestVote(1000, "candidate", 10000, 999));
+
+                    final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
+                        // do not put code outside this method, will run afterwards
+                        protected Boolean match(Object in) {
+                            if (in instanceof RequestVoteReply) {
+                                RequestVoteReply reply = (RequestVoteReply) in;
+                                return reply.isVoteGranted();
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get();
+
+                    assertEquals(false, out);
+                }
+            };
+        }};
+    }
+
+    /**
+     * This test verifies that when an AppendEntries RPC is received by a RaftActor
+     * with a commitIndex that is greater than what has been applied to the
+     * state machine of the RaftActor, the RaftActor applies the state and
+     * sets its current applied state to the commitIndex of the sender.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testHandleAppendEntriesWithNewerCommitIndex() throws Exception {
+        new JavaTestKit(getSystem()) {{
+
+            RaftActorContext context =
+                createActorContext();
+
+            context.setLastApplied(100);
+            setLastLogEntry((MockRaftActorContext) context, 0, 0, "");
+
+            List<ReplicatedLogEntry> entries =
+                Arrays.asList(
+                    (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(100, 101,
+                        "foo")
+                );
+
+            // The new commitIndex is 101
+            AppendEntries appendEntries =
+                new AppendEntries(100, "leader-1", 0, 0, entries, 101);
+
+            RaftState raftState =
+                createBehavior(context).handleMessage(getRef(), appendEntries);
+
+            assertEquals(101L, context.getLastApplied());
+
+        }};
+    }
+
+    /**
+     * This test verifies that when an AppendEntries is received with a prevLogTerm
+     * which does not match the term of the RaftActor's log entry at prevLogIndex,
+     * the RaftActor does not change its state and returns a failure.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testHandleAppendEntriesSenderPrevLogTermNotSameAsReceiverPrevLogTerm()
+        throws Exception {
+        new JavaTestKit(getSystem()) {{
+
+            MockRaftActorContext context = (MockRaftActorContext)
+                createActorContext();
+
+            // First set the receiver's term to a lower number
+            context.getTermInformation().update(95, "test");
+
+            // Set the last log entry term for the receiver to be greater than
+            // what we will be sending as the prevLogTerm in AppendEntries
+            MockRaftActorContext.SimpleReplicatedLog mockReplicatedLog =
+                setLastLogEntry(context, 20, 0, "");
+
+            // AppendEntries is now sent with a bigger term
+            // this will set the receiver's term to be the same as the sender's term
+            AppendEntries appendEntries =
+                new AppendEntries(100, "leader-1", 0, 0, null, 101);
+
+            RaftActorBehavior behavior = createBehavior(context);
+
+            // Send an unknown message so that the state of the RaftActor remains unchanged
+            RaftState expected = behavior.handleMessage(getRef(), "unknown");
+
+            RaftState raftState =
+                behavior.handleMessage(getRef(), appendEntries);
+
+            assertEquals(expected, raftState);
+
+            // Also expect an AppendEntriesReply to be sent where success is false
+            final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
+                "AppendEntriesReply") {
+                // do not put code outside this method, will run afterwards
+                protected Boolean match(Object in) {
+                    if (in instanceof AppendEntriesReply) {
+                        AppendEntriesReply reply = (AppendEntriesReply) in;
+                        return reply.isSuccess();
+                    } else {
+                        throw noMatch();
+                    }
+                }
+            }.get();
+
+            assertEquals(false, out);
+
+
+        }};
+    }
+
+
+
+    /**
+     * This test verifies that when a new AppendEntries message is received with
+     * new entries, and the logs of the sender and receiver match, the new
+     * entries are appended to the log and the log size grows by the number of
+     * entries received in the AppendEntries message.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testHandleAppendEntriesAddNewEntries() throws Exception {
+        new JavaTestKit(getSystem()) {{
+
+            MockRaftActorContext context = (MockRaftActorContext)
+                createActorContext();
+
+            // First set the receiver's term to a lower number
+            context.getTermInformation().update(1, "test");
+
+            // Prepare the receiver's log
+            MockRaftActorContext.SimpleReplicatedLog log =
+                new MockRaftActorContext.SimpleReplicatedLog();
+            log.append(
+                new MockRaftActorContext.MockReplicatedLogEntry(1, 0, "zero"));
+            log.append(
+                new MockRaftActorContext.MockReplicatedLogEntry(1, 1, "one"));
+            log.append(
+                new MockRaftActorContext.MockReplicatedLogEntry(1, 2, "two"));
+
+            context.setReplicatedLog(log);
+
+            // Prepare the entries to be sent with AppendEntries
+            List<ReplicatedLogEntry> entries = new ArrayList<>();
+            entries.add(
+                new MockRaftActorContext.MockReplicatedLogEntry(1, 3, "three"));
+            entries.add(
+                new MockRaftActorContext.MockReplicatedLogEntry(1, 4, "four"));
+
+            // Send appendEntries with the same term as was set on the receiver
+            // before the new behavior was created (1 in this case)
+            // This will not work for a Candidate because as soon as a Candidate
+            // is created it increments the term
+            AppendEntries appendEntries =
+                new AppendEntries(1, "leader-1", 2, 1, entries, 4);
+
+            RaftActorBehavior behavior = createBehavior(context);
+
+            // Send an unknown message so that the state of the RaftActor remains unchanged
+            RaftState expected = behavior.handleMessage(getRef(), "unknown");
+
+            RaftState raftState =
+                behavior.handleMessage(getRef(), appendEntries);
+
+            assertEquals(expected, raftState);
+            assertEquals(5, log.last().getIndex() + 1);
+            assertNotNull(log.get(3));
+            assertNotNull(log.get(4));
+
+            // Also expect an AppendEntriesReply to be sent where success is true
+            final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
+                "AppendEntriesReply") {
+                // do not put code outside this method, will run afterwards
+                protected Boolean match(Object in) {
+                    if (in instanceof AppendEntriesReply) {
+                        AppendEntriesReply reply = (AppendEntriesReply) in;
+                        return reply.isSuccess();
+                    } else {
+                        throw noMatch();
+                    }
+                }
+            }.get();
+
+            assertEquals(true, out);
+
+
+        }};
+    }
+
+
+
+    /**
+     * This test verifies that when a new AppendEntries message is received with
+     * new entries, and the logs of the sender and receiver are out of sync, the
+     * log is first corrected by removing the out-of-sync entries and then
+     * appending the new entries sent with the AppendEntries message.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testHandleAppendEntriesCorrectReceiverLogEntries()
+        throws Exception {
+        new JavaTestKit(getSystem()) {{
+
+            MockRaftActorContext context = (MockRaftActorContext)
+                createActorContext();
+
+            // First set the receiver's term to a lower number
+            context.getTermInformation().update(2, "test");
+
+            // Prepare the receiver's log
+            MockRaftActorContext.SimpleReplicatedLog log =
+                new MockRaftActorContext.SimpleReplicatedLog();
+            log.append(
+                new MockRaftActorContext.MockReplicatedLogEntry(1, 0, "zero"));
+            log.append(
+                new MockRaftActorContext.MockReplicatedLogEntry(1, 1, "one"));
+            log.append(
+                new MockRaftActorContext.MockReplicatedLogEntry(1, 2, "two"));
+
+            context.setReplicatedLog(log);
+
+            // Prepare the entries to be sent with AppendEntries
+            List<ReplicatedLogEntry> entries = new ArrayList<>();
+            entries.add(
+                new MockRaftActorContext.MockReplicatedLogEntry(2, 2, "two-1"));
+            entries.add(
+                new MockRaftActorContext.MockReplicatedLogEntry(2, 3, "three"));
+
+            // Send appendEntries with the same term as was set on the receiver
+            // before the new behavior was created (2 in this case)
+            // This will not work for a Candidate because as soon as a Candidate
+            // is created it increments the term
+            AppendEntries appendEntries =
+                new AppendEntries(2, "leader-1", 1, 1, entries, 3);
+
+            RaftActorBehavior behavior = createBehavior(context);
+
+            // Send an unknown message so that the state of the RaftActor remains unchanged
+            RaftState expected = behavior.handleMessage(getRef(), "unknown");
+
+            RaftState raftState =
+                behavior.handleMessage(getRef(), appendEntries);
+
+            assertEquals(expected, raftState);
+
+            // The entry at index 2 will be found out-of-sync with the leader
+            // and will be removed
+            // Then the two new entries will be added to the log
+            // Thus making the log to have 4 entries
+            assertEquals(4, log.last().getIndex() + 1);
+            assertNotNull(log.get(2));
+
+
+            assertEquals("one", log.get(1).getData());
+
+            // Check that the entry at index 2 has the new data
+            assertEquals("two-1", log.get(2).getData());
+
+            assertEquals("three", log.get(3).getData());
+            assertNotNull(log.get(3));
+
+            // Also expect an AppendEntriesReply to be sent where success is true
+            final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
+                "AppendEntriesReply") {
+                // do not put code outside this method, will run afterwards
+                protected Boolean match(Object in) {
+                    if (in instanceof AppendEntriesReply) {
+                        AppendEntriesReply reply = (AppendEntriesReply) in;
+                        return reply.isSuccess();
+                    } else {
+                        throw noMatch();
+                    }
+                }
+            }.get();
+
+            assertEquals(true, out);
+
+
+        }};
+    }
+
+}
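testHandleAppendEntriesCorrectReceiverLogEntries above exercises the Raft log-reconciliation step: entries that conflict with the leader are dropped before the leader's entries are appended. The following is a simplified, self-contained sketch of that step, not the project's Follower implementation; the Entry class, method names, and the assumption that an entry's index equals its list position are illustrative.

import java.util.ArrayList;
import java.util.List;

public class LogReconciliationSketch {

    // Illustrative log entry holding (term, index, data).
    static final class Entry {
        final long term;
        final long index;
        final String data;
        Entry(long term, long index, String data) {
            this.term = term; this.index = index; this.data = data;
        }
    }

    // Append the sender's entries, first removing any local entries that conflict
    // (same index, different term), as the test above expects.
    static void appendEntries(List<Entry> log, List<Entry> newEntries) {
        for (Entry incoming : newEntries) {
            int i = (int) incoming.index;
            if (i < log.size() && log.get(i).term != incoming.term) {
                // Conflict: drop the conflicting entry and everything after it.
                while (log.size() > i) {
                    log.remove(log.size() - 1);
                }
            }
            if (i >= log.size()) {
                log.add(incoming);
            }
        }
    }

    public static void main(String[] args) {
        List<Entry> log = new ArrayList<>();
        log.add(new Entry(1, 0, "zero"));
        log.add(new Entry(1, 1, "one"));
        log.add(new Entry(1, 2, "two"));

        List<Entry> incoming = new ArrayList<>();
        incoming.add(new Entry(2, 2, "two-1"));
        incoming.add(new Entry(2, 3, "three"));

        appendEntries(log, incoming);
        // Log now holds: zero, one, two-1, three (4 entries), mirroring the test's assertions.
        System.out.println(log.size());      // 4
        System.out.println(log.get(2).data); // two-1
    }
}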
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java
new file mode 100644 (file)
index 0000000..35bf6f1
--- /dev/null
@@ -0,0 +1,205 @@
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import junit.framework.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.internal.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.internal.messages.Replicate;
+import org.opendaylight.controller.cluster.raft.internal.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+public class LeaderTest extends AbstractRaftActorBehaviorTest {
+
+    private ActorRef leaderActor =
+        getSystem().actorOf(Props.create(DoNothingActor.class));
+    private ActorRef senderActor =
+        getSystem().actorOf(Props.create(DoNothingActor.class));
+
+    @Test
+    public void testHandleMessageForUnknownMessage() throws Exception {
+        new JavaTestKit(getSystem()) {{
+            Leader leader =
+                new Leader(createActorContext());
+
+            // handle message should return the Leader state when it receives an
+            // unknown message
+            RaftState state = leader.handleMessage(senderActor, "foo");
+            Assert.assertEquals(RaftState.Leader, state);
+        }};
+    }
+
+
+    @Test
+    public void testThatLeaderSendsAHeartbeatMessageToAllFollowers() {
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    ActorRef followerActor = getTestActor();
+
+                    MockRaftActorContext actorContext =
+                        (MockRaftActorContext) createActorContext();
+
+                    Map<String, String> peerAddresses = new HashMap<>();
+
+                    peerAddresses.put(followerActor.path().toString(),
+                        followerActor.path().toString());
+
+                    actorContext.setPeerAddresses(peerAddresses);
+
+                    Leader leader = new Leader(actorContext);
+                    leader.handleMessage(senderActor, new SendHeartBeat());
+
+                    final String out =
+                        new ExpectMsg<String>(duration("1 seconds"),
+                            "match hint") {
+                            // do not put code outside this method, will run afterwards
+                            protected String match(Object in) {
+                                if (in instanceof AppendEntries) {
+                                    if (((AppendEntries) in).getTerm()
+                                        == 0) {
+                                        return "match";
+                                    }
+                                    return null;
+                                } else {
+                                    throw noMatch();
+                                }
+                            }
+                        }.get(); // this extracts the received message
+
+                    assertEquals("match", out);
+
+                }
+
+
+            };
+        }};
+    }
+
+    @Test
+    public void testHandleReplicateMessageSendAppendEntriesToFollower() {
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    ActorRef followerActor = getTestActor();
+
+                    MockRaftActorContext actorContext =
+                        (MockRaftActorContext) createActorContext();
+
+                    Map<String, String> peerAddresses = new HashMap<>();
+
+                    peerAddresses.put(followerActor.path().toString(),
+                        followerActor.path().toString());
+
+                    actorContext.setPeerAddresses(peerAddresses);
+
+                    Leader leader = new Leader(actorContext);
+                    RaftState raftState = leader
+                        .handleMessage(senderActor, new Replicate(null, null,
+                            new MockRaftActorContext.MockReplicatedLogEntry(1,
+                                100,
+                                "foo")
+                        ));
+
+                    // State should not change
+                    assertEquals(RaftState.Leader, raftState);
+
+                    final String out =
+                        new ExpectMsg<String>(duration("1 seconds"),
+                            "match hint") {
+                            // do not put code outside this method, will run afterwards
+                            protected String match(Object in) {
+                                if (in instanceof AppendEntries) {
+                                    if (((AppendEntries) in).getTerm()
+                                        == 0) {
+                                        return "match";
+                                    }
+                                    return null;
+                                } else {
+                                    throw noMatch();
+                                }
+                            }
+                        }.get(); // this extracts the received message
+
+                    assertEquals("match", out);
+
+                }
+
+
+            };
+        }};
+    }
+
+    @Test
+    public void testHandleReplicateMessageWhenThereAreNoFollowers() {
+        new JavaTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    ActorRef raftActor = getTestActor();
+
+                    MockRaftActorContext actorContext =
+                        new MockRaftActorContext("test", getSystem(), raftActor);
+
+                    Leader leader = new Leader(actorContext);
+                    RaftState raftState = leader
+                        .handleMessage(senderActor, new Replicate(null, "state-id",
+                            new MockRaftActorContext.MockReplicatedLogEntry(1,
+                                100,
+                                "foo")
+                        ));
+
+                    // State should not change
+                    assertEquals(RaftState.Leader, raftState);
+
+                    assertEquals(100, actorContext.getCommitIndex());
+
+                    final String out =
+                        new ExpectMsg<String>(duration("1 seconds"),
+                            "match hint") {
+                            // do not put code outside this method, will run afterwards
+                            protected String match(Object in) {
+                                if (in instanceof ApplyState) {
+                                    if (((ApplyState) in).getIdentifier().equals("state-id")) {
+                                        return "match";
+                                    }
+                                    return null;
+                                } else {
+                                    throw noMatch();
+                                }
+                            }
+                        }.get(); // this extracts the received message
+
+                    assertEquals("match", out);
+
+                }
+
+
+            };
+        }};
+    }
+
+    @Override protected RaftActorBehavior createBehavior(
+        RaftActorContext actorContext) {
+        return new Leader(actorContext);
+    }
+
+    @Override protected RaftActorContext createActorContext() {
+        return new MockRaftActorContext("test", getSystem(), leaderActor);
+    }
+}
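The two LeaderTest cases above expect the leader to heartbeat every known peer and, when there are no followers, to commit a replicated entry immediately. Below is a simplified, self-contained sketch of that expected behavior; it is not the project's Leader implementation, and the Sender abstraction, AppendEntriesMsg class, and field names are illustrative.

import java.util.HashMap;
import java.util.Map;

public class LeaderSketch {

    // Illustrative message-sending abstraction standing in for ActorRef.tell().
    interface Sender {
        void send(String peerAddress, Object message);
    }

    static final class AppendEntriesMsg {
        final long term;
        AppendEntriesMsg(long term) { this.term = term; }
    }

    private final Map<String, String> peerAddresses = new HashMap<>();
    private long commitIndex = -1;

    // On a SendHeartBeat tick the leader sends AppendEntries to every known peer,
    // as testThatLeaderSendsAHeartbeatMessageToAllFollowers expects.
    void sendHeartBeat(long currentTerm, Sender sender) {
        for (Map.Entry<String, String> peer : peerAddresses.entrySet()) {
            sender.send(peer.getValue(), new AppendEntriesMsg(currentTerm));
        }
    }

    // With no followers a replicated entry is committed right away, mirroring
    // testHandleReplicateMessageWhenThereAreNoFollowers (the real actor would
    // also send an ApplyState message at this point).
    void replicate(long entryIndex, Sender sender) {
        if (peerAddresses.isEmpty()) {
            commitIndex = entryIndex;
        } else {
            sendHeartBeat(0, sender); // followers receive the new entry via AppendEntries
        }
    }
}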
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/DoNothingActor.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/DoNothingActor.java
new file mode 100644 (file)
index 0000000..741c473
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.actor.UntypedActor;
+
+public class DoNothingActor extends UntypedActor {
+    @Override public void onReceive(Object message) throws Exception {
+
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf b/opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf
new file mode 100644 (file)
index 0000000..2647850
--- /dev/null
@@ -0,0 +1,11 @@
+akka {
+    actor {
+        serializers {
+          java = "akka.serialization.JavaSerializer"
+        }
+
+        serialization-bindings {
+            "org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification" = java
+        }
+    }
+}
\ No newline at end of file
index eac65ad6775a8b6ac6a9b8c99b8f710a78d9a484..24ec89ebb27da82f92fa569216ee7db5fc07d6cc 100644 (file)
@@ -4,7 +4,7 @@ import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
 import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 
-public interface BindingTransactionChain extends TransactionChain<InstanceIdentifier<?>, DataObject> {
+public interface BindingTransactionChain extends TransactionFactory, TransactionChain<InstanceIdentifier<?>, DataObject> {
 
     @Override
     ReadOnlyTransaction newReadOnlyTransaction();
index b60d8ff1be71ba7dc0e22a93f240445868241e10..619a08eac5f30a6f3a7ff7f5f154b1be4c74bdbc 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.md.sal.binding.api;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionChainFactory;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
@@ -20,7 +21,8 @@ import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
  * <p>
  * For more information on usage, please see the documentation in {@link AsyncDataBroker}.
  */
-public interface DataBroker extends AsyncDataBroker<InstanceIdentifier<?>, DataObject, DataChangeListener>, BindingService, TransactionChainFactory<InstanceIdentifier<?>, DataObject> {
+public interface DataBroker extends TransactionFactory, AsyncDataBroker<InstanceIdentifier<?>, DataObject, DataChangeListener>, BindingService, TransactionChainFactory<InstanceIdentifier<?>, DataObject> {
+
     @Override
     ReadOnlyTransaction newReadOnlyTransaction();
 
@@ -33,4 +35,7 @@ public interface DataBroker extends AsyncDataBroker<InstanceIdentifier<?>, DataO
     @Override
     ListenerRegistration<DataChangeListener> registerDataChangeListener(LogicalDatastoreType store,
             InstanceIdentifier<?> path, DataChangeListener listener, DataChangeScope triggeringScope);
+
+    @Override
+    BindingTransactionChain createTransactionChain(TransactionChainListener listener);
 }
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPoint.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPoint.java
new file mode 100644 (file)
index 0000000..ee0a113
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import org.opendaylight.controller.sal.binding.api.BindingAwareService;
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import com.google.common.base.Optional;
+
+public interface MountPoint extends Identifiable<InstanceIdentifier<?>>{
+
+    <T extends BindingAwareService> Optional<T> getService(Class<T> service);
+
+}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPointService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPointService.java
new file mode 100644 (file)
index 0000000..dd3a37e
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import java.util.EventListener;
+
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import com.google.common.base.Optional;
+
+public interface MountPointService {
+
+    Optional<MountPoint> getMountPoint(InstanceIdentifier<?> mountPoint);
+
+    <T extends MountPointListener> ListenerRegistration<T> registerListener(InstanceIdentifier<?> path, T listener);
+
+
+    public interface MountPointListener extends EventListener {
+
+        void onMountPointCreated(InstanceIdentifier<?> path);
+
+        void onMountPointRemoved(InstanceIdentifier<?> path);
+
+    }
+
+}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/TransactionFactory.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/TransactionFactory.java
new file mode 100644 (file)
index 0000000..4483cf9
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataTransactionFactory;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+public interface TransactionFactory extends AsyncDataTransactionFactory<InstanceIdentifier<?>, DataObject>{
+
+    @Override
+    ReadOnlyTransaction newReadOnlyTransaction();
+
+    @Override
+    ReadWriteTransaction newReadWriteTransaction();
+
+    @Override
+    WriteTransaction newWriteOnlyTransaction();
+
+}
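With this change both DataBroker and BindingTransactionChain expose their transaction-creation methods through the new TransactionFactory interface, so code that only needs to open transactions can accept either one. A minimal sketch of that usage follows; the TransactionFactorySketch class and helper name are illustrative, not part of this change.

import org.opendaylight.controller.md.sal.binding.api.TransactionFactory;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;

public final class TransactionFactorySketch {

    // Accepts either a DataBroker or a BindingTransactionChain, since both now
    // extend TransactionFactory.
    static WriteTransaction openWriteTransaction(TransactionFactory txFactory) {
        return txFactory.newWriteOnlyTransaction();
    }
}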
index 044a700d84f26b968cb29f6fe3cd6a80e0dbf0ec..a6ba2e2799b99f884c119dae6ef67e44bd85a562 100644 (file)
@@ -22,6 +22,10 @@ public interface WriteTransaction extends AsyncWriteTransaction<InstanceIdentifi
     /**
      * Stores a piece of data at the specified path. This acts as an add / replace
      * operation, which is to say that whole subtree will be replaced by the specified data.
+     * <p>
+     * This method does not automatically create missing parent nodes. It is equivalent to invoking
+     * {@link #put(LogicalDatastoreType, InstanceIdentifier, DataObject, boolean)}
+     * with <code>createMissingParents</code> set to false.
      * <p>
      * For more information on usage and examples, please see the documentation in {@link AsyncWriteTransaction}.
      * <p>
@@ -39,15 +43,50 @@ public interface WriteTransaction extends AsyncWriteTransaction<InstanceIdentifi
      */
     <T extends DataObject> void put(LogicalDatastoreType store, InstanceIdentifier<T> path, T data);
 
+
+    /**
+     * Stores a piece of data at the specified path. This acts as an add /
+     * replace operation, which is to say that whole subtree will be replaced by
+     * the specified data.
+     * <p>
+     * For more information on usage and examples, please see the documentation
+     * in {@link AsyncWriteTransaction}.
+     * <p>
+     * If you need to make sure that a parent object exists but you do not want
+     * modify its pre-existing state by using put, consider using {@link #merge}
+     * instead.
+     *
+     * Note: Using <code>createMissingParents</code> with value true may
+     * introduce garbage into the data store, or recreate nodes which were
+     * deleted by a previous transaction.
+     *
+     * @param store
+     *            the logical data store which should be modified
+     * @param path
+     *            the data object path
+     * @param data
+     *            the data object to be written to the specified path
+     * @param createMissingParents
+     *            if true, any missing parent nodes will be automatically
+     *            created using a merge operation.
+     * @throws IllegalStateException
+     *             if the transaction has already been submitted
+     */
+    <T extends DataObject> void put(LogicalDatastoreType store, InstanceIdentifier<T> path, T data,
+            boolean createMissingParents);
+
     /**
      * Merges a piece of data with the existing data at a specified path. Any pre-existing data
      * which is not explicitly overwritten will be preserved. This means that if you store a container,
      * its child lists will be merged.
      * <p>
+     * This method does not automatically create missing parent nodes. It is equivalent to invoking
+     * {@link #merge(LogicalDatastoreType, InstanceIdentifier, DataObject, boolean)}
+     * with <code>createMissingParents</code> set to false.
+     * <p>
      * For more information on usage and examples, please see the documentation in {@link AsyncWriteTransaction}.
      *<p>
      * If you require an explicit replace operation, use {@link #put} instead.
-     *
      * @param store
      *            the logical data store which should be modified
      * @param path
@@ -59,6 +98,31 @@ public interface WriteTransaction extends AsyncWriteTransaction<InstanceIdentifi
      */
     <T extends DataObject> void merge(LogicalDatastoreType store, InstanceIdentifier<T> path, T data);
 
+    /**
+     * Merges a piece of data with the existing data at a specified path. Any
+     * pre-existing data which is not explicitly overwritten will be preserved.
+     * This means that if you store a container, its child lists will be merged.
+     * <p>
+     * For more information on usage and examples, please see the documentation
+     * in {@link AsyncWriteTransaction}.
+     * <p>
+     * If you require an explicit replace operation, use {@link #put} instead.
+     *
+     * @param store
+     *            the logical data store which should be modified
+     * @param path
+     *            the data object path
+     * @param data
+     *            the data object to be merged to the specified path
+     * @param createMissingParents
+     *            if true, any missing parent nodes will be automatically created
+     *            using a merge operation.
+     * @throws IllegalStateException
+     *             if the transaction has already been submitted
+     */
+    <T extends DataObject> void merge(LogicalDatastoreType store, InstanceIdentifier<T> path, T data,
+            boolean createMissingParents);
+
     @Override
     void delete(LogicalDatastoreType store, InstanceIdentifier<?> path);
 }
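A minimal usage sketch of the new four-argument put/merge overloads follows; the CreateMissingParentsSketch class, helper name, and parameters are illustrative placeholders, and submitting the transaction is left to the caller.

import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

public final class CreateMissingParentsSketch {

    static <T extends DataObject> void writeWithParents(WriteTransaction tx,
            InstanceIdentifier<T> nodePath, T nodeData) {
        // Replace the subtree at nodePath; missing parent nodes are created via merge.
        tx.put(LogicalDatastoreType.CONFIGURATION, nodePath, nodeData, true);

        // Merge without creating parents; equivalent to the three-argument merge
        // (createMissingParents = false).
        tx.merge(LogicalDatastoreType.OPERATIONAL, nodePath, nodeData, false);
        // The transaction would then be submitted/committed by the caller.
    }
}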
index a41186dae11eda694687e7698663dffbc3364c26..5e24560853e733c42744512366721a4d8b12823c 100644 (file)
@@ -168,6 +168,9 @@ public interface BindingAwareBroker {
          * Returns the implemented RPC service interface.
          */
         Class<T> getServiceType();
+
+        @Override
+        void close();
     }
 
     /**
index 5aa3b99b3009ea850c8131eef042391b0d7c6d5a..7fa40b8d070c8301ba892080d587b651318cf1d1 100644 (file)
@@ -33,5 +33,5 @@ public interface DataProviderService extends DataBrokerService, DataProvisionSer
      * @deprecated Data Reader contract is removed from capabilities of MD-SAL and is replaced by  replaced by org.opendaylight.controller.sal.core.spi.data.DOMStore contract.
      */
     @Deprecated
-    Registration<DataReader<InstanceIdentifier<? extends DataObject>,DataObject>> registerDataReader(InstanceIdentifier<? extends DataObject> path,DataReader<InstanceIdentifier<? extends DataObject>,DataObject> reader);
+    Registration registerDataReader(InstanceIdentifier<? extends DataObject> path,DataReader<InstanceIdentifier<? extends DataObject>,DataObject> reader);
 }
index 3988bc6960266654e86110f3c3c96764b8db854d..44be74b4248b0d25ee40d484d17ff8c96e82d6a5 100644 (file)
@@ -10,14 +10,12 @@ package org.opendaylight.controller.md.sal.binding.impl;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map.Entry;
 import java.util.concurrent.ExecutionException;
 
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
 import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
@@ -34,27 +32,8 @@ public class AbstractReadWriteTransaction extends AbstractWriteTransaction<DOMDa
         super(delegate, codec);
     }
 
-    protected final void doPutWithEnsureParents(final LogicalDatastoreType store, final InstanceIdentifier<?> path, final DataObject data) {
-        final Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, NormalizedNode<?, ?>> normalized = getCodec()
-                .toNormalizedNode(path, data);
-
-        final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier normalizedPath = normalized.getKey();
-        ensureParentsByMerge(store, normalizedPath, path);
-        LOG.debug("Tx: {} : Putting data {}", getDelegate().getIdentifier(), normalizedPath);
-        doPut(store, path, data);
-    }
-
-    protected final void doMergeWithEnsureParents(final LogicalDatastoreType store, final InstanceIdentifier<?> path, final DataObject data) {
-        final Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, NormalizedNode<?, ?>> normalized = getCodec()
-                .toNormalizedNode(path, data);
-
-        final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier normalizedPath = normalized.getKey();
-        ensureParentsByMerge(store, normalizedPath, path);
-        LOG.debug("Tx: {} : Merge data {}", getDelegate().getIdentifier(), normalizedPath);
-        doMerge(store, path, data);
-    }
-
-    private final void ensureParentsByMerge(final LogicalDatastoreType store,
+    @Override
+    protected final void ensureParentsByMerge(final LogicalDatastoreType store,
             final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier normalizedPath,
             final InstanceIdentifier<?> path) {
         List<PathArgument> currentArguments = new ArrayList<>();
index a8eef5a3cae2687f6cc552454407e0bb8bf9eb97..f8c56f95b3aaa26848faca888085bf790446f503 100644 (file)
@@ -30,7 +30,7 @@ import com.google.common.util.concurrent.CheckedFuture;
  * Abstract Base Transaction for transactions which are backed by
  * {@link DOMDataWriteTransaction}
  */
-public class AbstractWriteTransaction<T extends DOMDataWriteTransaction> extends
+public abstract class AbstractWriteTransaction<T extends DOMDataWriteTransaction> extends
         AbstractForwardedTransaction<T> {
 
     private static final Logger LOG = LoggerFactory.getLogger(AbstractWriteTransaction.class);
@@ -40,15 +40,36 @@ public class AbstractWriteTransaction<T extends DOMDataWriteTransaction> extends
         super(delegate, codec);
     }
 
-    protected final void doPut(final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path, final DataObject data) {
+
+    public final <U extends DataObject> void put(final LogicalDatastoreType store,
+            final InstanceIdentifier<U> path, final U data, final boolean createParents) {
        final Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, NormalizedNode<?, ?>> normalized = getCodec()
                 .toNormalizedNode(path, data);
-        ensureListParentIfNeeded(store,path,normalized);
+        if(createParents) {
+            ensureParentsByMerge(store, normalized.getKey(), path);
+        } else {
+            ensureListParentIfNeeded(store,path,normalized);
+        }
         getDelegate().put(store, normalized.getKey(), normalized.getValue());
     }
 
 
+    public final <U extends DataObject> void merge(final LogicalDatastoreType store,
+            final InstanceIdentifier<U> path, final U data,final boolean createParents) {
+
+        final Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, NormalizedNode<?, ?>> normalized = getCodec()
+                .toNormalizedNode(path, data);
+
+        if(createParents) {
+            ensureParentsByMerge(store, normalized.getKey(), path);
+        } else {
+            ensureListParentIfNeeded(store,path,normalized);
+        }
+
+        getDelegate().merge(store, normalized.getKey(), normalized.getValue());
+    }
+
+
     /**
      *
      * Ensures list parent if item is list, otherwise noop.
@@ -106,14 +127,16 @@ public class AbstractWriteTransaction<T extends DOMDataWriteTransaction> extends
         }
     }
 
-    protected final void doMerge(final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path, final DataObject data) {
-
-        final Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, NormalizedNode<?, ?>> normalized = getCodec()
-                .toNormalizedNode(path, data);
-        ensureListParentIfNeeded(store,path,normalized);
-        getDelegate().merge(store, normalized.getKey(), normalized.getValue());
-    }
+    /**
+     * Subclasses of this class are required to implement creation of parent
+     * nodes based on behaviour of their underlying transaction.
+     *
+     * @param store the logical datastore to be modified
+     * @param key the normalized (DOM) path of the node whose parents should be created
+     * @param path the binding path of the node whose parents should be created
+     */
+    protected abstract void ensureParentsByMerge(LogicalDatastoreType store,
+            org.opendaylight.yangtools.yang.data.api.InstanceIdentifier key, InstanceIdentifier<?> path);
 
     protected final void doDelete(final LogicalDatastoreType store,
             final InstanceIdentifier<?> path) {
index 29790fb60a1a22209047acd44d1057087b707194..e62b4f736a944c180f47a79e3eff132011452491 100644 (file)
@@ -7,15 +7,23 @@
  */
 package org.opendaylight.controller.md.sal.binding.impl;
 
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
 import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
 import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataTransaction;
+import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
+import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
 import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.PathArgument;
+
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.ListenableFuture;
 
@@ -27,15 +35,37 @@ class BindingDataWriteTransactionImpl<T extends DOMDataWriteTransaction> extends
     }
 
     @Override
-    public <T extends DataObject> void put(final LogicalDatastoreType store, final InstanceIdentifier<T> path,
-                                           final T data) {
-        doPut(store, path, data);
+    public <U extends DataObject> void put(final LogicalDatastoreType store, final InstanceIdentifier<U> path,
+                                           final U data) {
+        put(store, path, data,false);
     }
 
     @Override
     public <T extends DataObject> void merge(final LogicalDatastoreType store, final InstanceIdentifier<T> path,
                                              final T data) {
-        doMerge(store, path, data);
+        merge(store, path, data,false);
+    }
+
+
+    @Override
+    protected void ensureParentsByMerge(final LogicalDatastoreType store,
+            final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier normalizedPath, final InstanceIdentifier<?> path) {
+        List<PathArgument> currentArguments = new ArrayList<>();
+        DataNormalizationOperation<?> currentOp = getCodec().getDataNormalizer().getRootOperation();
+        Iterator<PathArgument> iterator = normalizedPath.getPathArguments().iterator();
+        while (iterator.hasNext()) {
+            PathArgument currentArg = iterator.next();
+            try {
+                currentOp = currentOp.getChild(currentArg);
+            } catch (DataNormalizationException e) {
+                throw new IllegalArgumentException(String.format("Invalid child encountered in path %s", path), e);
+            }
+            currentArguments.add(currentArg);
+            org.opendaylight.yangtools.yang.data.api.InstanceIdentifier currentPath = org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.create(
+                    currentArguments);
+
+            getDelegate().merge(store, currentPath, currentOp.createDefault(currentArg));
+        }
     }
 
     @Override
index f8c1cf6b99f4dfd7664251db168b0f59eddce557..b4b9e314bee385fcd5b092b937edb2c9a1e0b86a 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.md.sal.binding.impl;
 
 import java.lang.reflect.Method;
 import java.util.AbstractMap.SimpleEntry;
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map.Entry;
@@ -406,9 +407,8 @@ public class BindingToNormalizedNodeCodec implements SchemaContextListener {
         return Iterables.filter(augmentations, new Predicate<AugmentationSchema>() {
             @Override
             public boolean apply(final AugmentationSchema schema) {
-                final Set<DataSchemaNode> childNodes = schema.getChildNodes();
-                return !schema.getChildNodes().isEmpty()
-                        && module.equals(Iterables.get(childNodes, 0).getQName().getModule());
+                final Collection<DataSchemaNode> childNodes = schema.getChildNodes();
+                return !childNodes.isEmpty() && module.equals(Iterables.get(childNodes, 0).getQName().getModule());
             }
         });
     }
index 12f26b09bb2cf1645d645438302e705c3c0489ce..c924b74a12bb0e7c6f080e98dffc09352faa16a7 100644 (file)
@@ -55,6 +55,7 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
 
+@SuppressWarnings("deprecation")
 public class ForwardedBackwardsCompatibleDataBroker extends AbstractForwardedDataBroker implements DataProviderService, AutoCloseable {
 
     private static final Logger LOG = LoggerFactory.getLogger(ForwardedBackwardsCompatibleDataBroker.class);
@@ -87,7 +88,7 @@ public class ForwardedBackwardsCompatibleDataBroker extends AbstractForwardedDat
     }
 
     @Override
-    public Registration<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> registerCommitHandler(
+    public Registration registerCommitHandler(
             final InstanceIdentifier<? extends DataObject> path,
             final DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> commitHandler) {
 
@@ -122,7 +123,7 @@ public class ForwardedBackwardsCompatibleDataBroker extends AbstractForwardedDat
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier<? extends DataObject>, DataObject>> registerDataReader(
+    public Registration registerDataReader(
             final InstanceIdentifier<? extends DataObject> path,
             final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> reader) {
         throw new UnsupportedOperationException("Data reader contract is not supported.");
@@ -213,10 +214,13 @@ public class ForwardedBackwardsCompatibleDataBroker extends AbstractForwardedDat
         @Override
         public void putOperationalData(final InstanceIdentifier<? extends DataObject> path, final DataObject data) {
             boolean previouslyRemoved = posponedRemovedOperational.remove(path);
+
+            @SuppressWarnings({ "rawtypes", "unchecked" })
+            final InstanceIdentifier<DataObject> castedPath = (InstanceIdentifier) path;
             if(previouslyRemoved) {
-                doPutWithEnsureParents(LogicalDatastoreType.OPERATIONAL, path, data);
+                put(LogicalDatastoreType.OPERATIONAL, castedPath, data,true);
             } else {
-                doMergeWithEnsureParents(LogicalDatastoreType.OPERATIONAL, path, data);
+                merge(LogicalDatastoreType.OPERATIONAL, castedPath, data,true);
             }
         }
 
@@ -231,10 +235,12 @@ public class ForwardedBackwardsCompatibleDataBroker extends AbstractForwardedDat
                 created.put(path, data);
             }
             updated.put(path, data);
+            @SuppressWarnings({"rawtypes","unchecked"})
+            final InstanceIdentifier<DataObject> castedPath = (InstanceIdentifier) path;
             if(previouslyRemoved) {
-                doPutWithEnsureParents(LogicalDatastoreType.CONFIGURATION, path, data);
+                put(LogicalDatastoreType.CONFIGURATION, castedPath, data,true);
             } else {
-                doMergeWithEnsureParents(LogicalDatastoreType.CONFIGURATION, path, data);
+                merge(LogicalDatastoreType.CONFIGURATION, castedPath, data,true);
             }
         }
 
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/codegen/RpcIsNotRoutedException.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/codegen/RpcIsNotRoutedException.java
new file mode 100644 (file)
index 0000000..5317324
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.binding.codegen;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Exception raised when the supplied Binding-Aware
+ * RpcService class is not routed but was used in a context
+ * where only routed RPCs should be used.
+ *
+ */
+public class RpcIsNotRoutedException extends IllegalStateException {
+
+    private static final long serialVersionUID = 1L;
+
+    public RpcIsNotRoutedException(final String message, final Throwable cause) {
+        super(Preconditions.checkNotNull(message), cause);
+    }
+
+    public RpcIsNotRoutedException(final String message) {
+        super(Preconditions.checkNotNull(message));
+    }
+}
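Callers of RuntimeCodeGenerator.getRouterFor can now handle the "service is not routed" failure explicitly. A minimal sketch follows; the RoutedRpcFallbackSketch class, the helper name, and the "sketch-router" name are illustrative, and the fallback to a direct proxy is just one possible reaction.

import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator;
import org.opendaylight.yangtools.yang.binding.RpcService;

public final class RoutedRpcFallbackSketch {

    static <T extends RpcService> void buildRouter(RuntimeCodeGenerator generator,
            Class<T> serviceType) {
        try {
            RpcRouter<T> router = generator.getRouterFor(serviceType, "sketch-router");
            // ... register route-specific implementations on the router ...
        } catch (RpcIsNotRoutedException e) {
            // The service has at least one RPC without a routing context;
            // fall back to a direct (non-routed) proxy instead.
            T direct = generator.getDirectProxyFor(serviceType);
            // ... use the direct proxy ...
        }
    }
}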
index 9b7b8e28c90657aedc2f53c8c17d1697a0cd6e62..c7c5f10f71bd1073a9e6040ce62b23bbd39c0bc3 100644 (file)
@@ -84,8 +84,9 @@ public interface RuntimeCodeGenerator {
      *            - Subclass of RpcService for which Router is to be generated.
      * @return Instance of RpcService of provided serviceType which implements
      *         also {@link RpcRouter}<T> and {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy}
+     * @throws RpcIsNotRoutedException
      */
-    <T extends RpcService> RpcRouter<T> getRouterFor(Class<T> serviceType,String name) throws IllegalArgumentException;
+    <T extends RpcService> RpcRouter<T> getRouterFor(Class<T> serviceType,String name) throws IllegalArgumentException, RpcIsNotRoutedException;
 
     NotificationInvokerFactory getInvokerFactory();
 }
index 9605a4d3723b6f21ccf8ce5810ad078ba3e3bbd6..1fa54be2005b06d0be70226ac4abd30625f8a8bb 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.sal.binding.codegen.impl;
 
+import com.google.common.base.Supplier;
+
 import java.util.Map;
 import java.util.WeakHashMap;
 
@@ -19,16 +21,16 @@ import javax.annotation.concurrent.GuardedBy;
 
 import org.eclipse.xtext.xbase.lib.Extension;
 import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
+import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
 import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
 import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
+import org.opendaylight.yangtools.yang.binding.BindingMapping;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.binding.NotificationListener;
 import org.opendaylight.yangtools.yang.binding.RpcService;
 import org.opendaylight.yangtools.yang.binding.annotations.RoutingContext;
 import org.opendaylight.yangtools.yang.binding.util.ClassLoaderUtils;
 
-import com.google.common.base.Supplier;
-
 abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator, NotificationInvokerFactory {
     @GuardedBy("this")
     private final Map<Class<? extends NotificationListener>, RuntimeGeneratedInvokerPrototype> invokerClasses = new WeakHashMap<>();
@@ -56,11 +58,11 @@ abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controll
     protected abstract <T extends RpcService> Supplier<T> directProxySupplier(final Class<T> serviceType);
     protected abstract <T extends RpcService> Supplier<T> routerSupplier(final Class<T> serviceType, RpcServiceMetadata metadata);
 
-    private RpcServiceMetadata getRpcMetadata(final CtClass iface) throws ClassNotFoundException, NotFoundException {
+    private RpcServiceMetadata getRpcMetadata(final CtClass iface) throws ClassNotFoundException, NotFoundException, RpcIsNotRoutedException {
         final RpcServiceMetadata metadata = new RpcServiceMetadata();
 
         for (CtMethod method : iface.getMethods()) {
-            if (iface.equals(method.getDeclaringClass()) && method.getParameterTypes().length == 1) {
+            if (isRpcMethodWithInput(iface, method)) {
                 final RpcMetadata routingPair = getRpcMetadata(method);
                 if (routingPair != null) {
                     metadata.addContext(routingPair.getContext());
@@ -81,6 +83,8 @@ abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controll
                      *        remains to be investigated.
                      */
                     Thread.currentThread().getContextClassLoader().loadClass(routingPair.getInputType().getName());
+                } else {
+                    throw new RpcIsNotRoutedException("RPC " + method.getName() + " from "+ iface.getName() +" is not routed");
                 }
             }
         }
@@ -88,6 +92,18 @@ abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controll
         return metadata;
     }
 
+
+    private boolean isRpcMethodWithInput(final CtClass iface, final CtMethod method) throws NotFoundException {
+        if(iface.equals(method.getDeclaringClass())
+                && method.getParameterTypes().length == 1) {
+            final CtClass onlyArg = method.getParameterTypes()[0];
+            if(onlyArg.isInterface() && onlyArg.getName().endsWith(BindingMapping.RPC_INPUT_SUFFIX)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
     private RpcMetadata getRpcMetadata(final CtMethod method) throws NotFoundException {
         final CtClass inputClass = method.getParameterTypes()[0];
         return rpcMethodMetadata(inputClass, inputClass, method.getName());
@@ -120,20 +136,17 @@ abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controll
             return invoker;
         }
 
-        utils.getLock().lock();
-        try {
+        synchronized (utils) {
             invoker = ClassLoaderUtils.withClassLoader(cls.getClassLoader(), new Supplier<RuntimeGeneratedInvokerPrototype>() {
                 @Override
                 public RuntimeGeneratedInvokerPrototype get() {
                     return generateListenerInvoker(cls);
                 }
             });
-
-            invokerClasses.put(cls, invoker);
-            return invoker;
-        } finally {
-            utils.getLock().unlock();
         }
+
+        invokerClasses.put(cls, invoker);
+        return invoker;
     }
 
     @Override
@@ -143,16 +156,13 @@ abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controll
 
     @Override
     public final <T extends RpcService> T getDirectProxyFor(final Class<T> serviceType) {
-        utils.getLock().lock();
-        try {
+        synchronized (utils) {
             return ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), directProxySupplier(serviceType));
-        } finally {
-            utils.getLock().unlock();
         }
     }
 
     @Override
-    public final <T extends RpcService> RpcRouter<T> getRouterFor(final Class<T> serviceType, final String name) {
+    public final <T extends RpcService> RpcRouter<T> getRouterFor(final Class<T> serviceType, final String name) throws RpcIsNotRoutedException {
         final RpcServiceMetadata metadata = ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), new Supplier<RpcServiceMetadata>() {
             @Override
             public RpcServiceMetadata get() {
@@ -164,12 +174,9 @@ abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controll
             }
         });
 
-        utils.getLock().lock();
-        try {
+        synchronized (utils) {
             final T instance = ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), routerSupplier(serviceType, metadata));
             return new RpcRouterCodegenInstance<T>(name, serviceType, instance, metadata.getContexts());
-        } finally {
-            utils.getLock().unlock();
         }
     }
 
index 9ede01b6a3e62ba3bdbd685b22309ab799b09ba7..d4cfb563f633cb1f21a0a6055010a9a98bfd19b7 100644 (file)
@@ -147,14 +147,14 @@ public class DataBrokerImpl extends
         }
 
         @Override
-        public Registration<DataReader<InstanceIdentifier<? extends DataObject>, DataObject>> registerConfigurationReader(
+        public Registration registerConfigurationReader(
                 final InstanceIdentifier<? extends DataObject> path,
                 final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> reader) {
             throw new UnsupportedOperationException("Not supported");
         }
 
         @Override
-        public Registration<DataReader<InstanceIdentifier<? extends DataObject>, DataObject>> registerOperationalReader(
+        public Registration registerOperationalReader(
                 final InstanceIdentifier<? extends DataObject> path,
                 final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> reader) {
             throw new UnsupportedOperationException("Not supported");
index c61ec4926a65f58f31fd03657162b0af70b23c0e..f2e467038f003cbc3acaa9dad3f4d108b0df691c 100644 (file)
@@ -9,16 +9,14 @@ package org.opendaylight.controller.sal.binding.impl;
 
 import static com.google.common.base.Preconditions.checkState;
 
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-
 import java.util.EventListener;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.WeakHashMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.opendaylight.controller.md.sal.common.api.routing.RouteChange;
 import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
@@ -29,6 +27,7 @@ import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistr
 import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
 import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
 import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
+import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
 import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator;
 import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper;
 import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
@@ -41,6 +40,13 @@ import org.opendaylight.yangtools.yang.binding.RpcService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Throwables;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.UncheckedExecutionException;
+
 public class RpcProviderRegistryImpl implements RpcProviderRegistry, RouteChangePublisher<RpcContextIdentifier, InstanceIdentifier<?>> {
 
     private RuntimeCodeGenerator rpcFactory = SingletonHolder.RPC_GENERATOR_IMPL;
@@ -56,7 +62,9 @@ public class RpcProviderRegistryImpl implements RpcProviderRegistry, RouteChange
                 }
             });
 
-    private final Map<Class<? extends RpcService>, RpcRouter<?>> rpcRouters = new WeakHashMap<>();
+    private final Cache<Class<? extends RpcService>, RpcRouter<?>> rpcRouters = CacheBuilder.newBuilder().weakKeys()
+            .build();
+
     private final ListenerRegistry<RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> routeChangeListeners = ListenerRegistry
             .create();
     private final ListenerRegistry<RouterInstantiationListener> routerInstantiationListener = ListenerRegistry.create();
@@ -85,12 +93,20 @@ public class RpcProviderRegistryImpl implements RpcProviderRegistry, RouteChange
     @Override
     public final <T extends RpcService> RpcRegistration<T> addRpcImplementation(final Class<T> type, final T implementation)
             throws IllegalStateException {
-        @SuppressWarnings("unchecked")
-        RpcRouter<T> potentialRouter = (RpcRouter<T>) rpcRouters.get(type);
-        if (potentialRouter != null) {
+
+        // FIXME: This should be well documented - addRpcImplementation for
+        // routed RPCs
+        try {
+            // Note: if the RPC is truly global, the expected number of registrations
+            // through this method is very low.
+            RpcRouter<T> potentialRouter = getRpcRouter(type);
             checkState(potentialRouter.getDefaultService() == null,
-                    "Default service for routed RPC already registered.");
+                        "Default service for routed RPC already registered.");
             return potentialRouter.registerDefaultService(implementation);
+        } catch (RpcIsNotRoutedException e) {
+            // NOOP - we can safely continue, since the RPC is not routed,
+            // so we fall back to global registration.
+            LOG.debug("RPC is not routed. Using global registration.", e);
         }
         T publicProxy = getRpcService(type);
         RpcService currentDelegate = RuntimeCodeHelper.getDelegate(publicProxy);
@@ -107,28 +123,37 @@ public class RpcProviderRegistryImpl implements RpcProviderRegistry, RouteChange
         return (T) publicProxies.getUnchecked(type);
     }
 
-    @SuppressWarnings({ "unchecked", "rawtypes" })
+
     public <T extends RpcService> RpcRouter<T> getRpcRouter(final Class<T> type) {
-        RpcRouter<?> potentialRouter = rpcRouters.get(type);
-        if (potentialRouter != null) {
-            return (RpcRouter<T>) potentialRouter;
-        }
-        synchronized (this) {
-            /**
-             * Potential Router could be instantiated by other thread while we
-             * were waiting for the lock.
-             */
-            potentialRouter = rpcRouters.get(type);
-            if (potentialRouter != null) {
-                return (RpcRouter<T>) potentialRouter;
+        try {
+            final AtomicBoolean created = new AtomicBoolean(false);
+            @SuppressWarnings("unchecked")
+            // A LoadingCache is unsuitable here because we need to distinguish the
+            // first creation of an RPC Router; instead we use a plain Cache with the
+            // load API and a shared AtomicBoolean, which is set to true when the
+            // router is created during this call.
+            RpcRouter<T> router = (RpcRouter<T>) rpcRouters.get(type,new Callable<RpcRouter<?>>() {
+
+                @Override
+                public org.opendaylight.controller.sal.binding.api.rpc.RpcRouter<?> call()  {
+                    RpcRouter<?> router = rpcFactory.getRouterFor(type, name);
+                    router.registerRouteChangeListener(new RouteChangeForwarder<T>(type));
+                    LOG.debug("Registering router {} as global implementation of {} in {}", router, type.getSimpleName(), RpcProviderRegistryImpl.this);
+                    RuntimeCodeHelper.setDelegate(getRpcService(type), router.getInvocationProxy());
+                    created.set(true);
+                    return router;
+                }
+            });
+            if(created.get()) {
+                notifyListenersRoutedCreated(router);
             }
-            RpcRouter<T> router = rpcFactory.getRouterFor(type, name);
-            router.registerRouteChangeListener(new RouteChangeForwarder(type));
-            LOG.debug("Registering router {} as global implementation of {} in {}", router, type.getSimpleName(), this);
-            RuntimeCodeHelper.setDelegate(getRpcService(type), router.getInvocationProxy());
-            rpcRouters.put(type, router);
-            notifyListenersRoutedCreated(router);
             return router;
+        } catch (ExecutionException | UncheckedExecutionException e) {
+            // We rethrow RuntimeExceptions that were wrapped in an ExecutionException;
+            // otherwise we throw an IllegalStateException with the original exception
+            // as its cause.
+            Throwables.propagateIfPossible(e.getCause());
+            throw new IllegalStateException("Could not load RPC Router for " + type.getName(), e);
         }
     }
 
@@ -159,7 +184,7 @@ public class RpcProviderRegistryImpl implements RpcProviderRegistry, RouteChange
             final RouterInstantiationListener listener) {
         ListenerRegistration<RouterInstantiationListener> reg = routerInstantiationListener.register(listener);
         try {
-            for (RpcRouter<?> router : rpcRouters.values()) {
+            for (RpcRouter<?> router : rpcRouters.asMap().values()) {
                 listener.onRpcRouterCreated(router);
             }
         } catch (Exception e) {
@@ -225,7 +250,7 @@ public class RpcProviderRegistryImpl implements RpcProviderRegistry, RouteChange
                 try {
                     listener.getInstance().onRouteChange(toPublish);
                 } catch (Exception e) {
-                    e.printStackTrace();
+                    LOG.error("Unhandled exception during invoking listener {}", listener.getInstance(), e);
                 }
             }
         }
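
An illustrative, hedged sketch of the first-creation pattern used in getRpcRouter() above: a Guava Cache loaded through a Callable, combined with a shared AtomicBoolean, so that work tied to the very first creation of a cached value (such as notifying listeners) runs exactly once per key. The ExampleRouterCache class and the onFirstCreation() hook are hypothetical names used only for this sketch, not part of the change itself.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

class ExampleRouterCache<K, V> {
    private final Cache<K, V> cache = CacheBuilder.newBuilder().weakKeys().build();

    V getOrCreate(final K key, final Callable<V> factory) throws ExecutionException {
        final AtomicBoolean created = new AtomicBoolean(false);
        final V value = cache.get(key, new Callable<V>() {
            @Override
            public V call() throws Exception {
                created.set(true);      // the loader ran, so this is the first creation
                return factory.call();
            }
        });
        if (created.get()) {
            onFirstCreation(value);     // runs exactly once per cached key
        }
        return value;
    }

    void onFirstCreation(final V value) {
        // hypothetical hook, e.g. notify listeners that a new router exists
    }
}
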
index 71253d02d66ff0230ad483dea2297afee072c8ba..293a61e0ea3d90a04b1d073d1e2d4ccaa181da22 100644 (file)
@@ -16,7 +16,6 @@ import java.util.Collections;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import org.opendaylight.controller.md.sal.binding.impl.AbstractForwardedDataBroker;
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
 import org.opendaylight.controller.md.sal.common.api.routing.RouteChangePublisher;
 import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
 import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
@@ -59,7 +58,7 @@ public class BindingIndependentConnector implements //
     private final BindingToDomCommitHandler bindingToDomCommitHandler;
     private final DomToBindingCommitHandler domToBindingCommitHandler;
 
-    private Registration<DataCommitHandler<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode>> biCommitHandlerRegistration;
+    private Registration biCommitHandlerRegistration;
     private RpcProvisionRegistry biRpcRegistry;
     private RpcProviderRegistry baRpcRegistry;
 
index 8c74008990614ea07bba2499508c7d6143a15c4a..e1f8c6c65236eb31ee051f5af6b0ff7489df9baf 100644 (file)
@@ -18,7 +18,7 @@ import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
 import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
-import org.opendaylight.controller.sal.core.api.mount.MountProvisionService.MountProvisionListener;
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionListener;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 
index 110e5b4dce3f02e7f32aad00d0c8915ea188bb47..8782046eeef3c13da933bc01ab8d68476b0a0664 100644 (file)
@@ -23,6 +23,7 @@ import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcR
 import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
 import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
 import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
+import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
 import org.opendaylight.controller.sal.binding.impl.RpcProviderRegistryImpl;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.OpendaylightTestRpcServiceService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
@@ -77,6 +78,28 @@ public class RpcProviderRegistryTest {
         assertNotNull(regTwo);
     }
 
+    @Test
+    public void routedRpcRegisteredUsingGlobalAsDefaultInstance() throws Exception {
+        OpendaylightTestRoutedRpcService def = Mockito.mock(OpendaylightTestRoutedRpcService.class);
+        rpcRegistry.addRpcImplementation(OpendaylightTestRoutedRpcService.class, def);
+        RpcRouter<OpendaylightTestRoutedRpcService> router = rpcRegistry.getRpcRouter(OpendaylightTestRoutedRpcService.class);
+        assertEquals(def, router.getDefaultService());
+    }
+
+    @Test
+    public void nonRoutedRegisteredAsRouted() {
+        OpendaylightTestRpcServiceService one = Mockito.mock(OpendaylightTestRpcServiceService.class);
+        try {
+            rpcRegistry.addRoutedRpcImplementation(OpendaylightTestRpcServiceService.class, one);
+            fail("RpcIsNotRoutedException should be thrown");
+        } catch (RpcIsNotRoutedException e) {
+            assertNotNull(e.getMessage());
+        } catch (Exception e) {
+            fail("RpcIsNotRoutedException should be thrown");
+        }
+
+    }
+
     @Test
     public void testRpcRouterInstance() throws Exception  {
         OpendaylightTestRoutedRpcService def = Mockito.mock(OpendaylightTestRoutedRpcService.class);
index b577e2af1f2c845586615985fa77ec1169abd291..b504837d6780aa344eb706a786733b321a2fa65e 100644 (file)
@@ -7,12 +7,16 @@
  */
 package org.opendaylight.controller.md.sal.binding.impl.test;
 
+import static org.junit.Assert.assertTrue;
+
 import java.util.concurrent.ExecutionException;
 
 import org.junit.Test;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
 import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
 import org.opendaylight.controller.md.sal.binding.test.AbstractDataBrokerTest;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TopBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
@@ -20,19 +24,49 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 
+import com.google.common.base.Optional;
+
 
 public class WriteTransactionTest extends AbstractDataBrokerTest {
 
     private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
     private static final TopLevelListKey TOP_LIST_KEY = new TopLevelListKey("foo");
     private static final InstanceIdentifier<TopLevelList> NODE_PATH = TOP_PATH.child(TopLevelList.class, TOP_LIST_KEY);
-
+    private static final TopLevelList NODE = new TopLevelListBuilder().setKey(TOP_LIST_KEY).build();
     @Test
     public void test() throws InterruptedException, ExecutionException {
         WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
         writeTx.put(LogicalDatastoreType.OPERATIONAL, TOP_PATH, new TopBuilder().build());
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, NODE_PATH, new TopLevelListBuilder().setKey(TOP_LIST_KEY).build());
+        writeTx.put(LogicalDatastoreType.OPERATIONAL, NODE_PATH, NODE);
         writeTx.submit().get();
     }
 
+    @Test
+    public void testPutCreateParentsSuccess() throws TransactionCommitFailedException, InterruptedException, ExecutionException {
+
+        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+        writeTx.put(LogicalDatastoreType.OPERATIONAL, NODE_PATH, NODE, true);
+        writeTx.submit().checkedGet();
+
+        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        Optional<Top> topNode = readTx.read(LogicalDatastoreType.OPERATIONAL, TOP_PATH).get();
+        assertTrue("Top node must exist after commit", topNode.isPresent());
+        Optional<TopLevelList> listNode = readTx.read(LogicalDatastoreType.OPERATIONAL, NODE_PATH).get();
+        assertTrue("List node must exist after commit", listNode.isPresent());
+    }
+
+    @Test
+    public void testMergeCreateParentsSuccess() throws TransactionCommitFailedException, InterruptedException, ExecutionException {
+
+        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+        writeTx.merge(LogicalDatastoreType.OPERATIONAL, NODE_PATH, NODE, true);
+        writeTx.submit().checkedGet();
+
+        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+        Optional<Top> topNode = readTx.read(LogicalDatastoreType.OPERATIONAL, TOP_PATH).get();
+        assertTrue("Top node must exist after commit", topNode.isPresent());
+        Optional<TopLevelList> listNode = readTx.read(LogicalDatastoreType.OPERATIONAL, NODE_PATH).get();
+        assertTrue("List node must exist after commit", listNode.isPresent());
+    }
+
 }
index e6bbaab2d9dec48c659ac1bbf38e646fc021662b..9ec5bcf6c29ea58114f58da3c1eba47999841a55 100644 (file)
@@ -33,7 +33,7 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.Node
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SwitchFlowRemoved;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowCookie;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.binding.NotificationListener;
 import org.opendaylight.yangtools.yang.binding.RpcService;
 
@@ -43,8 +43,8 @@ public class NotificationTest extends AbstractTest {
     private final FlowListener listener1 = new FlowListener();
     private final FlowListener listener2 = new FlowListener();
 
-    private Registration<NotificationListener> listener1Reg;
-    private Registration<NotificationListener> listener2Reg;
+    private ListenerRegistration<NotificationListener> listener1Reg;
+    private ListenerRegistration<NotificationListener> listener2Reg;
 
     private NotificationProviderService notifyProviderService;
 
index 8f367de6c8976c687600a941b491aff4fcbfeb2f..830942f3377db22d9e7f431091aac3732b88bcef 100644 (file)
@@ -40,14 +40,14 @@ public abstract class AbstractBindingSalProviderInstance<D extends DataProviderS
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier<? extends DataObject>, DataObject>> registerDataReader(
+    public Registration registerDataReader(
             InstanceIdentifier<? extends DataObject> path,
             DataReader<InstanceIdentifier<? extends DataObject>, DataObject> reader) {
         return getDataBrokerChecked().registerDataReader(path, reader);
     }
 
     @Override
-    public Registration<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> registerCommitHandler(
+    public Registration registerCommitHandler(
             InstanceIdentifier<? extends DataObject> path,
             DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> commitHandler) {
         return getDataBrokerChecked().registerCommitHandler(path, commitHandler);
index f088c0a0bdde1f29f5ff0bd4054d3b3137ab8f7e..3b3217db6ba8f5cde5dbc88c6ede86d8ea2a0147 100644 (file)
@@ -11,7 +11,7 @@ import java.util.EventListener;
 
 import org.opendaylight.yangtools.concepts.Registration;
 
-public interface RegistrationListener<T extends Registration<?>> extends EventListener {
+public interface RegistrationListener<T extends Registration> extends EventListener {
 
     void onRegister(T registration);
 
index 224751c37dbd8dabebe44da114b8d1055253877e..dceb3de5bf1af839a72c28ddb14ba0dfa0132a21 100644 (file)
@@ -20,7 +20,7 @@ import org.opendaylight.yangtools.concepts.Registration;
  * @param <D>
  */
 @Deprecated
-public interface DataCommitHandlerRegistration<P extends Path<P>,D> extends Registration<DataCommitHandler<P, D>>{
+public interface DataCommitHandlerRegistration<P extends Path<P>,D> extends Registration {
 
     P getPath();
 }
index bd80a5389316f754a202af7c363c2332f7951859..6c7166eaee01fc9557acf2ef8962a825ef9b12bd 100644 (file)
@@ -20,7 +20,7 @@ import org.opendaylight.yangtools.concepts.Registration;
 @Deprecated
 public interface DataProvisionService<P extends Path<P> , D> {
 
-    public Registration<DataCommitHandler<P, D>> registerCommitHandler(P path, DataCommitHandler<P, D> commitHandler);
+    public Registration registerCommitHandler(P path, DataCommitHandler<P, D> commitHandler);
 
     public ListenerRegistration<RegistrationListener<DataCommitHandlerRegistration<P, D>>>
         registerCommitHandlerListener(RegistrationListener<DataCommitHandlerRegistration<P, D>> commitHandlerListener);
index bfca8f8bccf316d956dd8d621e0e868adf19b8f9..feccbbad92f694fb070af73c355376acbf26e928 100644 (file)
@@ -7,9 +7,11 @@
  */
 package org.opendaylight.controller.md.sal.common.api.notify;
 
-import org.opendaylight.yangtools.concepts.Registration;
+import java.util.EventListener;
 
-public interface NotificationSubscriptionService<T,N,L> {
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 
-    Registration<L> registerNotificationListener(T type,L listener);
+public interface NotificationSubscriptionService<T,N,L extends EventListener> {
+
+    ListenerRegistration<L> registerNotificationListener(T type,L listener);
 }
index 6fe8d6921730fc55e4fe3900ea66823ee2b644ed..7b1cdeae9f7dbc707b6b4aafe60cb02a773c7ca3 100644 (file)
@@ -17,7 +17,7 @@ import org.opendaylight.yangtools.concepts.Registration;
  * @param <P> the path identifier type
  * @param <S> the RPC implementation type
  */
-public interface RoutedRegistration<C, P extends Path<P>, S> extends Registration<S> {
+public interface RoutedRegistration<C, P extends Path<P>, S> extends Registration {
 
     /**
      * Registers the RPC implementation associated with this registration for the given path
index ee9af6cb80d91ccc4083a61e3e6a01604769a757..5dfb55ebd939615a10c6b0b0ceb4c4a6758c0940 100644 (file)
@@ -97,13 +97,13 @@ public abstract class AbstractDataReadRouter<P extends Path<P>, D> implements Da
      * @param reader Reader instance which is responsible for reading particular subpath.
      * @return
      */
-    public Registration<DataReader<P, D>> registerOperationalReader(P path, DataReader<P, D> reader) {
+    public Registration registerOperationalReader(P path, DataReader<P, D> reader) {
         OperationalDataReaderRegistration<P, D> ret = new OperationalDataReaderRegistration<>(path, reader);
         operationalReaders.put(path, ret);
         return ret;
     }
 
-    public Registration<DataReader<P, D>> registerConfigurationReader(P path, DataReader<P, D> reader) {
+    public Registration registerConfigurationReader(P path, DataReader<P, D> reader) {
         ConfigurationDataReaderRegistration<P, D> ret = new ConfigurationDataReaderRegistration<>(path, reader);
         configReaders.put(path, ret);
         return ret;
index d8bde3d41a623178e1e202b6ba1bc67aa7b49525..a732f2f1b96b72ee72a6987e2f2b94651656bf09 100644 (file)
@@ -230,7 +230,7 @@ public abstract class AbstractDataBroker<P extends Path<P>, D extends Object, DC
     }
 
     @Override
-    public final Registration<DataCommitHandler<P, D>> registerCommitHandler(final P path,
+    public final Registration registerCommitHandler(final P path,
             final DataCommitHandler<P, D> commitHandler) {
         synchronized (commitHandler) {
             final DataCommitHandlerRegistrationImpl<P, D> registration = new DataCommitHandlerRegistrationImpl<P, D>(
@@ -266,8 +266,8 @@ public abstract class AbstractDataBroker<P extends Path<P>, D extends Object, DC
     public final CompositeObjectRegistration<DataReader<P, D>> registerDataReader(final P path,
             final DataReader<P, D> reader) {
 
-        final Registration<DataReader<P, D>> confReg = getDataReadRouter().registerConfigurationReader(path, reader);
-        final Registration<DataReader<P, D>> dataReg = getDataReadRouter().registerOperationalReader(path, reader);
+        final Registration confReg = getDataReadRouter().registerConfigurationReader(path, reader);
+        final Registration dataReg = getDataReadRouter().registerOperationalReader(path, reader);
         return new CompositeObjectRegistration<DataReader<P, D>>(reader, Arrays.asList(confReg, dataReg));
     }
 
index 613cf1bfe24e1b31cf854d978c35c48ef11d96f3..e85534cdd3ba40a921db0156ccbe86fd624b9541 100644 (file)
@@ -23,7 +23,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  */
 public interface DOMDataBroker extends
         AsyncDataBroker<InstanceIdentifier, NormalizedNode<?, ?>, DOMDataChangeListener>,
-        TransactionChainFactory<InstanceIdentifier, NormalizedNode<?, ?>>, BrokerService {
+        TransactionChainFactory<InstanceIdentifier, NormalizedNode<?, ?>>, BrokerService, DOMService {
 
     /**
      * {@inheritDoc}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPoint.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPoint.java
new file mode 100644 (file)
index 0000000..c0baf19
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.md.sal.dom.api;
+
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+
+import com.google.common.base.Optional;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public interface DOMMountPoint extends Identifiable<InstanceIdentifier> {
+
+    <T extends DOMService> Optional<T> getService(Class<T> cls);
+
+    SchemaContext getSchemaContext();
+}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPointService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPointService.java
new file mode 100644 (file)
index 0000000..3155bd5
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.md.sal.dom.api;
+
+import org.opendaylight.controller.sal.core.api.BrokerService;
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+import com.google.common.base.Optional;
+
+
+public interface DOMMountPointService extends BrokerService {
+
+    Optional<DOMMountPoint> getMountPoint(InstanceIdentifier path);
+
+    DOMMountPointBuilder createMountPoint(InstanceIdentifier path);
+
+    ListenerRegistration<MountProvisionListener> registerProvisionListener(MountProvisionListener listener);
+
+    public interface DOMMountPointBuilder {
+
+        <T extends DOMService> DOMMountPointBuilder addService(Class<T> type,T impl);
+
+        DOMMountPointBuilder addInitialSchemaContext(SchemaContext ctx);
+
+        ObjectRegistration<DOMMountPoint> register();
+    }
+}
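
An illustrative sketch only: how a provider might use the builder contract declared above to expose a mount point. The MountPointRegistrationExample helper and its method name are hypothetical; the DOMMountPointService, InstanceIdentifier and SchemaContext instances are assumed to be supplied by the caller.

import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
import org.opendaylight.yangtools.concepts.ObjectRegistration;
import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;

final class MountPointRegistrationExample {
    private MountPointRegistrationExample() {
    }

    // Builds and registers a mount point at the given path with an initial schema context.
    static ObjectRegistration<DOMMountPoint> registerMountPoint(final DOMMountPointService service,
            final InstanceIdentifier path, final SchemaContext schemaContext) {
        return service.createMountPoint(path)
                .addInitialSchemaContext(schemaContext)
                .register();
    }
}
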
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMService.java
new file mode 100644 (file)
index 0000000..357cb8b
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.md.sal.dom.api;
+
+public interface DOMService {
+
+}
index 724cbe75f420e41ccfcfbe041f4b2854e2e1d616..b48f6cb61a6fd6a1c4c8555292fd0ff4a821b652 100644 (file)
@@ -78,13 +78,18 @@ public interface Broker {
      *
      * @param cons
      *            Consumer to be registered.
-     * @param context
      * @return a session specific to consumer registration
      * @throws IllegalArgumentException
      *             If the consumer is <code>null</code>.
      * @throws IllegalStateException
      *             If the consumer is already registered.
      */
+    ConsumerSession registerConsumer(Consumer cons);
+
+    /**
+     * @deprecated Use {@link #registerConsumer(Consumer)} instead (BundleContext is no longer used).
+     */
+    @Deprecated
     ConsumerSession registerConsumer(Consumer cons, BundleContext context);
 
     /**
@@ -110,13 +115,18 @@ public interface Broker {
      *
      * @param prov
      *            Provider to be registered.
-     * @param context
      * @return a session unique to the provider registration.
      * @throws IllegalArgumentException
      *             If the provider is <code>null</code>.
      * @throws IllegalStateException
      *             If the consumer is already registered.
      */
+    ProviderSession registerProvider(Provider prov);
+
+    /**
+     * @deprecated Use {@link #registerProvider(Provider)} instead (BundleContext is no longer used).
+     */
+    @Deprecated
     ProviderSession registerProvider(Provider prov, BundleContext context);
 
     /**
index a693cd6c49b475bc5f046cbb48c465f37858729f..a5ab8ac911b20fc703b96185af66e9bb60c170d7 100644 (file)
@@ -38,21 +38,22 @@ public interface Consumer {
     public void onSessionInitiated(ConsumerSession session);
 
     /**
-     * Get a set of implementations of consumer functionality to be registered
-     * into system during the consumer registration to the SAL.
-     *
-     * This method is invoked by {@link Broker#registerConsumer(Consumer)}.
-     *
-     * @return Set of consumer functionality.
+     * @deprecated - no longer used or needed
+     *
+     * Suggested implementation until removed:
+     * <pre>
+     * public Collection&lt;ConsumerFunctionality&gt; getConsumerFunctionality() {
+     *     return Collections.emptySet();
+     * }
+     * </pre>
      */
+    @Deprecated
     public Collection<ConsumerFunctionality> getConsumerFunctionality();
 
     /**
-     * The marker interface for the interfaces describing the consumer
-     * functionality contracts.
-     *
-     *
+     * @deprecated - no longer used or needed
      */
+    @Deprecated
     public interface ConsumerFunctionality {
 
     }
index 0a57d12579239ae29e9fff251651db8483f1ee16..4f32983f84b7e7f5a0408e1b5dfcdf4f5a01b65c 100644 (file)
@@ -42,27 +42,22 @@ public interface Provider {
     public void onSessionInitiated(ProviderSession session);
 
     /**
-     * Gets a set of implementations of provider functionality to be registered
-     * into system during the provider registration to the SAL.
+     * @deprecated - No longer used or needed
      *
-     * <p>
-     * This method is invoked by {@link Broker#registerProvider(Provider)} to
-     * learn the initial provided functionality
-     *
-     * @return Set of provider's functionality.
+     * Suggested implementation until removed:
+     * <pre>
+     * public Collection&lt;ProviderFunctionality&gt; getProviderFunctionality() {
+     *     return Collections.emptySet();
+     * }
+     * </pre>
      */
+    @Deprecated
     public Collection<ProviderFunctionality> getProviderFunctionality();
 
     /**
-     * Functionality provided by the {@link Provider}
-     *
-     * <p>
-     * Marker interface used to mark the interfaces describing specific
-     * functionality which could be exposed by providers to other components.
-     *
-
-     *
+     * @deprecated - no longer used or needed
      */
+    @Deprecated
     public interface ProviderFunctionality {
 
     }
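
An illustrative sketch only: a minimal Provider that follows the stub suggested for the now-deprecated getProviderFunctionality() and registers itself through the new single-argument registerProvider(Provider) method. The ExampleProvider name and registerWith() helper are hypothetical and not part of this change.

import java.util.Collection;
import java.util.Collections;

import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
import org.opendaylight.controller.sal.core.api.Provider;

class ExampleProvider implements Provider {

    @Override
    public void onSessionInitiated(final ProviderSession session) {
        // obtain broker services (e.g. a mount point service) from the session here
    }

    @Override
    @Deprecated
    public Collection<ProviderFunctionality> getProviderFunctionality() {
        // suggested stub until the method is removed
        return Collections.emptySet();
    }

    static ProviderSession registerWith(final Broker broker) {
        return broker.registerProvider(new ExampleProvider());
    }
}
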
index a22a6ef75e6be7d25461071b1c8290d1a13c0be4..13a50090c093154ab2422549d314615026d5afd8 100644 (file)
@@ -8,13 +8,14 @@
 package org.opendaylight.controller.sal.core.api;
 
 import org.opendaylight.controller.md.sal.common.api.routing.RouteChangePublisher;
+import org.opendaylight.controller.md.sal.dom.api.DOMService;
 import org.opendaylight.controller.sal.core.api.Broker.RoutedRpcRegistration;
 import org.opendaylight.controller.sal.core.api.Broker.RpcRegistration;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
 
-public interface RpcProvisionRegistry extends RpcImplementation, BrokerService, RouteChangePublisher<RpcRoutingContext, InstanceIdentifier> {
+public interface RpcProvisionRegistry extends RpcImplementation, BrokerService, RouteChangePublisher<RpcRoutingContext, InstanceIdentifier>, DOMService {
 
     /**
      * Registers an implementation of the rpc.
index 0810fa3e98ab9041c51b0febb83184ec14f6463d..97b640647a6b6e1c4dedbb34ec09d21d8acf8df7 100644 (file)
@@ -63,9 +63,9 @@ public interface DataProviderService extends
     void removeRefresher(DataStoreIdentifier store, DataRefresher refresher);
 
 
-    Registration<DataReader<InstanceIdentifier, CompositeNode>> registerConfigurationReader(InstanceIdentifier path, DataReader<InstanceIdentifier, CompositeNode> reader);
+    Registration registerConfigurationReader(InstanceIdentifier path, DataReader<InstanceIdentifier, CompositeNode> reader);
 
-    Registration<DataReader<InstanceIdentifier, CompositeNode>> registerOperationalReader(InstanceIdentifier path, DataReader<InstanceIdentifier, CompositeNode> reader);
+    Registration registerOperationalReader(InstanceIdentifier path, DataReader<InstanceIdentifier, CompositeNode> reader);
 
     public interface DataRefresher extends Provider.ProviderFunctionality {
 
index 5698b969771bd4296cbcb8d85ebbd03fcbca59ae..a4343ba5cf7e2b8da6cd9034b2264f2fd698310a 100644 (file)
@@ -21,6 +21,7 @@ import com.google.common.util.concurrent.ListenableFuture;
  * Interface representing a single mount instance and represents a way for
  * clients to access underlying data, RPCs and notifications.
  */
+@Deprecated
 public interface MountInstance extends //
         NotificationService, //
         DataBrokerService {
index 29e3b911c1d1b1d8a5cdf64b30da0842bff7d11f..a5c3b5d0f6460a3ecc805994a6877883ebf09738 100644 (file)
@@ -12,6 +12,7 @@ import org.opendaylight.controller.sal.core.api.data.DataProviderService;
 import org.opendaylight.controller.sal.core.api.notify.NotificationPublishService;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
+@Deprecated
 public interface MountProvisionInstance extends //
         MountInstance,//
         NotificationPublishService, //
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/mount/MountProvisionListener.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/mount/MountProvisionListener.java
new file mode 100644 (file)
index 0000000..6485147
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.sal.core.api.mount;
+
+import java.util.EventListener;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+
+public interface MountProvisionListener extends EventListener {
+
+    void onMountPointCreated(InstanceIdentifier path);
+
+    void onMountPointRemoved(InstanceIdentifier path);
+
+}
index 1185c4528cee3e60bbc7bba70ab3beddc5eac021..807b020b72299b379ae53a3c3932db88f1aeb949 100644 (file)
@@ -7,11 +7,13 @@
  */
 package org.opendaylight.controller.sal.core.api.mount;
 
-import java.util.EventListener;
-
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
 
+/**
+ * @deprecated Use org.opendaylight.controller.md.sal.dom.api.DOMMountPointService instead
+ */
+@Deprecated
 public interface MountProvisionService extends MountService {
 
     @Override
@@ -23,11 +25,4 @@ public interface MountProvisionService extends MountService {
 
     ListenerRegistration<MountProvisionListener> registerProvisionListener(MountProvisionListener listener);
 
-    public interface MountProvisionListener extends EventListener {
-
-        void onMountPointCreated(InstanceIdentifier path);
-
-        void onMountPointRemoved(InstanceIdentifier path);
-
-    }
 }
index 6d1f17255c7defd4a0e61a890d0b6246b4b466a1..a730203f4785504732375a49e4dd38d69cc63dbd 100644 (file)
@@ -13,7 +13,10 @@ import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
 /**
  * Client-level interface for interacting with mount points. It provides access
  * to {@link MountInstance} instances based on their path.
+ *
+ *  @deprecated Use org.opendaylight.controller.md.sal.dom.api.DOMMountPointService instead
  */
+@Deprecated
 public interface MountService extends BrokerService {
     /**
      * Obtain access to a mount instance registered at the specified path.
index f1156c39642af935a0d0a88d5d4d55bc6f40077f..4f5c7abf5f1bb560ea2b1d5d1b57982cfc700f4a 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.sal.core.api.notify;
 
+import org.opendaylight.controller.md.sal.dom.api.DOMService;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 
 /**
@@ -23,7 +24,7 @@ import org.opendaylight.yangtools.yang.data.api.CompositeNode;
  * {@link NotificationListener#onNotification(CompositeNode)}
  * </ol>
  */
-public interface NotificationPublishService extends NotificationService {
+public interface NotificationPublishService extends NotificationService, DOMService {
     /**
      * Publishes a notification.
      *
index 1d67ca08241af4489c9582fdb5e39a576f67afca..9c0db42d6ceaf8007645b026057bc92558727d3b 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.sal.core.api.notify;
 
 import org.opendaylight.controller.sal.core.api.BrokerService;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.common.QName;
 
 
@@ -38,6 +38,6 @@ public interface NotificationService extends BrokerService {
      * @param notification
      * @param listener
      */
-    Registration<NotificationListener> addNotificationListener(QName notification,
+    ListenerRegistration<NotificationListener> addNotificationListener(QName notification,
             NotificationListener listener);
 }
index f361af948cb1c600a1c778fe97958cbcef1643d0..8405a78c33a46f7f943624d4b6c6b1bd8aeb18f5 100644 (file)
@@ -77,7 +77,7 @@ public class BackwardsCompatibleDataBroker implements DataProviderService {
     }
 
     @Override
-    public Registration<DataCommitHandler<InstanceIdentifier, CompositeNode>> registerCommitHandler(
+    public Registration registerCommitHandler(
             final InstanceIdentifier path, final DataCommitHandler<InstanceIdentifier, CompositeNode> commitHandler) {
         // FIXME Do real forwarding
         return new AbstractObjectRegistration<DataCommitHandler<InstanceIdentifier,CompositeNode>>(commitHandler) {
@@ -117,13 +117,13 @@ public class BackwardsCompatibleDataBroker implements DataProviderService {
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier, CompositeNode>> registerConfigurationReader(
+    public Registration registerConfigurationReader(
             final InstanceIdentifier path, final DataReader<InstanceIdentifier, CompositeNode> reader) {
         throw new UnsupportedOperationException("Data Reader contract is not supported.");
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier, CompositeNode>> registerOperationalReader(
+    public Registration registerOperationalReader(
             final InstanceIdentifier path, final DataReader<InstanceIdentifier, CompositeNode> reader) {
         throw new UnsupportedOperationException("Data Reader contract is not supported.");
     }
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/mount/DOMMountPointServiceImpl.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/mount/DOMMountPointServiceImpl.java
new file mode 100644 (file)
index 0000000..cdb78fc
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.md.sal.dom.broker.impl.mount;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
+import org.opendaylight.controller.md.sal.dom.api.DOMService;
+import org.opendaylight.controller.md.sal.dom.broker.spi.mount.SimpleDOMMountPoint;
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.util.ListenerRegistry;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ClassToInstanceMap;
+import com.google.common.collect.MutableClassToInstanceMap;
+
+public class DOMMountPointServiceImpl implements DOMMountPointService {
+
+    private final Map<InstanceIdentifier, SimpleDOMMountPoint> mountPoints = new HashMap<>();
+
+    private final ListenerRegistry<MountProvisionListener> listeners = ListenerRegistry.create();
+
+    @Override
+    public Optional<DOMMountPoint> getMountPoint(final InstanceIdentifier path) {
+        return Optional.<DOMMountPoint>fromNullable(mountPoints.get(path));
+    }
+
+    @Override
+    public DOMMountPointBuilder createMountPoint(final InstanceIdentifier path) {
+        Preconditions.checkState(!mountPoints.containsKey(path), "Mount point already exists");
+        return new DOMMountPointBuilderImpl(path);
+    }
+
+    public void notifyMountCreated(final InstanceIdentifier identifier) {
+        for (final ListenerRegistration<MountProvisionListener> listener : listeners
+                .getListeners()) {
+            listener.getInstance().onMountPointCreated(identifier);
+        }
+    }
+
+    @Override
+    public ListenerRegistration<MountProvisionListener> registerProvisionListener(
+            final MountProvisionListener listener) {
+        return listeners.register(listener);
+    }
+
+    public ObjectRegistration<DOMMountPoint> registerMountPoint(final SimpleDOMMountPoint mountPoint) {
+        synchronized (mountPoints) {
+            Preconditions.checkState(!mountPoints.containsKey(mountPoint.getIdentifier()), "Mount point already exists");
+            mountPoints.put(mountPoint.getIdentifier(), mountPoint);
+        }
+        notifyMountCreated(mountPoint.getIdentifier());
+
+        // FIXME: this shouldn't be null
+        return null;
+    }
+
+    public class DOMMountPointBuilderImpl implements DOMMountPointBuilder {
+
+        ClassToInstanceMap<DOMService> services = MutableClassToInstanceMap.create();
+        private SimpleDOMMountPoint mountPoint;
+        private final InstanceIdentifier path;
+        private SchemaContext schemaContext;
+
+        public DOMMountPointBuilderImpl(final InstanceIdentifier path) {
+            this.path = path;
+        }
+
+        @Override
+        public <T extends DOMService> DOMMountPointBuilder addService(final Class<T> type, final T impl) {
+            services.putInstance(type, impl);
+            return this;
+        }
+
+        @Override
+        public DOMMountPointBuilder addInitialSchemaContext(final SchemaContext ctx) {
+            schemaContext = ctx;
+            return this;
+        }
+
+        @Override
+        public ObjectRegistration<DOMMountPoint> register() {
+            Preconditions.checkState(mountPoint == null, "Mount point is already built.");
+            mountPoint = SimpleDOMMountPoint.create(path, services,schemaContext);
+            return registerMountPoint(mountPoint);
+        }
+    }
+}
index 1af03e50465e2acc922f19ad66f15a3c640bfc34..68066778dbd29308dbb6093b6c59d98e62dce080 100644 (file)
@@ -62,22 +62,13 @@ public class BrokerImpl implements Broker, RpcProvisionRegistry, AutoCloseable {
     @Override
     public ConsumerSession registerConsumer(final Consumer consumer,
             final BundleContext ctx) {
-        checkPredicates(consumer);
-        log.trace("Registering consumer {}", consumer);
-        final ConsumerContextImpl session = newSessionFor(consumer, ctx);
-        consumer.onSessionInitiated(session);
-        sessions.add(session);
-        return session;
+        return registerConsumer(consumer);
     }
 
     @Override
     public ProviderSession registerProvider(final Provider provider,
             final BundleContext ctx) {
-        checkPredicates(provider);
-        final ProviderContextImpl session = newSessionFor(provider, ctx);
-        provider.onSessionInitiated(session);
-        providerSessions.add(session);
-        return session;
+        return registerProvider(provider);
     }
 
     protected Future<RpcResult<CompositeNode>> invokeRpcAsync(final QName rpc,
@@ -106,14 +97,12 @@ public class BrokerImpl implements Broker, RpcProvisionRegistry, AutoCloseable {
     }
 
     // Private Factory methods
-    private ConsumerContextImpl newSessionFor(final Consumer provider,
-            final BundleContext ctx) {
+    private ConsumerContextImpl newSessionFor(final Consumer provider) {
         ConsumerContextImpl ret = new ConsumerContextImpl(provider, this);
         return ret;
     }
 
-    private ProviderContextImpl newSessionFor(final Provider provider,
-            final BundleContext ctx) {
+    private ProviderContextImpl newSessionFor(final Provider provider) {
         ProviderContextImpl ret = new ProviderContextImpl(provider, this);
         return ret;
     }
@@ -208,4 +197,25 @@ public class BrokerImpl implements Broker, RpcProvisionRegistry, AutoCloseable {
         return Optional.fromNullable(services.getInstance(service));
     }
 
+
+    @Override
+    public ConsumerSession registerConsumer(Consumer consumer) {
+        checkPredicates(consumer);
+        log.trace("Registering consumer {}", consumer);
+        final ConsumerContextImpl session = newSessionFor(consumer);
+        consumer.onSessionInitiated(session);
+        sessions.add(session);
+        return session;
+    }
+
+
+    @Override
+    public ProviderSession registerProvider(Provider provider) {
+        checkPredicates(provider);
+        final ProviderContextImpl session = newSessionFor(provider);
+        provider.onSessionInitiated(session);
+        providerSessions.add(session);
+        return session;
+    }
+
 }
index 69f518bb32f5c57f467285f0a32056240893043b..32a0ad20caa1026764a9044ee68a3936ffbe4c0d 100644 (file)
@@ -45,13 +45,13 @@ public class DataBrokerImpl extends AbstractDataBroker<InstanceIdentifier, Compo
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier, CompositeNode>> registerConfigurationReader(
+    public Registration registerConfigurationReader(
             InstanceIdentifier path, DataReader<InstanceIdentifier, CompositeNode> reader) {
         return getDataReadRouter().registerConfigurationReader(path, reader);
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier, CompositeNode>> registerOperationalReader(
+    public Registration registerOperationalReader(
             InstanceIdentifier path, DataReader<InstanceIdentifier, CompositeNode> reader) {
         return getDataReadRouter().registerOperationalReader(path, reader);
     }
index 60a7e81c4c046183e6ee59e0f27d48e5a15e90bc..d8174c312ab0e9622a23f17549dd841130a633ff 100644 (file)
@@ -49,7 +49,7 @@ public class GlobalBundleScanningSchemaServiceImpl implements SchemaContextProvi
     private final BundleContext context;
 
     private ServiceTracker<SchemaServiceListener, SchemaServiceListener> listenerTracker;
-    private BundleTracker<Iterable<Registration<URL>>> bundleTracker;
+    private BundleTracker<Iterable<Registration>> bundleTracker;
     private boolean starting = true;
     private static GlobalBundleScanningSchemaServiceImpl instance;
 
@@ -156,9 +156,9 @@ public class GlobalBundleScanningSchemaServiceImpl implements SchemaContextProvi
         }
     }
 
-    private class BundleScanner implements BundleTrackerCustomizer<Iterable<Registration<URL>>> {
+    private class BundleScanner implements BundleTrackerCustomizer<Iterable<Registration>> {
         @Override
-        public Iterable<Registration<URL>> addingBundle(final Bundle bundle, final BundleEvent event) {
+        public Iterable<Registration> addingBundle(final Bundle bundle, final BundleEvent event) {
 
             if (bundle.getBundleId() == 0) {
                 return Collections.emptyList();
@@ -169,7 +169,7 @@ public class GlobalBundleScanningSchemaServiceImpl implements SchemaContextProvi
                 return Collections.emptyList();
             }
 
-            final List<Registration<URL>> urls = new ArrayList<>();
+            final List<Registration> urls = new ArrayList<>();
             while (enumeration.hasMoreElements()) {
                 final URL u = enumeration.nextElement();
                 try {
@@ -189,7 +189,7 @@ public class GlobalBundleScanningSchemaServiceImpl implements SchemaContextProvi
         }
 
         @Override
-        public void modifiedBundle(final Bundle bundle, final BundleEvent event, final Iterable<Registration<URL>> object) {
+        public void modifiedBundle(final Bundle bundle, final BundleEvent event, final Iterable<Registration> object) {
             LOG.debug("Modified bundle {} {} {}", bundle, event, object);
         }
 
@@ -200,8 +200,8 @@ public class GlobalBundleScanningSchemaServiceImpl implements SchemaContextProvi
          */
 
         @Override
-        public synchronized void removedBundle(final Bundle bundle, final BundleEvent event, final Iterable<Registration<URL>> urls) {
-            for (Registration<URL> url : urls) {
+        public synchronized void removedBundle(final Bundle bundle, final BundleEvent event, final Iterable<Registration> urls) {
+            for (Registration url : urls) {
                 try {
                     url.close();
                 } catch (Exception e) {
index e69343d4fe82c7f36a7a9c378c8669e67cb1114b..dd47cb41f763275c0441ffcacecb8f53670730df 100644 (file)
@@ -42,6 +42,7 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 import com.google.common.util.concurrent.ListenableFuture;
 
+@Deprecated
 public class MountPointImpl implements MountProvisionInstance, SchemaContextProvider {
 
     private final SchemaAwareRpcBroker rpcs;
@@ -76,7 +77,7 @@ public class MountPointImpl implements MountProvisionInstance, SchemaContextProv
     }
 
     @Override
-    public Registration<NotificationListener> addNotificationListener(final QName notification, final NotificationListener listener) {
+    public ListenerRegistration<NotificationListener> addNotificationListener(final QName notification, final NotificationListener listener) {
         return notificationRouter.addNotificationListener(notification, listener);
     }
 
@@ -91,13 +92,13 @@ public class MountPointImpl implements MountProvisionInstance, SchemaContextProv
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier, CompositeNode>> registerOperationalReader(
+    public Registration registerOperationalReader(
             final InstanceIdentifier path, final DataReader<InstanceIdentifier, CompositeNode> reader) {
         return dataReader.registerOperationalReader(path, reader);
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier, CompositeNode>> registerConfigurationReader(
+    public Registration registerConfigurationReader(
             final InstanceIdentifier path, final DataReader<InstanceIdentifier, CompositeNode> reader) {
         return dataReader.registerConfigurationReader(path, reader);
     }
@@ -150,7 +151,7 @@ public class MountPointImpl implements MountProvisionInstance, SchemaContextProv
     }
 
     @Override
-    public Registration<DataCommitHandler<InstanceIdentifier, CompositeNode>> registerCommitHandler(
+    public Registration registerCommitHandler(
             final InstanceIdentifier path, final DataCommitHandler<InstanceIdentifier, CompositeNode> commitHandler) {
         return dataReader.registerCommitHandler(path, commitHandler);
     }
index 55a6ee77b470c2c2a98f5f2fbd30e47d1ba8f17d..ac2ab04bbebf7eed79e27d4b617eb62e4b5906d0 100644 (file)
@@ -14,11 +14,13 @@ import java.util.concurrent.ConcurrentMap;
 
 import org.opendaylight.controller.sal.core.api.data.DataProviderService;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionListener;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.concepts.util.ListenerRegistry;
 import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
 
+@Deprecated
 public class MountPointManagerImpl implements MountProvisionService {
 
     private final ListenerRegistry<MountProvisionListener> listeners =
index 7fba31114f6d53a2c491f91511109a32b6c8efec..1e93202007cbd9a800e485a9ba0343298160c035 100644 (file)
@@ -12,7 +12,7 @@ import java.util.Collection;
 import org.opendaylight.controller.sal.core.api.notify.NotificationListener;
 import org.opendaylight.controller.sal.dom.broker.spi.NotificationRouter;
 import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 import org.slf4j.Logger;
@@ -25,12 +25,12 @@ import com.google.common.collect.Multimaps;
 public class NotificationRouterImpl implements NotificationRouter {
     private static Logger log = LoggerFactory.getLogger(NotificationRouterImpl.class);
 
-    private final Multimap<QName, ListenerRegistration> listeners = Multimaps.synchronizedSetMultimap(HashMultimap.<QName, ListenerRegistration>create());
+    private final Multimap<QName, MyListenerRegistration> listeners = Multimaps.synchronizedSetMultimap(HashMultimap.<QName, MyListenerRegistration>create());
 //    private Registration<NotificationListener> defaultListener;
 
     private void sendNotification(CompositeNode notification) {
         final QName type = notification.getNodeType();
-        final Collection<ListenerRegistration> toNotify = listeners.get(type);
+        final Collection<MyListenerRegistration> toNotify = listeners.get(type);
         log.trace("Publishing notification " + type);
 
         if ((toNotify == null) || toNotify.isEmpty()) {
@@ -38,7 +38,7 @@ public class NotificationRouterImpl implements NotificationRouter {
             return;
         }
 
-        for (ListenerRegistration listener : toNotify) {
+        for (MyListenerRegistration listener : toNotify) {
             try {
                 // FIXME: ensure that notification is immutable
                 listener.getInstance().onNotification(notification);
@@ -54,17 +54,17 @@ public class NotificationRouterImpl implements NotificationRouter {
     }
 
     @Override
-    public Registration<NotificationListener> addNotificationListener(QName notification, NotificationListener listener) {
-        ListenerRegistration ret = new ListenerRegistration(notification, listener);
+    public ListenerRegistration<NotificationListener> addNotificationListener(QName notification, NotificationListener listener) {
+        MyListenerRegistration ret = new MyListenerRegistration(notification, listener);
         listeners.put(notification, ret);
         return ret;
     }
 
-    private class ListenerRegistration extends AbstractListenerRegistration<NotificationListener> {
+    private class MyListenerRegistration extends AbstractListenerRegistration<NotificationListener> {
 
         final QName type;
 
-        public ListenerRegistration(QName type, NotificationListener instance) {
+        public MyListenerRegistration(QName type, NotificationListener instance) {
             super(instance);
             this.type = type;
         }
index 8d33ff7997dab4b2a09d813c9200e8b95f7dc762..275107ed8fb062bcfaf7c766cd0701ab3f0625af 100644 (file)
@@ -39,9 +39,9 @@ public abstract class AbstractBrokerServiceProxy<T extends BrokerService> implem
         return reference;
     }
 
-    private final Set<Registration<?>> registrations = Collections.synchronizedSet(new HashSet<Registration<?>>());
+    private final Set<Registration> registrations = Collections.synchronizedSet(new HashSet<Registration>());
 
-    protected <R extends Registration<?>> R addRegistration(final R registration) {
+    protected <R extends Registration> R addRegistration(final R registration) {
         if (registration != null) {
             registrations.add(registration);
         }
@@ -63,7 +63,7 @@ public abstract class AbstractBrokerServiceProxy<T extends BrokerService> implem
             RuntimeException potentialException = new RuntimeException(
                     "Uncaught exceptions occured during unregistration");
             boolean hasSuppressed = false;
-            for (Registration<?> registration : registrations) {
+            for (Registration registration : registrations) {
                 try {
                     registration.close();
                 } catch (Exception e) {
index caae2971f8bf8eaba5cc392f7252aa123c086fa4..5e5b25bdc85fa5129a5187f1fbfb712005751efd 100644 (file)
@@ -57,19 +57,19 @@ public class DataProviderServiceProxy extends AbstractBrokerServiceProxy<DataPro
     }
 
     @Override
-    public Registration<DataCommitHandler<InstanceIdentifier, CompositeNode>> registerCommitHandler(
+    public Registration registerCommitHandler(
             InstanceIdentifier path, DataCommitHandler<InstanceIdentifier, CompositeNode> commitHandler) {
         return addRegistration(getDelegate().registerCommitHandler(path, commitHandler));
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier, CompositeNode>> registerConfigurationReader(
+    public Registration registerConfigurationReader(
             InstanceIdentifier path, DataReader<InstanceIdentifier, CompositeNode> reader) {
         return addRegistration(getDelegate().registerConfigurationReader(path, reader));
     }
 
     @Override
-    public Registration<DataReader<InstanceIdentifier, CompositeNode>> registerOperationalReader(
+    public Registration registerOperationalReader(
             InstanceIdentifier path, DataReader<InstanceIdentifier, CompositeNode> reader) {
         return addRegistration(getDelegate().registerOperationalReader(path, reader));
     }
index 24d5430d6d3713a0c6b3e5a5252eb710e9550e7d..2669f1279e6e2391f702ce8e12513f150f415647 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.sal.dom.broker.osgi;
 
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionListener;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
index cd26c4ea5c51f1f8d0283d8fb40803954528e48b..72cd41b9336d5233ad2129f7d81644b38065730b 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.sal.dom.broker.osgi;
 
 import org.opendaylight.controller.sal.core.api.notify.NotificationListener;
 import org.opendaylight.controller.sal.core.api.notify.NotificationPublishService;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 import org.osgi.framework.ServiceReference;
@@ -22,7 +22,7 @@ public class NotificationPublishServiceProxy extends AbstractBrokerServiceProxy<
     }
 
     @Override
-    public Registration<NotificationListener> addNotificationListener(QName notification, NotificationListener listener) {
+    public ListenerRegistration<NotificationListener> addNotificationListener(QName notification, NotificationListener listener) {
         return addRegistration(getDelegate().addNotificationListener(notification, listener));
 
     }
index a0051dc38e5efa2f3156bc3fc6a2e9b8fb4d5420..4da7f025b34bec4ba57cf483e3595e6d0bbf545f 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.sal.dom.broker.osgi;
 
 import org.opendaylight.controller.sal.core.api.notify.NotificationListener;
 import org.opendaylight.controller.sal.core.api.notify.NotificationService;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.osgi.framework.ServiceReference;
 
@@ -21,7 +21,7 @@ public class NotificationServiceProxy extends AbstractBrokerServiceProxy<Notific
     }
 
     @Override
-    public Registration<NotificationListener> addNotificationListener(QName notification, NotificationListener listener) {
+    public ListenerRegistration<NotificationListener> addNotificationListener(QName notification, NotificationListener listener) {
         return addRegistration(getDelegate().addNotificationListener(notification, listener));
     }
 }
index ebe95d6eb5589624536c887f7b8802898b0e7384..2d8bd186c5fb3e5eb297722b10724fb44cee022d 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.sal.dom.broker.spi;
 
 import org.opendaylight.controller.sal.core.api.notify.NotificationListener;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 
@@ -22,7 +22,7 @@ public interface NotificationRouter {
      * @param notification
      * @param listener
      */
-    Registration<NotificationListener> addNotificationListener(QName notification,
+    ListenerRegistration<NotificationListener> addNotificationListener(QName notification,
             NotificationListener listener);
 
 }
index 29392dc7b3e91e6c6dbe202cbe1b4e4292cce17d..cd1a792d677cd7e4cbaead37b652cd2e8d9a4f21 100644 (file)
@@ -92,8 +92,7 @@ public final class YangSchemaUtils {
     }
 
     private static DataSchemaNode searchInChoices(final DataNodeContainer node, final QName arg) {
-        Set<DataSchemaNode> children = node.getChildNodes();
-        for (DataSchemaNode child : children) {
+        for (DataSchemaNode child : node.getChildNodes()) {
             if (child instanceof ChoiceNode) {
                 ChoiceNode choiceNode = (ChoiceNode) child;
                 DataSchemaNode potential = searchInCases(choiceNode, arg);
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/MountPointServiceTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/MountPointServiceTest.java
new file mode 100644 (file)
index 0000000..917976b
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.base.Optional;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService.DOMMountPointBuilder;
+import org.opendaylight.controller.md.sal.dom.broker.impl.mount.DOMMountPointServiceImpl;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+
+public class MountPointServiceTest {
+
+    private DOMMountPointService mountService;
+    private static final InstanceIdentifier PATH = InstanceIdentifier.of(QName.create("namespace", "12-12-2012", "top"));
+
+    @Before
+    public void setup() {
+        mountService = new DOMMountPointServiceImpl();
+    }
+
+    @Test
+    public void createSimpleMountPoint() {
+        Optional<DOMMountPoint> mountNotPresent = mountService.getMountPoint(PATH);
+        assertFalse(mountNotPresent.isPresent());
+        DOMMountPointBuilder mountBuilder = mountService.createMountPoint(PATH);
+        mountBuilder.register();
+
+        Optional<DOMMountPoint> mountPresent = mountService.getMountPoint(PATH);
+        assertTrue(mountPresent.isPresent());
+    }
+}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/mount/SimpleDOMMountPoint.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/mount/SimpleDOMMountPoint.java
new file mode 100644 (file)
index 0000000..48a6878
--- /dev/null
@@ -0,0 +1,41 @@
+package org.opendaylight.controller.md.sal.dom.broker.spi.mount;
+
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
+import org.opendaylight.controller.md.sal.dom.api.DOMService;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.ClassToInstanceMap;
+import com.google.common.collect.ImmutableClassToInstanceMap;
+
+public class SimpleDOMMountPoint implements DOMMountPoint {
+
+    private final InstanceIdentifier identifier;
+    private final ClassToInstanceMap<DOMService> services;
+    private final SchemaContext schemaContext;
+
+    public static final SimpleDOMMountPoint create(final InstanceIdentifier identifier, final ClassToInstanceMap<DOMService> services, final SchemaContext ctx) {
+        return new SimpleDOMMountPoint(identifier, services, ctx);
+    }
+    private SimpleDOMMountPoint(final InstanceIdentifier identifier, final ClassToInstanceMap<DOMService> services, final SchemaContext ctx) {
+        this.identifier = identifier;
+        this.services = ImmutableClassToInstanceMap.copyOf(services);
+        this.schemaContext = ctx;
+    }
+
+    @Override
+    public InstanceIdentifier getIdentifier() {
+        return identifier;
+    }
+
+    @Override
+    public SchemaContext getSchemaContext() {
+        return schemaContext;
+    }
+
+    @Override
+    public <T extends DOMService> Optional<T> getService(final Class<T> cls) {
+        return Optional.fromNullable(services.getInstance(cls));
+    }
+}
index 82903ea4ec174a373dc0648901cde3c5c02d0348..8964a80228bf848538dc83dfc6db71c05f93a102 100644 (file)
@@ -1,7 +1,15 @@
 package org.opendaylight.controller.sal.connect.netconf.listener;
 
-import java.util.Arrays;
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.Set;
 
 import org.opendaylight.controller.netconf.client.NetconfClientSession;
@@ -10,24 +18,50 @@ import org.opendaylight.yangtools.yang.common.QName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Objects;
-import com.google.common.base.Optional;
-import com.google.common.base.Predicate;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-
 public final class NetconfSessionCapabilities {
+    private static final class ParameterMatcher {
+        private final Predicate<String> predicate;
+        private final int skipLength;
+
+        ParameterMatcher(final String name) {
+            predicate = new Predicate<String>() {
+                @Override
+                public boolean apply(final String input) {
+                    return input.startsWith(name);
+                }
+            };
 
-    private static final Logger logger = LoggerFactory.getLogger(NetconfSessionCapabilities.class);
+            this.skipLength = name.length();
+        }
 
-    private final Set<String> capabilities;
+        private String from(final Iterable<String> params) {
+            final Optional<String> o = Iterables.tryFind(params, predicate);
+            if (!o.isPresent()) {
+                return null;
+            }
+
+            return o.get().substring(skipLength);
+        }
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(NetconfSessionCapabilities.class);
+    private static final ParameterMatcher MODULE_PARAM = new ParameterMatcher("module=");
+    private static final ParameterMatcher REVISION_PARAM = new ParameterMatcher("revision=");
+    private static final ParameterMatcher BROKEN_REVISON_PARAM = new ParameterMatcher("amp;revision=");
+    private static final Splitter AMP_SPLITTER = Splitter.on('&');
+    private static final Predicate<String> CONTAINS_REVISION = new Predicate<String>() {
+        @Override
+        public boolean apply(final String input) {
+            return input.contains("revision=");
+        }
+    };
 
     private final Set<QName> moduleBasedCaps;
+    private final Set<String> capabilities;
 
     private NetconfSessionCapabilities(final Set<String> capabilities, final Set<QName> moduleBasedCaps) {
-        this.capabilities = capabilities;
-        this.moduleBasedCaps = moduleBasedCaps;
+        this.capabilities = Preconditions.checkNotNull(capabilities);
+        this.moduleBasedCaps = Preconditions.checkNotNull(moduleBasedCaps);
     }
 
     public Set<QName> getModuleBasedCaps() {
@@ -65,47 +99,45 @@ public final class NetconfSessionCapabilities {
     }
 
     public static NetconfSessionCapabilities fromStrings(final Collection<String> capabilities) {
-        final Set<QName> moduleBasedCaps = Sets.newHashSet();
+        final Set<QName> moduleBasedCaps = new HashSet<>();
 
         for (final String capability : capabilities) {
-            if(isModuleBasedCapability(capability)) {
-                final String[] parts = capability.split("\\?");
-                final String namespace = parts[0];
-                final FluentIterable<String> queryParams = FluentIterable.from(Arrays.asList(parts[1].split("&")));
-
-                String revision = getStringAndTransform(queryParams, "revision=", "revision=");
-
-                final String moduleName = getStringAndTransform(queryParams, "module=", "module=");
+            final int qmark = capability.indexOf('?');
+            if (qmark == -1) {
+                continue;
+            }
 
-                if (revision == null) {
-                    logger.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
-                    revision = getStringAndTransform(queryParams, "amp;revision=", "amp;revision=");
+            final String namespace = capability.substring(0, qmark);
+            final Iterable<String> queryParams = AMP_SPLITTER.split(capability.substring(qmark + 1));
+            final String moduleName = MODULE_PARAM.from(queryParams);
+            if (moduleName == null) {
+                continue;
+            }
 
-                    if (revision == null) {
-                        logger.warn("Netconf device returned revision incorrectly escaped for {}", capability);
-                    }
-                }
+            String revision = REVISION_PARAM.from(queryParams);
+            if (revision != null) {
                 moduleBasedCaps.add(QName.create(namespace, revision, moduleName));
+                continue;
             }
-        }
-
-        return new NetconfSessionCapabilities(Sets.newHashSet(capabilities), moduleBasedCaps);
-    }
 
-    private static boolean isModuleBasedCapability(final String capability) {
-        return capability.contains("?") && capability.contains("module=") && capability.contains("revision=");
-    }
+            /*
+             * We have seen devices which mis-escape revision, but the revision may not
+             * even be there. First check if there is a substring that matches revision.
+             */
+            if (!Iterables.any(queryParams, CONTAINS_REVISION)) {
+                continue;
+            }
 
-    private static String getStringAndTransform(final Iterable<String> queryParams, final String match,
-                                                final String substringToRemove) {
-        final Optional<String> found = Iterables.tryFind(queryParams, new Predicate<String>() {
-            @Override
-            public boolean apply(final String input) {
-                return input.startsWith(match);
+            LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
+            revision = BROKEN_REVISON_PARAM.from(queryParams);
+            if (revision == null) {
+                LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
             }
-        });
 
-        return found.isPresent() ? found.get().replaceAll(substringToRemove, "") : null;
-    }
+            // FIXME: do we really want to continue here?
+            moduleBasedCaps.add(QName.create(namespace, revision, moduleName));
+        }
 
+        return new NetconfSessionCapabilities(ImmutableSet.copyOf(capabilities), ImmutableSet.copyOf(moduleBasedCaps));
+    }
 }
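
The rewritten fromStrings() above drops the regex split and FluentIterable lookups in favour of index-based parsing of each capability URI, with a dedicated ParameterMatcher fallback for the mis-escaped "amp;revision=" form mentioned in the comments. A minimal standalone sketch of that parsing, using a made-up capability URI rather than anything from this patch, could look like:

    // Illustrative sketch only; the capability URI below is a made-up example.
    public final class CapabilityParseSketch {
        public static void main(final String[] args) {
            final String capability =
                    "urn:example:yang:toaster?module=toaster&revision=2009-11-20";

            final int qmark = capability.indexOf('?');
            if (qmark == -1) {
                return; // not a module-based capability, skipped just like in fromStrings()
            }

            final String namespace = capability.substring(0, qmark);
            String module = null;
            String revision = null;
            for (final String param : capability.substring(qmark + 1).split("&")) {
                if (param.startsWith("module=")) {
                    module = param.substring("module=".length());
                } else if (param.startsWith("revision=")) {
                    revision = param.substring("revision=".length());
                } else if (param.startsWith("amp;revision=")) {
                    // mis-escaped form handled by BROKEN_REVISON_PARAM above
                    revision = param.substring("amp;revision=".length());
                }
            }

            System.out.println(namespace + " / " + module + " / " + revision);
            // prints: urn:example:yang:toaster / toaster / 2009-11-20
        }
    }
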
index 0aeec64690d971abfa7f74b1bd58385e669163fe..3ec3eb16336e2ca0aae2f2891cd66e8581a9573e 100644 (file)
@@ -12,7 +12,6 @@ import com.google.common.collect.Collections2;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 import java.net.URI;
@@ -29,8 +28,8 @@ import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.Node;
@@ -81,8 +80,7 @@ public class NetconfMessageTransformUtil {
             return null;
         }
 
-        for (final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.PathArgument component : Lists
-                .reverse(identifier.getPath())) {
+        for (final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.PathArgument component : identifier.getReversePathArguments()) {
             if (component instanceof InstanceIdentifier.NodeIdentifierWithPredicates) {
                 previous = toNode((InstanceIdentifier.NodeIdentifierWithPredicates)component, previous);
             } else {
@@ -142,12 +140,12 @@ public class NetconfMessageTransformUtil {
 
         ErrorSeverity severity = toRpcErrorSeverity( ex.getErrorSeverity() );
         return severity == ErrorSeverity.ERROR ?
-                    RpcResultBuilder.newError(
+                RpcResultBuilder.newError(
                         toRpcErrorType( ex.getErrorType() ), ex.getErrorTag().getTagValue(),
                         ex.getLocalizedMessage(), null, infoBuilder.toString(), ex.getCause() ) :
-                    RpcResultBuilder.newWarning(
-                        toRpcErrorType( ex.getErrorType() ), ex.getErrorTag().getTagValue(),
-                        ex.getLocalizedMessage(), null, infoBuilder.toString(), ex.getCause() );
+                            RpcResultBuilder.newWarning(
+                                    toRpcErrorType( ex.getErrorType() ), ex.getErrorTag().getTagValue(),
+                                    ex.getLocalizedMessage(), null, infoBuilder.toString(), ex.getCause() );
     }
 
     private static ErrorSeverity toRpcErrorSeverity( final NetconfDocumentedException.ErrorSeverity severity ) {
index bd075d0606b08f3f94982e32d18ecdb468b9830a..1896e69f325aee65946e264499b1022c2e696f9e 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.sal.connect.netconf.util;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -36,11 +37,11 @@ class NodeContainerProxy implements ContainerSchemaNode {
         this.qName = Preconditions.checkNotNull(qName, "qName");
     }
 
-    public NodeContainerProxy(final QName qName, final Set<DataSchemaNode> childNodes) {
+    public NodeContainerProxy(final QName qName, final Collection<DataSchemaNode> childNodes) {
         this(qName, asMap(childNodes));
     }
 
-    private static Map<QName, DataSchemaNode> asMap(final Set<DataSchemaNode> childNodes) {
+    private static Map<QName, DataSchemaNode> asMap(final Collection<DataSchemaNode> childNodes) {
         final Map<QName, DataSchemaNode> mapped = Maps.newHashMap();
         for (final DataSchemaNode childNode : childNodes) {
             mapped.put(childNode.getQName(), childNode);
index defaab629f3684de2be6a5b1d7a578749cd493e0..46ea4ff73c40ae0d29ec267d834a19e2d88faa19 100644 (file)
@@ -102,11 +102,11 @@ public class NetconfDeviceTest {
         device.onRemoteSessionUp(sessionCaps, listener);
 
         verify(messageTransformer, timeout(10000).times(2)).toNotification(netconfMessage);
-        verify(facade, times(2)).onNotification(compositeNode);
+        verify(facade, timeout(10000).times(2)).onNotification(compositeNode);
 
         device.onNotification(netconfMessage);
-        verify(messageTransformer, times(3)).toNotification(netconfMessage);
-        verify(facade, times(3)).onNotification(compositeNode);
+        verify(messageTransformer, timeout(10000).times(3)).toNotification(netconfMessage);
+        verify(facade, timeout(10000).times(3)).onNotification(compositeNode);
     }
 
     @Test
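
The verify() changes above move from plain times(N) to Mockito's timeout(10000).times(N), presumably because the facade notifications are delivered asynchronously and an immediate verification can run before they arrive. A minimal sketch of that verification pattern, with a made-up Listener interface and executor that are not part of this patch, could look like:

    // Illustrative sketch only; Listener and the executor below are made-up examples.
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.timeout;
    import static org.mockito.Mockito.verify;

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public final class TimeoutVerifySketch {
        public interface Listener {
            void onNotification(String message);
        }

        public static void main(final String[] args) {
            final Listener listener = mock(Listener.class);
            final ExecutorService executor = Executors.newSingleThreadExecutor();

            // Deliver the notification asynchronously, as the tested facade does.
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    listener.onNotification("hello");
                }
            });

            // A plain verify(listener).onNotification(...) could run before the executor thread;
            // timeout() keeps re-checking for up to 10 seconds before failing.
            verify(listener, timeout(10000)).onNotification("hello");
            executor.shutdown();
        }
    }
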
index 41eb2e6f925551770f0278d93ab3694ab6dc5884..8e0e85cabbba31f53c43a7a6b28f6cb83e9c7501 100644 (file)
@@ -31,7 +31,6 @@ import java.io.ByteArrayInputStream;
 import java.io.StringWriter;
 import java.util.Collections;
 import java.util.List;
-import java.util.Set;
 
 /*
  * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
@@ -57,7 +56,7 @@ public class EncoderDecoderUtil {
     factory.setIgnoringComments(true);
   }
 
-  private static DataSchemaNode findChildNode(Set<DataSchemaNode> children,
+  private static DataSchemaNode findChildNode(Iterable<DataSchemaNode> children,
       String name) {
     List<DataNodeContainer> containers = Lists.newArrayList();
 
index 28e358a06acb99a8c05d54b208fd476d826164f3..8d609823a71e11c69fe11a67d184cd4e276f1eb5 100644 (file)
@@ -113,7 +113,7 @@ public class NormalizedNodeXmlConverterTest {
         + childNodeName);
   }
 
-  static DataSchemaNode findChildNode(final Set<DataSchemaNode> children, final String name) {
+  static DataSchemaNode findChildNode(final Iterable<DataSchemaNode> children, final String name) {
     List<DataNodeContainer> containers = Lists.newArrayList();
 
     for (DataSchemaNode dataSchemaNode : children) {
index 4b4cef9f70873b2c34858de93e529d91d0ea7a5e..cd2d91559b5a2c022a4a09194e9cece23cfdd4f6 100644 (file)
@@ -13,6 +13,7 @@ import com.google.common.base.Preconditions;
 import com.google.gson.stream.JsonWriter;
 import java.io.IOException;
 import java.net.URI;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
@@ -83,7 +84,7 @@ class JsonMapper {
 
         final Set<QName> foundLists = new HashSet<>();
 
-        Set<DataSchemaNode> parentSchemaChildNodes = parentSchema == null ? Collections.<DataSchemaNode> emptySet()
+        Collection<DataSchemaNode> parentSchemaChildNodes = parentSchema == null ? Collections.<DataSchemaNode> emptySet()
                 : parentSchema.getChildNodes();
 
         for (Node<?> child : parent.getValue()) {
@@ -170,7 +171,7 @@ class JsonMapper {
         }
     }
 
-    private static DataSchemaNode findFirstSchemaForNode(final Node<?> node, final Set<DataSchemaNode> dataSchemaNode) {
+    private static DataSchemaNode findFirstSchemaForNode(final Node<?> node, final Iterable<DataSchemaNode> dataSchemaNode) {
         for (DataSchemaNode dsn : dataSchemaNode) {
             if (node.getNodeType().equals(dsn.getQName())) {
                 return dsn;
index 29672f39d47c4222b297972be5b1dbeef3f1d6fe..0a67c84d8b2b04c56a31fa2d7e9dd2e3e8821934 100644 (file)
@@ -17,21 +17,26 @@ import com.google.common.base.Strings;
 import com.google.common.collect.BiMap;
 import com.google.common.collect.FluentIterable;
 import com.google.common.collect.HashBiMap;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
+
 import java.io.UnsupportedEncodingException;
 import java.net.URI;
 import java.net.URLDecoder;
 import java.net.URLEncoder;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReference;
+
 import javax.ws.rs.core.Response.Status;
+
 import org.opendaylight.controller.sal.core.api.mount.MountInstance;
 import org.opendaylight.controller.sal.core.api.mount.MountService;
 import org.opendaylight.controller.sal.rest.api.Draft02;
@@ -81,13 +86,12 @@ public class ControllerContext implements SchemaContextListener {
 
     private static final Splitter SLASH_SPLITTER = Splitter.on('/');
 
-    private static final Splitter COLON_SPLITTER = Splitter.on(':');
-
     private final BiMap<URI, String> uriToModuleName = HashBiMap.<URI, String> create();
 
     private final Map<String, URI> moduleNameToUri = uriToModuleName.inverse();
 
-    private final Map<QName, RpcDefinition> qnameToRpc = new ConcurrentHashMap<>();
+    private final AtomicReference<Map<QName, RpcDefinition>> qnameToRpc =
+            new AtomicReference<>(Collections.<QName, RpcDefinition>emptyMap());
 
     private volatile SchemaContext globalSchema;
     private volatile MountService mountService;
@@ -721,8 +725,6 @@ public class ControllerContext implements SchemaContextListener {
     private void collectInstanceDataNodeContainers(final List<DataSchemaNode> potentialSchemaNodes,
             final DataNodeContainer container, final String name) {
 
-        Set<DataSchemaNode> childNodes = container.getChildNodes();
-
         Predicate<DataSchemaNode> filter = new Predicate<DataSchemaNode>() {
             @Override
             public boolean apply(final DataSchemaNode node) {
@@ -730,7 +732,7 @@ public class ControllerContext implements SchemaContextListener {
             }
         };
 
-        Iterable<DataSchemaNode> nodes = Iterables.filter(childNodes, filter);
+        Iterable<DataSchemaNode> nodes = Iterables.filter(container.getChildNodes(), filter);
 
         // Can't combine this loop with the filter above because the filter is
         // lazily-applied by Iterables.filter.
@@ -792,24 +794,31 @@ public class ControllerContext implements SchemaContextListener {
     }
 
     private static String toModuleName(final String str) {
-        Preconditions.<String> checkNotNull(str);
-        if (str.indexOf(':') != -1) {
-            final Iterable<String> args = COLON_SPLITTER.split(str);
-            if (Iterables.size(args) == 2) {
-                return args.iterator().next();
-            }
+        final int idx = str.indexOf(':');
+        if (idx == -1) {
+            return null;
         }
-        return null;
+
+        // Make sure there is only one occurrence
+        if (str.indexOf(':', idx + 1) != -1) {
+            return null;
+        }
+
+        return str.substring(0, idx);
     }
 
     private static String toNodeName(final String str) {
-        if (str.indexOf(':') != -1) {
-            final Iterable<String> args = COLON_SPLITTER.split(str);
-            if (Iterables.size(args) == 2) {
-                return Iterables.get(args, 1);
-            }
+        final int idx = str.indexOf(':');
+        if (idx == -1) {
+            return str;
+        }
+
+        // Make sure there is only one occurrence
+        if (str.indexOf(':', idx + 1) != -1) {
+            return str;
         }
-        return str;
+
+        return str.substring(idx + 1);
     }
 
     private QName toQName(final String name) {
@@ -845,26 +854,28 @@ public class ControllerContext implements SchemaContextListener {
         return namespace.isPresent() ? QName.create(namespace.get(), node) : null;
     }
 
-    private boolean isListOrContainer(final DataSchemaNode node) {
+    private static boolean isListOrContainer(final DataSchemaNode node) {
         return node instanceof ListSchemaNode || node instanceof ContainerSchemaNode;
     }
 
     public RpcDefinition getRpcDefinition(final String name) {
         final QName validName = this.toQName(name);
-        return validName == null ? null : this.qnameToRpc.get(validName);
+        return validName == null ? null : this.qnameToRpc.get().get(validName);
     }
 
     @Override
     public void onGlobalContextUpdated(final SchemaContext context) {
         if (context != null) {
-            this.qnameToRpc.clear();
-            this.setGlobalSchema(context);
-            Set<RpcDefinition> _operations = context.getOperations();
-            for (final RpcDefinition operation : _operations) {
-                {
-                    this.qnameToRpc.put(operation.getQName(), operation);
-                }
+            final Collection<RpcDefinition> defs = context.getOperations();
+            final Map<QName, RpcDefinition> newMap = new HashMap<>(defs.size());
+
+            for (final RpcDefinition operation : defs) {
+                newMap.put(operation.getQName(), operation);
             }
+
+            // FIXME: still not completely atomic
+            this.qnameToRpc.set(ImmutableMap.copyOf(newMap));
+            this.setGlobalSchema(context);
         }
     }
 
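
The ControllerContext changes above do two things: the QName-to-RpcDefinition lookup is now published as an immutable snapshot through an AtomicReference instead of being cleared and repopulated in a ConcurrentHashMap, and toModuleName()/toNodeName() now accept a name only when it contains exactly one ':'. A small self-contained sketch of that single-colon rule, with made-up input strings, could look like:

    // Illustrative sketch only; the helper logic mirrors the rewritten toModuleName()/toNodeName()
    // and the input strings are made-up examples.
    public final class ColonSplitSketch {
        static String toModuleName(final String str) {
            final int idx = str.indexOf(':');
            if (idx == -1 || str.indexOf(':', idx + 1) != -1) {
                return null;                  // no colon, or more than one colon
            }
            return str.substring(0, idx);
        }

        static String toNodeName(final String str) {
            final int idx = str.indexOf(':');
            if (idx == -1 || str.indexOf(':', idx + 1) != -1) {
                return str;                   // fall back to the whole string
            }
            return str.substring(idx + 1);
        }

        public static void main(final String[] args) {
            System.out.println(toModuleName("toaster:cancel-toast")); // toaster
            System.out.println(toNodeName("toaster:cancel-toast"));   // cancel-toast
            System.out.println(toModuleName("cancel-toast"));         // null
            System.out.println(toNodeName("a:b:c"));                  // a:b:c (ambiguous, kept as-is)
        }
    }
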
index bb7547d3302f1d172c1271416c08e0895ac46398..4e807b4e230906e68f77dd16d6055a9245a97aaf 100644 (file)
@@ -490,7 +490,13 @@ public class RestconfImpl implements RestconfService {
         }
 
         final String identifierDecoded = controllerContext.urlPathArgDecode(identifierEncoded);
-        RpcDefinition rpc = controllerContext.getRpcDefinition(identifierDecoded);
+
+        RpcDefinition rpc = null;
+        if (mountPoint == null) {
+            rpc = controllerContext.getRpcDefinition(identifierDecoded);
+        } else {
+            rpc = findRpc(mountPoint.getSchemaContext(), identifierDecoded);
+        }
 
         if (rpc == null) {
             throw new RestconfDocumentedException("RPC does not exist.", ErrorType.RPC, ErrorTag.UNKNOWN_ELEMENT);
@@ -504,6 +510,25 @@ public class RestconfImpl implements RestconfService {
 
     }
 
+    private RpcDefinition findRpc(final SchemaContext schemaContext, final String identifierDecoded) {
+        final String[] splittedIdentifier = identifierDecoded.split(":");
+        if (splittedIdentifier.length != 2) {
+            throw new RestconfDocumentedException(identifierDecoded
+                    + " could not be split into 2 parts (module:rpc name)", ErrorType.APPLICATION,
+                    ErrorTag.INVALID_VALUE);
+        }
+        for (Module module : schemaContext.getModules()) {
+            if (module.getName().equals(splittedIdentifier[0])) {
+                for (RpcDefinition rpcDefinition : module.getRpcs()) {
+                    if (rpcDefinition.getQName().getLocalName().equals(splittedIdentifier[1])) {
+                        return rpcDefinition;
+                    }
+                }
+            }
+        }
+        return null;
+    }
+
     private StructuredData callRpc(final RpcExecutor rpcExecutor, final CompositeNode payload, boolean prettyPrint) {
         if (rpcExecutor == null) {
             throw new RestconfDocumentedException("RPC does not exist.", ErrorType.RPC, ErrorTag.UNKNOWN_ELEMENT);
index 4c5e3ab5301e0dc1fa4f27507030bc479f49a012..148b33bd6545ffe544418ddc6b393258535c5c9a 100644 (file)
@@ -37,6 +37,8 @@ public abstract class AbstractRpcExecutor implements RpcExecutor {
             throw new RestconfDocumentedException(e.getMessage(), ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
         } catch (UnsupportedOperationException e) {
             throw new RestconfDocumentedException(e.getMessage(), ErrorType.RPC, ErrorTag.OPERATION_NOT_SUPPORTED);
+        } catch (RestconfDocumentedException e) {
+            throw e;
         } catch (Exception e) {
             throw new RestconfDocumentedException("The operation encountered an unexpected error while executing.", e);
         }
index 313b766ed3312bad6cb5f74a542e84893226aa54..2adf9b553092dd19edf4b7ddda45cff736a0f1ff 100644 (file)
@@ -36,7 +36,6 @@ import javax.ws.rs.core.UriInfo;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-
 import org.opendaylight.controller.sal.core.api.mount.MountInstance;
 import org.opendaylight.controller.sal.restconf.impl.BrokerFacade;
 import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
@@ -298,6 +297,14 @@ public class InvokeRpcMethodTest {
         assertNotNull(output.getSchema());
     }
 
+    /**
+     * Tests the RestconfImpl method invokeRpc for the case where the RPC definition is looked
+     * up in the remote (mount point) schema context and then executed.
+     *
+     * Invoking an RPC on a remote device could not be simulated here, so this test instead
+     * verifies that a RestconfDocumentedException is raised when the RPC is invoked on the
+     * mount point.
+     */
     @Test
     public void testMountedRpcCallNoPayload_Success() throws Exception {
         RpcResult<CompositeNode> rpcResult = RpcResultBuilder.<CompositeNode>success().build();
@@ -313,21 +320,32 @@ public class InvokeRpcMethodTest {
         MountInstance mockMountPoint = mock(MountInstance.class);
         when(mockMountPoint.rpc(eq(cancelToastQName), any(CompositeNode.class))).thenReturn(mockListener);
 
+        when(mockMountPoint.getSchemaContext()).thenReturn(TestUtils.loadSchemaContext("/invoke-rpc"));
+
         InstanceIdWithSchemaNode mockedInstanceId = mock(InstanceIdWithSchemaNode.class);
         when(mockedInstanceId.getMountPoint()).thenReturn(mockMountPoint);
 
         ControllerContext mockedContext = mock(ControllerContext.class);
-        String cancelToastStr = "toaster:cancel-toast";
-        when(mockedContext.urlPathArgDecode(cancelToastStr)).thenReturn(cancelToastStr);
-        when(mockedContext.getRpcDefinition(cancelToastStr)).thenReturn(mockRpc);
+        String rpcNoop = "invoke-rpc-module:rpc-noop";
+        when(mockedContext.urlPathArgDecode(rpcNoop)).thenReturn(rpcNoop);
+        when(mockedContext.getRpcDefinition(rpcNoop)).thenReturn(mockRpc);
         when(
-                mockedContext.toMountPointIdentifier("opendaylight-inventory:nodes/node/"
-                        + "REMOTE_HOST/yang-ext:mount/toaster:cancel-toast")).thenReturn(mockedInstanceId);
+                mockedContext.toMountPointIdentifier(eq("opendaylight-inventory:nodes/node/"
+                        + "REMOTE_HOST/yang-ext:mount/invoke-rpc-module:rpc-noop"))).thenReturn(mockedInstanceId);
 
         restconfImpl.setControllerContext(mockedContext);
-        StructuredData output = restconfImpl.invokeRpc(
-                "opendaylight-inventory:nodes/node/REMOTE_HOST/yang-ext:mount/toaster:cancel-toast", "", uriInfo);
-        assertEquals(null, output);
+        try {
+            restconfImpl.invokeRpc(
+                    "opendaylight-inventory:nodes/node/REMOTE_HOST/yang-ext:mount/invoke-rpc-module:rpc-noop", "",
+                    uriInfo);
+            fail("RestconfDocumentedException wasn't raised");
+        } catch (RestconfDocumentedException e) {
+            List<RestconfError> errors = e.getErrors();
+            assertNotNull(errors);
+            assertEquals(1, errors.size());
+            assertEquals(ErrorType.APPLICATION, errors.iterator().next().getErrorType());
+            assertEquals(ErrorTag.OPERATION_FAILED, errors.iterator().next().getErrorTag());
+        }
 
         // additional validation in the fact that the restconfImpl does not
         // throw an exception.
index ba06645354fb67ec215db9c67914a9367a7be43c..208c2164d506530f00b0cfd0a000325da447390e 100644 (file)
@@ -16,5 +16,8 @@ module invoke-rpc-module {
         }
     }
   }  
-  
+
+  rpc rpc-noop {
+  }
+
 }
\ No newline at end of file
index 31f4253318f146e95c2a0923f3f08d3e14cc4a2d..9a1816b90e88f333417a6c62d335c67b7f04739f 100644 (file)
@@ -16,7 +16,7 @@ import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.controller.sal.core.api.Provider;
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
-import org.opendaylight.controller.sal.core.api.mount.MountProvisionService.MountProvisionListener;
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionListener;
 import org.opendaylight.controller.sal.rest.doc.impl.ApiDocGenerator;
 import org.opendaylight.controller.sal.rest.doc.mountpoints.MountPointSwagger;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
index 68d31de8da35688737e927689d5f55a9085de2e9..5ba8b26bc1eb6bb69ac0d5e3564dbbfe5f8c27cf 100644 (file)
@@ -14,6 +14,7 @@ import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Date;
@@ -161,7 +162,7 @@ public class BaseYangSwaggerGenerator {
 
         List<Api> apis = new ArrayList<Api>();
 
-        Set<DataSchemaNode> dataSchemaNodes = m.getChildNodes();
+        Collection<DataSchemaNode> dataSchemaNodes = m.getChildNodes();
         _logger.debug("child nodes size [{}]", dataSchemaNodes.size());
         for (DataSchemaNode node : dataSchemaNodes) {
             if ((node instanceof ListSchemaNode) || (node instanceof ContainerSchemaNode)) {
@@ -240,9 +241,8 @@ public class BaseYangSwaggerGenerator {
         apis.add(api);
         if ((node instanceof ListSchemaNode) || (node instanceof ContainerSchemaNode)) {
             DataNodeContainer schemaNode = (DataNodeContainer) node;
-            Set<DataSchemaNode> dataSchemaNodes = schemaNode.getChildNodes();
 
-            for (DataSchemaNode childNode : dataSchemaNodes) {
+            for (DataSchemaNode childNode : schemaNode.getChildNodes()) {
                 // We don't support going to leaf nodes today. Only lists and
                 // containers.
                 if (childNode instanceof ListSchemaNode || childNode instanceof ContainerSchemaNode) {
index 8bac0d211e71cb2d88969c72d9b3f21a9fb00919..95bb1a094371db4f17d38c310c0043b07006c5c3 100644 (file)
@@ -126,9 +126,8 @@ public class ModelGenerator {
             JSONException {
 
         String moduleName = module.getName();
-        Set<DataSchemaNode> childNodes = module.getChildNodes();
 
-        for (DataSchemaNode childNode : childNodes) {
+        for (DataSchemaNode childNode : module.getChildNodes()) {
             JSONObject configModuleJSON = null;
             JSONObject operationalModuleJSON = null;
 
@@ -271,13 +270,12 @@ public class ModelGenerator {
         String containerDescription = container.getDescription();
         moduleJSON.put(DESCRIPTION_KEY, containerDescription);
 
-        Set<DataSchemaNode> containerChildren = container.getChildNodes();
-        JSONObject properties = processChildren(containerChildren, moduleName, models, isConfig);
+        JSONObject properties = processChildren(container.getChildNodes(), moduleName, models, isConfig);
         moduleJSON.put(PROPERTIES_KEY, properties);
         return moduleJSON;
     }
 
-    private JSONObject processChildren(Set<DataSchemaNode> nodes, String moduleName,
+    private JSONObject processChildren(Iterable<DataSchemaNode> nodes, String moduleName,
             JSONObject models) throws JSONException, IOException {
         return processChildren(nodes, moduleName, models, null);
     }
@@ -292,7 +290,7 @@ public class ModelGenerator {
      * @throws JSONException
      * @throws IOException
      */
-    private JSONObject processChildren(Set<DataSchemaNode> nodes, String moduleName,
+    private JSONObject processChildren(Iterable<DataSchemaNode> nodes, String moduleName,
             JSONObject models, Boolean isConfig) throws JSONException, IOException {
 
         JSONObject properties = new JSONObject();
@@ -418,11 +416,10 @@ public class ModelGenerator {
     private JSONObject processListSchemaNode(ListSchemaNode listNode, String moduleName,
             JSONObject models, Boolean isConfig) throws JSONException, IOException {
 
-        Set<DataSchemaNode> listChildren = listNode.getChildNodes();
         String fileName = (BooleanUtils.isNotFalse(isConfig)?OperationBuilder.CONFIG:OperationBuilder.OPERATIONAL) +
                                                                 listNode.getQName().getLocalName();
 
-        JSONObject childSchemaProperties = processChildren(listChildren, moduleName, models);
+        JSONObject childSchemaProperties = processChildren(listNode.getChildNodes(), moduleName, models);
         JSONObject childSchema = getSchemaTemplate();
         childSchema.put(TYPE_KEY, OBJECT_TYPE);
         childSchema.put(PROPERTIES_KEY, childSchemaProperties);
index fcabb088f4decbac32df1ba08648cb3d2d18fdf9..20e0fa56a798f3b7fa2c779c644134563d8b8e80 100644 (file)
@@ -23,7 +23,7 @@ import javax.ws.rs.core.UriInfo;
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
-import org.opendaylight.controller.sal.core.api.mount.MountProvisionService.MountProvisionListener;
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionListener;
 import org.opendaylight.controller.sal.rest.doc.impl.BaseYangSwaggerGenerator;
 import org.opendaylight.controller.sal.rest.doc.swagger.Api;
 import org.opendaylight.controller.sal.rest.doc.swagger.ApiDeclaration;
index 6f31a7ec76ac6e365429faefbf035b93764731cc..d123456262a2c7359aa052599e9fd98b6c78a265 100644 (file)
@@ -20,7 +20,7 @@ import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
 import org.opendaylight.controller.sal.binding.api.NotificationService;
 import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketProcessingService;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.binding.NotificationListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -33,7 +33,7 @@ public class L2SwitchProvider extends AbstractBindingAwareConsumer
 
   private final static Logger _logger = LoggerFactory.getLogger(L2SwitchProvider.class);
 
-  private Registration<NotificationListener> listenerRegistration;
+  private ListenerRegistration<NotificationListener> listenerRegistration;
   private AddressTracker addressTracker;
   private TopologyLinkDataChangeHandler topologyLinkDataChangeHandler;
 
index 88281bd59369858f8818adfa75b741e4d4fea367..8d67fb4ed09371394dc10cc9b9d8105f860d16db 100644 (file)
@@ -16,7 +16,7 @@ import org.opendaylight.controller.sample.kitchen.api.KitchenService;
 import org.opendaylight.controller.sample.kitchen.impl.KitchenServiceImpl;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToastType;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.binding.NotificationListener;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.slf4j.Logger;
@@ -50,7 +50,7 @@ public final class KitchenServiceModule extends AbstractKitchenServiceModule {
 
         final KitchenServiceImpl kitchenService = new KitchenServiceImpl(toasterService);
 
-        final Registration<NotificationListener> toasterListenerReg =
+        final ListenerRegistration<NotificationListener> toasterListenerReg =
                 getNotificationServiceDependency().registerNotificationListener( kitchenService );
 
         final KitchenServiceRuntimeRegistration runtimeReg =
index b96d2be47e11b5f66fda9bc2d4df156e1b08c421..8c9b60e43f0b7dc373b3365039ab950f86df6bfc 100644 (file)
@@ -28,7 +28,6 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.
 import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.binding.NotificationListener;
 import org.slf4j.Logger;
@@ -76,7 +75,7 @@ public class StatisticsProvider implements AutoCloseable {
 
     private final StatisticsListener updateCommiter = new StatisticsListener(StatisticsProvider.this);
 
-    private Registration<NotificationListener> listenerRegistration;
+    private ListenerRegistration<NotificationListener> listenerRegistration;
 
     private ListenerRegistration<DataChangeListener> flowCapableTrackerRegistration;
 
index b219722ba864eb6a5b306171038112e5a2f2ebf9..91db6e264b63518f48f9326882ea994ebb9a3bd4 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.md.controller.topology.lldp;
 
 import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
 import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.binding.NotificationListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -19,7 +19,7 @@ public class LLDPDiscoveryProvider implements AutoCloseable {
     private DataProviderService dataService;
     private NotificationProviderService notificationService;
     private final LLDPDiscoveryListener commiter = new LLDPDiscoveryListener(LLDPDiscoveryProvider.this);
-    private Registration<NotificationListener> listenerRegistration;
+    private ListenerRegistration<NotificationListener> listenerRegistration;
 
     public DataProviderService getDataService() {
         return this.dataService;
@@ -38,7 +38,7 @@ public class LLDPDiscoveryProvider implements AutoCloseable {
     }
 
     public void start() {
-        Registration<NotificationListener> registerNotificationListener = this.getNotificationService().registerNotificationListener(this.commiter);
+        ListenerRegistration<NotificationListener> registerNotificationListener = this.getNotificationService().registerNotificationListener(this.commiter);
         this.listenerRegistration = registerNotificationListener;
         LLDPLinkAger.getInstance().setManager(this);
         LOG.info("LLDPDiscoveryListener Started.");
index d656bda932f2251e18ea7f71f6c19500115456d3..a87971bc6bc0e1d6a294c30a08dfb73cf81484be 100644 (file)
@@ -19,7 +19,7 @@ import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
-import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.binding.NotificationListener;
 import org.osgi.framework.BundleContext;
@@ -28,7 +28,7 @@ import org.slf4j.LoggerFactory;
 
 public class FlowCapableTopologyProvider extends AbstractBindingAwareProvider implements AutoCloseable {
     private final static Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyProvider.class);
-    private Registration<NotificationListener> listenerRegistration;
+    private ListenerRegistration<NotificationListener> listenerRegistration;
     private Thread thread;
 
     /**
index 66d0d4da4673e16650bea4e6a46b864847b160e6..42afb160a8a8ea5efb8811427256c70ae2c4b023 100644 (file)
@@ -10,7 +10,6 @@ package org.opendaylight.controller.netconf.cli.commands.output;
 import com.google.common.base.Preconditions;
 import java.util.Collections;
 import java.util.Iterator;
-import java.util.Set;
 import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
 
@@ -20,9 +19,9 @@ import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
 public class OutputDefinition implements Iterable<DataSchemaNode> {
 
     public static final OutputDefinition EMPTY_OUTPUT = new OutputDefinition(Collections.<DataSchemaNode>emptySet());
-    private final Set<DataSchemaNode> childNodes;
+    private final Iterable<DataSchemaNode> childNodes;
 
-    public OutputDefinition(final Set<DataSchemaNode> childNodes) {
+    public OutputDefinition(final Iterable<DataSchemaNode> childNodes) {
         this.childNodes = childNodes;
     }