<feature version='${mdsal.version}'>odl-mdsal-common</feature>
<feature version='${config.version}'>odl-config-startup</feature>
<feature version='${config.version}'>odl-config-netty</feature>
+ <bundle>mvn:com.lmax/disruptor/${lmax.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-core-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-core-spi/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-broker-impl/${project.version}</bundle>
</parent>
<artifactId>features-netconf</artifactId>
- <packaging>pom</packaging>
+ <packaging>jar</packaging>
<properties>
<features.file>features.xml</features.file>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-auth</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-notifications-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-netconf-monitoring</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-netconf-monitoring-extension</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.yangtools.model</groupId>
<artifactId>ietf-inet-types</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-monitoring</artifactId>
</dependency>
+ <!-- test to validate features.xml -->
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>features-test</artifactId>
+ <version>${yangtools.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <!-- dependency for opendaylight-karaf-empty for use by testing -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>opendaylight-karaf-empty</artifactId>
+ <version>${commons.opendaylight.version}</version>
+ <type>zip</type>
+ </dependency>
</dependencies>
<build>
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>${surefire.version}</version>
+ <configuration>
+ <systemPropertyVariables>
+ <karaf.distro.groupId>org.opendaylight.controller</karaf.distro.groupId>
+ <karaf.distro.artifactId>opendaylight-karaf-empty</karaf.distro.artifactId>
+ <karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
+ </systemPropertyVariables>
+ <dependenciesToScan>
+ <dependency>org.opendaylight.yangtools:features-test</dependency>
+ </dependenciesToScan>
+ </configuration>
+ </plugin>
</plugins>
</build>
<scm>
<bundle>mvn:org.opendaylight.controller/netconf-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-auth/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/ietf-netconf/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/ietf-netconf-notifications/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring-extension/${project.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools.model/ietf-inet-types/${ietf-inet-types.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools.model/ietf-yang-types/${ietf-yang-types.version}</bundle>
<feature version='${project.version}'>odl-config-netconf-connector</feature>
<!-- Netconf will not provide schemas without monitoring -->
<feature version='${project.version}'>odl-netconf-monitoring</feature>
+ <feature version='${project.version}'>odl-netconf-notifications-impl</feature>
<bundle>mvn:org.opendaylight.controller/netconf-impl/${project.version}</bundle>
</feature>
<feature name='odl-config-netconf-connector' version='${project.version}' description="OpenDaylight :: Netconf :: Connector">
<feature version='${project.version}'>odl-netconf-api</feature>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
+ <feature version='${project.version}'>odl-netconf-notifications-api</feature>
<bundle>mvn:org.opendaylight.controller/config-netconf-connector/${project.version}</bundle>
</feature>
<feature name='odl-netconf-netty-util' version='${project.version}' description="OpenDaylight :: Netconf :: Netty Util">
<feature version='${project.version}'>odl-netconf-util</feature>
<bundle>mvn:org.opendaylight.controller/netconf-monitoring/${project.version}</bundle>
</feature>
+ <feature name='odl-netconf-notifications-api' version='${project.version}' description="OpenDaylight :: Netconf :: Notification :: Api">
+ <feature version='${project.version}'>odl-netconf-api</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-notifications-api/${project.version}</bundle>
+ </feature>
+ <feature name='odl-netconf-notifications-impl' version='${project.version}' description="OpenDaylight :: Netconf :: Notification :: Impl">
+ <feature version='${project.version}'>odl-netconf-notifications-api</feature>
+ <feature version='${project.version}'>odl-netconf-util</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-binding-generator</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-notifications-impl/${project.version}</bundle>
+ </feature>
</features>
<dependency>
<groupId>org.ops4j.pax.exam</groupId>
<artifactId>pax-exam-container-karaf</artifactId>
- <version>${pax.exam.version}</version>
+ <version>${exam.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<dependency>
<groupId>org.ops4j.pax.exam</groupId>
<artifactId>pax-exam</artifactId>
- <version>${pax.exam.version}</version>
+ <version>${exam.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<northbound.jolokia.version>1.5.0-SNAPSHOT</northbound.jolokia.version>
<opendaylight-l2-types.version>2013.08.27.7-SNAPSHOT</opendaylight-l2-types.version>
<osgi-brandfragment.web.version>0.1.0-SNAPSHOT</osgi-brandfragment.web.version>
- <pax.exam.version>4.0.0</pax.exam.version>
<parboiled.version>1.1.6</parboiled.version>
<parboiled.scala.version>1.1.6</parboiled.scala.version>
<propertymavenplugin.version>1.0-alpha-2</propertymavenplugin.version>
<yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
<sshd-core.version>0.12.0</sshd-core.version>
<jmh.version>0.9.7</jmh.version>
+ <lmax.version>3.3.0</lmax.version>
</properties>
<dependencyManagement>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.lmax</groupId>
+ <artifactId>disruptor</artifactId>
+ <version>${lmax.version}</version>
+ </dependency>
+
<!-- 3rd party dependencies needed by config-->
<dependency>
<groupId>com.jcabi</groupId>
* @param connectStrategyFactory Factory for creating reconnection strategy for every reconnect attempt
*
* @return Future representing the reconnection task. It will report completion based on reestablishStrategy, e.g.
- * success if it indicates no further attempts should be made and failure if it reports an error
+ * success is never reported, only failure when it runs out of reconnection attempts.
*/
protected Future<Void> createReconnectingClient(final InetSocketAddress address, final ReconnectStrategyFactory connectStrategyFactory,
final PipelineInitializer<S> initializer) {
import io.netty.util.concurrent.DefaultPromise;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.Promise;
import java.net.InetSocketAddress;
import org.slf4j.Logger;
channel.pipeline().addLast(new ClosedChannelHandler(ReconnectPromise.this));
}
});
+
+ pending.addListener(new GenericFutureListener<Future<Object>>() {
+ @Override
+ public void operationComplete(Future<Object> future) throws Exception {
+ if (!future.isSuccess()) {
+ ReconnectPromise.this.setFailure(future.cause());
+ }
+ }
+ });
}
/**
*/
package org.opendaylight.controller.config.manager.impl.dynamicmbean;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSet.Builder;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.util.ArrayList;
* @return list of found annotations
*/
static <T extends Annotation> List<T> findMethodAnnotationInSuperClassesAndIfcs(
- final Method setter, Class<T> annotationType,
- Set<Class<?>> inspectedInterfaces) {
- List<T> result = new ArrayList<T>();
+ final Method setter, final Class<T> annotationType,
+ final Set<Class<?>> inspectedInterfaces) {
+ Builder<T> result = ImmutableSet.builder();
Class<?> inspectedClass = setter.getDeclaringClass();
do {
try {
} catch (NoSuchMethodException e) {
inspectedClass = Object.class; // no need to go further
}
- } while (inspectedClass.equals(Object.class) == false);
+ } while (!inspectedClass.equals(Object.class));
+
// inspect interfaces
for (Class<?> ifc : inspectedInterfaces) {
if (ifc.isInterface() == false) {
}
}
- return result;
+ return new ArrayList<>(result.build());
}
/**
* @return list of found annotations
*/
static <T extends Annotation> List<T> findClassAnnotationInSuperClassesAndIfcs(
- Class<?> clazz, Class<T> annotationType, Set<Class<?>> interfaces) {
+ final Class<?> clazz, final Class<T> annotationType, final Set<Class<?>> interfaces) {
List<T> result = new ArrayList<T>();
Class<?> declaringClass = clazz;
do {
* @return empty string if no annotation is found, or list of descriptions
* separated by newline
*/
- static String aggregateDescriptions(List<Description> descriptions) {
+ static String aggregateDescriptions(final List<Description> descriptions) {
StringBuilder builder = new StringBuilder();
for (Description d : descriptions) {
if (builder.length() != 0) {
package org.opendaylight.controller.config.manager.impl.util;
import static org.junit.Assert.assertEquals;
-
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Collections;
public class InterfacesHelperTest {
- interface SuperA {
+ public interface SuperA {
}
- interface SuperBMXBean {
+ public interface SuperBMXBean {
}
- interface SuperC extends SuperA, SuperBMXBean {
+ public interface SuperC extends SuperA, SuperBMXBean {
}
- class SuperClass implements SuperC {
+ public class SuperClass implements SuperC {
}
@MXBean
- interface SubA {
+ public interface SubA {
}
@ServiceInterfaceAnnotation(value = "a", osgiRegistrationType = SuperA.class, namespace = "n", revision = "r", localName = "l")
- interface Service extends AbstractServiceInterface{}
+ public interface Service extends AbstractServiceInterface{}
@ServiceInterfaceAnnotation(value = "b", osgiRegistrationType = SuperC.class, namespace = "n", revision = "r", localName = "l")
- interface SubService extends Service{}
+ public interface SubService extends Service{}
- abstract class SubClass extends SuperClass implements SubA, Module {
+ public abstract class SubClass extends SuperClass implements SubA, Module {
}
- abstract class SubClassWithService implements SubService, Module {
+ public abstract class SubClassWithService implements SubService, Module {
}
assertThat(runtimeBeans.size(), is(4));
{
- RuntimeBeanEntry streamRB = findFirstByYangName(runtimeBeans,
- "stream");
+ RuntimeBeanEntry streamRB = findFirstByNamePrefix(runtimeBeans,
+ "ThreadStream");
assertNotNull(streamRB);
assertFalse(streamRB.getKeyYangName().isPresent());
assertFalse(streamRB.getKeyJavaName().isPresent());
+ " in " + runtimeBeans);
}
+ protected RuntimeBeanEntry findFirstByNamePrefix(final Collection<RuntimeBeanEntry> runtimeBeans, final String namePrefix) {
+ for (RuntimeBeanEntry rb : runtimeBeans) {
+ if (namePrefix.equals(rb.getJavaNamePrefix())) {
+ return rb;
+ }
+ }
+
+ throw new IllegalArgumentException("Name prefix not found:" + namePrefix
+ + " in " + runtimeBeans);
+ }
+
@Test
public void testGetWhenConditionMatcher() {
assertMatches("config",
assertThat(threadRB.getRpcs().size(), is(2));
}
{
- RuntimeBeanEntry streamRB = findFirstByYangName(runtimeBeans,
- "stream");
+ RuntimeBeanEntry streamRB = findFirstByNamePrefix(runtimeBeans,
+ "ThreadStream");
assertNotNull(streamRB);
assertFalse(streamRB.getKeyYangName().isPresent());
assertFalse(streamRB.getKeyJavaName().isPresent());
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ClientActor extends UntypedActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ protected final Logger LOG = LoggerFactory.getLogger(getClass());
private final ActorRef target;
try {
bs = fromObject(state);
} catch (Exception e) {
- LOG.error(e, "Exception in creating snapshot");
+ LOG.error("Exception in creating snapshot", e);
}
getSelf().tell(new CaptureSnapshotReply(bs.toByteArray()), null);
}
try {
state.putAll((HashMap) toObject(snapshot));
} catch (Exception e) {
- LOG.error(e, "Exception in applying snapshot");
+ LOG.error("Exception in applying snapshot", e);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Snapshot applied to state : {}", ((HashMap) state).size());
* This will stop the timeout clock
*/
void markFollowerInActive();
+
+
+ /**
+ * Returns the time that has elapsed since the follower's activity timer was last reset
+ * @return time in milliseconds
+ */
+ long timeSinceLastActivity();
+
}
stopwatch.stop();
}
}
+
+ @Override
+ public long timeSinceLastActivity() {
+ return stopwatch.elapsed(TimeUnit.MILLISECONDS);
+ }
}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Procedure;
import akka.persistence.RecoveryCompleted;
import akka.persistence.SaveSnapshotFailure;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* RaftActor encapsulates a state machine that needs to be kept synchronized
* </ul>
*/
public abstract class RaftActor extends AbstractUntypedPersistentActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ protected final Logger LOG = LoggerFactory.getLogger(getClass());
/**
* The current state determines the current behavior of a RaftActor
} else if (message instanceof SaveSnapshotFailure) {
SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
- LOG.error(saveSnapshotFailure.cause(), "{}: SaveSnapshotFailure received for snapshot Cause:",
- persistenceId());
+ LOG.error("{}: SaveSnapshotFailure received for snapshot Cause:",
+ persistenceId(), saveSnapshotFailure.cause());
context.getReplicatedLog().snapshotRollback();
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
-import akka.event.LoggingAdapter;
-
import java.util.Map;
+import org.slf4j.Logger;
/**
* The RaftActorContext contains that portion of the RaftActors state that
*
* @return
*/
- LoggingAdapter getLogger();
+ Logger getLogger();
/**
* Get a mapping of peerId's to their addresses
package org.opendaylight.controller.cluster.raft;
+import static com.google.common.base.Preconditions.checkState;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.UntypedActorContext;
-import akka.event.LoggingAdapter;
import java.util.Map;
-
-import static com.google.common.base.Preconditions.checkState;
+import org.slf4j.Logger;
public class RaftActorContextImpl implements RaftActorContext {
private final Map<String, String> peerAddresses;
- private final LoggingAdapter LOG;
+ private final Logger LOG;
private final ConfigParams configParams;
ElectionTerm termInformation, long commitIndex,
long lastApplied, ReplicatedLog replicatedLog,
Map<String, String> peerAddresses, ConfigParams configParams,
- LoggingAdapter logger) {
+ Logger logger) {
this.actor = actor;
this.context = context;
this.id = id;
return context.system();
}
- @Override public LoggingAdapter getLogger() {
+ @Override public Logger getLogger() {
return this.LOG;
}
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
// Upon election: send initial empty AppendEntries RPCs
// (heartbeat) to each server; repeat during idle periods to
// prevent election timeouts (§5.2)
- scheduleHeartBeat(new FiniteDuration(0, TimeUnit.SECONDS));
+ sendAppendEntries(0);
}
/**
purgeInMemoryLog();
}
+ // Send the next log entry immediately, if possible; no need to wait for the heartbeat to trigger it
+ sendUpdatesToFollower(followerId, followerLogInformation, false);
return this;
}
followerLogInformation.markFollowerActive();
if (followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
+ boolean wasLastChunk = false;
if (reply.isSuccess()) {
if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
//this was the last chunk reply
// we can remove snapshot from the memory
setSnapshot(Optional.<ByteString>absent());
}
+ wasLastChunk = true;
} else {
followerToSnapshot.markSendStatus(true);
followerToSnapshot.markSendStatus(false);
}
+
+ if (!wasLastChunk && followerToSnapshot.canSendNextChunk()) {
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ if(followerActor != null) {
+ sendSnapshotChunk(followerActor, followerId);
+ }
+ }
+
} else {
LOG.error("{}: Chunk index {} in InstallSnapshotReply from follower {} does not match expected index {}",
context.getId(), reply.getChunkIndex(), followerId,
context.setCommitIndex(logIndex);
applyLogToStateMachine(logIndex);
} else {
- sendAppendEntries();
+ sendAppendEntries(0);
}
}
- private void sendAppendEntries() {
+ private void sendAppendEntries(long timeSinceLastActivityInterval) {
// Send an AppendEntries to all followers
-
for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
final String followerId = e.getKey();
- ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ final FollowerLogInformation followerLogInformation = e.getValue();
+ // This check helps avoid sending a repeat message to the follower
+ if(!followerLogInformation.isFollowerActive() ||
+ followerLogInformation.timeSinceLastActivity() >= timeSinceLastActivityInterval) {
+ sendUpdatesToFollower(followerId, followerLogInformation, true);
+ }
+ }
+ }
- if (followerActor != null) {
- FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
- long followerNextIndex = followerLogInformation.getNextIndex();
- boolean isFollowerActive = followerLogInformation.isFollowerActive();
+ /**
+ *
+ * This method checks if any update needs to be sent to the given follower. This includes append log entries,
+ * sending next snapshot chunk, and initiating a snapshot.
+ * Does nothing if the follower actor cannot be resolved.
+ */
- FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
- if (followerToSnapshot != null) {
- // if install snapshot is in process , then sent next chunk if possible
- if (isFollowerActive && followerToSnapshot.canSendNextChunk()) {
- sendSnapshotChunk(followerActor, followerId);
- } else {
- // we send a heartbeat even if we have not received a reply for the last chunk
- sendAppendEntriesToFollower(followerActor, followerNextIndex,
- Collections.<ReplicatedLogEntry>emptyList(), followerId);
- }
+ private void sendUpdatesToFollower(String followerId, FollowerLogInformation followerLogInformation,
+ boolean sendHeartbeat) {
+
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ if (followerActor != null) {
+ long followerNextIndex = followerLogInformation.getNextIndex();
+ boolean isFollowerActive = followerLogInformation.isFollowerActive();
+
+ if (mapFollowerToSnapshot.get(followerId) != null) {
+ // if install snapshot is in progress, then send the next chunk if possible
+ if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
+ sendSnapshotChunk(followerActor, followerId);
+ } else if(sendHeartbeat) {
+ // we send a heartbeat even if we have not received a reply for the last chunk
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
+ }
+ } else {
+ long leaderLastIndex = context.getReplicatedLog().lastIndex();
+ long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
+ if (isFollowerActive &&
+ context.getReplicatedLog().isPresent(followerNextIndex)) {
+ // FIXME : Sending one entry at a time
+ final List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
- } else {
- long leaderLastIndex = context.getReplicatedLog().lastIndex();
- long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
- final List<ReplicatedLogEntry> entries;
-
- LOG.debug("{}: Checking sendAppendEntries for {}, leaderLastIndex: {}, leaderSnapShotIndex: {}",
- context.getId(), leaderLastIndex, leaderSnapShotIndex);
-
- if (isFollowerActive && context.getReplicatedLog().isPresent(followerNextIndex)) {
- LOG.debug("{}: sendAppendEntries: {} is present for {}", context.getId(),
- followerNextIndex, followerId);
-
- // FIXME : Sending one entry at a time
- entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
-
- } else if (isFollowerActive && followerNextIndex >= 0 &&
- leaderLastIndex >= followerNextIndex ) {
- // if the followers next index is not present in the leaders log, and
- // if the follower is just not starting and if leader's index is more than followers index
- // then snapshot should be sent
-
- if(LOG.isDebugEnabled()) {
- LOG.debug(String.format("%s: InitiateInstallSnapshot to follower: %s," +
- "follower-nextIndex: %s, leader-snapshot-index: %s, " +
- "leader-last-index: %s", context.getId(), followerId,
- followerNextIndex, leaderSnapShotIndex, leaderLastIndex));
- }
- actor().tell(new InitiateInstallSnapshot(), actor());
-
- // we would want to sent AE as the capture snapshot might take time
- entries = Collections.<ReplicatedLogEntry>emptyList();
-
- } else {
- //we send an AppendEntries, even if the follower is inactive
- // in-order to update the followers timestamp, in case it becomes active again
- entries = Collections.<ReplicatedLogEntry>emptyList();
+ sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);
+
+ } else if (isFollowerActive && followerNextIndex >= 0 &&
+ leaderLastIndex >= followerNextIndex) {
+ // if the followers next index is not present in the leaders log, and
+ // if the follower is just not starting and if leader's index is more than followers index
+ // then snapshot should be sent
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("InitiateInstallSnapshot to follower:{}," +
+ "follower-nextIndex:{}, leader-snapshot-index:{}, " +
+ "leader-last-index:{}", followerId,
+ followerNextIndex, leaderSnapShotIndex, leaderLastIndex
+ );
}
+ actor().tell(new InitiateInstallSnapshot(), actor());
- sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);
+ // Send heartbeat to follower whenever install snapshot is initiated.
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
+
+ } else if(sendHeartbeat) {
+ //we send an AppendEntries, even if the follower is inactive
+ // in-order to update the followers timestamp, in case it becomes active again
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
}
+
}
}
}
// no need to capture snapshot
sendSnapshotChunk(followerActor, e.getKey());
- } else {
+ } else if (!context.isSnapshotCaptureInitiated()) {
initiateCaptureSnapshot();
//we just need 1 follower who would need snapshot to be installed.
// when we have the snapshot captured, we would again check (in SendInstallSnapshot)
actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(),
lastAppliedIndex, lastAppliedTerm, isInstallSnapshotInitiated),
actor());
+ context.setSnapshotCaptureInitiated(true);
}
context.getReplicatedLog().getSnapshotIndex(),
context.getReplicatedLog().getSnapshotTerm(),
nextSnapshotChunk,
- followerToSnapshot.incrementChunkIndex(),
- followerToSnapshot.getTotalChunks(),
+ followerToSnapshot.incrementChunkIndex(),
+ followerToSnapshot.getTotalChunks(),
Optional.of(followerToSnapshot.getLastChunkHashCode())
).toSerializable(),
actor()
followerToSnapshot.getTotalChunks());
}
} catch (IOException e) {
- LOG.error(e, "{}: InstallSnapshot failed for Leader.", context.getId());
+ LOG.error("{}: InstallSnapshot failed for Leader.", context.getId(), e);
}
}
private void sendHeartBeat() {
if (!followerToLog.isEmpty()) {
- sendAppendEntries();
+ sendAppendEntries(context.getConfigParams().getHeartBeatInterval().toMillis());
}
}
import akka.actor.ActorRef;
import akka.actor.Cancellable;
-import akka.event.LoggingAdapter;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.slf4j.Logger;
import scala.concurrent.duration.FiniteDuration;
/**
/**
*
*/
- protected final LoggingAdapter LOG;
+ protected final Logger LOG;
/**
*
} else {
//if one index is not present in the log, no point in looping
// around as the rest wont be present either
- LOG.warning(
+ LOG.warn(
"{}: Missing index {} from log. Cannot apply state. Ignoring {} to {}",
context.getId(), i, i, index);
break;
try {
close();
} catch (Exception e) {
- LOG.error(e, "{}: Failed to close behavior : {}", context.getId(), this.state());
+ LOG.error("{}: Failed to close behavior : {}", context.getId(), this.state(), e);
}
return behavior;
snapshotTracker = null;
} catch (Exception e){
- LOG.error(e, "{}: Exception in InstallSnapshot of follower", context.getId());
+ LOG.error("{}: Exception in InstallSnapshot of follower", context.getId(), e);
//send reply with success as false. The chunk will be sent again on failure
sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
installSnapshot.getChunkIndex(), false), actor());
package org.opendaylight.controller.cluster.raft.behaviors;
-import akka.event.LoggingAdapter;
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
+import org.slf4j.Logger;
/**
* SnapshotTracker does house keeping for a snapshot that is being installed in chunks on the Follower
*/
public class SnapshotTracker {
- private final LoggingAdapter LOG;
+ private final Logger LOG;
private final int totalChunks;
private ByteString collectedChunks = ByteString.EMPTY;
private int lastChunkIndex = AbstractLeader.FIRST_CHUNK_INDEX - 1;
private boolean sealed = false;
private int lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
- SnapshotTracker(LoggingAdapter LOG, int totalChunks){
+ SnapshotTracker(Logger LOG, int totalChunks){
this.LOG = LOG;
this.totalChunks = totalChunks;
}
}
public static class InvalidChunkException extends Exception {
+ private static final long serialVersionUID = 1L;
+
InvalidChunkException(String message){
super(message);
}
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import com.google.common.base.Preconditions;
import com.google.protobuf.GeneratedMessage;
import java.io.Serializable;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.cluster.raft.test.MockPayloadMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class MockRaftActorContext implements RaftActorContext {
public void initReplicatedLog(){
this.replicatedLog = new SimpleReplicatedLog();
- this.replicatedLog.append(new MockReplicatedLogEntry(1, 1, new MockPayload("")));
+ this.replicatedLog.append(new MockReplicatedLogEntry(1, 0, new MockPayload("1")));
+ this.replicatedLog.append(new MockReplicatedLogEntry(1, 1, new MockPayload("2")));
}
@Override public ActorRef actorOf(Props props) {
return this.system;
}
- @Override public LoggingAdapter getLogger() {
- return Logging.getLogger(system, this);
+ @Override public Logger getLogger() {
+ return LoggerFactory.getLogger(getClass());
}
@Override public Map<String, String> getPeerAddresses() {
package org.opendaylight.controller.cluster.raft;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
public class RaftActorTest extends AbstractActorTest {
};
}
+ @Test
+ public void testFakeSnapshotsForLeaderWithInInitiateSnapshots() throws Exception {
+ // Verifies that while an InitiateInstallSnapshot is in progress, AppendEntriesReply
+ // messages from followers do NOT trigger a "fake" (log-trimming) snapshot, and that
+ // the real snapshot (CaptureSnapshotReply) clears the log up to lastApplied.
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "leader1";
+
+ ActorRef followerActor1 =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ ActorRef followerActor2 =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ // Long heartbeat/isolation intervals so timers never fire during the test.
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerActor1.path().toString());
+ peerAddresses.put("follower-2", followerActor2.path().toString());
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(),
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor leaderActor = mockActorRef.underlyingActor();
+ leaderActor.getRaftActorContext().setCommitIndex(9);
+ leaderActor.getRaftActorContext().setLastApplied(9);
+ leaderActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+
+ leaderActor.waitForInitializeBehaviorComplete();
+
+ Leader leader = new Leader(leaderActor.getRaftActorContext());
+ leaderActor.setCurrentBehavior(leader);
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // create 5 entries in the log
+ MockRaftActorContext.MockReplicatedLogBuilder logBuilder = new MockRaftActorContext.MockReplicatedLogBuilder();
+ leaderActor.getRaftActorContext().setReplicatedLog(logBuilder.createEntries(5, 10, 1).build());
+ // Set the snapshot index to 4; entries 0 to 4 are already snapshotted.
+ leaderActor.getRaftActorContext().getReplicatedLog().setSnapshotIndex(4);
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+
+ leaderActor.onReceiveCommand(new AppendEntriesReply("follower-1", 1, true, 9, 1));
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+
+ // Set the 2nd follower's nextIndex to 1, which falls in the snapshotted range.
+ leaderActor.onReceiveCommand(new AppendEntriesReply("follower-2", 1, true, 0, 1));
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+
+ // simulate a real snapshot
+ leaderActor.onReceiveCommand(new InitiateInstallSnapshot());
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(String.format("expected to be Leader but was %s. Current Leader = %s ",
+ leaderActor.getCurrentBehavior().state(),leaderActor.getLeaderId())
+ , RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+
+ // A reply from a slow follower must not initiate a fake snapshot while the
+ // real snapshot capture is still in progress.
+ leaderActor.onReceiveCommand(new AppendEntriesReply("follower-2", 1, true, 9, 1));
+ assertEquals("Fake snapshot should not happen when Initiate is in progress", 5, leaderActor.getReplicatedLog().size());
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("foo-0"),
+ new MockRaftActorContext.MockPayload("foo-1"),
+ new MockRaftActorContext.MockPayload("foo-2"),
+ new MockRaftActorContext.MockPayload("foo-3"),
+ new MockRaftActorContext.MockPayload("foo-4")));
+ // Completing the capture must reset the snapshot-capture-initiated flag.
+ leaderActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
+ assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+
+ assertEquals("Real snapshot didn't clear the log till lastApplied", 0, leaderActor.getReplicatedLog().size());
+
+ // A late reply from a slow follower after the snapshot should not raise errors.
+ leaderActor.onReceiveCommand(new AppendEntriesReply("follower-2", 1, true, 5, 1));
+ assertEquals(0, leaderActor.getReplicatedLog().size());
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+
+
private ByteString fromObject(Object snapshot) throws Exception {
ByteArrayOutputStream b = null;
ObjectOutputStream o = null;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
+import org.slf4j.impl.SimpleLogger;
import scala.concurrent.duration.FiniteDuration;
public class LeaderTest extends AbstractRaftActorBehaviorTest {
+ static {
+ // This enables trace logging for the tests.
+ // NOTE(review): relies on slf4j-simple (org.slf4j.impl.SimpleLogger) being the
+ // bound SLF4J implementation on the test classpath — confirm the binding.
+ System.setProperty(SimpleLogger.LOG_KEY_PREFIX + MockRaftActorContext.class.getName(), "trace");
+ }
+
private final ActorRef leaderActor =
getSystem().actorOf(Props.create(DoNothingActor.class));
private final ActorRef senderActor =
@Test
public void testThatLeaderSendsAHeartbeatMessageToAllFollowers() {
new JavaTestKit(getSystem()) {{
-
new Within(duration("1 seconds")) {
@Override
protected void run() {
-
ActorRef followerActor = getTestActor();
MockRaftActorContext actorContext = (MockRaftActorContext) createActorContext();
Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ String followerId = "follower";
+ peerAddresses.put(followerId, followerActor.path().toString());
actorContext.setPeerAddresses(peerAddresses);
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
+
Leader leader = new Leader(actorContext);
- leader.handleMessage(senderActor, new SendHeartBeat());
- final String out =
- new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- Object msg = fromSerializableMessage(in);
- if (msg instanceof AppendEntries) {
- if (((AppendEntries)msg).getTerm() == 0) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ // Leader should send an immediate heartbeat with no entries as follower is inactive.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ AppendEntries appendEntries = expectMsgClass(duration("5 seconds"), AppendEntries.class);
+ assertEquals("getTerm", term, appendEntries.getTerm());
+ assertEquals("getPrevLogIndex", -1, appendEntries.getPrevLogIndex());
+ assertEquals("getPrevLogTerm", -1, appendEntries.getPrevLogTerm());
+ assertEquals("Entries size", 0, appendEntries.getEntries().size());
- assertEquals("match", out);
+ // The follower would normally reply - simulate that explicitly here.
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ followerId, term, true, lastIndex - 1, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(followerId).isFollowerActive());
+
+ // Sleep for the heartbeat interval so AppendEntries is sent.
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().
+ getHeartBeatInterval().toMillis(), TimeUnit.MILLISECONDS);
+
+ leader.handleMessage(senderActor, new SendHeartBeat());
+ appendEntries = expectMsgClass(duration("5 seconds"), AppendEntries.class);
+ assertEquals("getPrevLogIndex", lastIndex - 1, appendEntries.getPrevLogIndex());
+ assertEquals("getPrevLogTerm", term, appendEntries.getPrevLogTerm());
+ assertEquals("Entries size", 1, appendEntries.getEntries().size());
+ assertEquals("Entry getIndex", lastIndex, appendEntries.getEntries().get(0).getIndex());
+ assertEquals("Entry getTerm", term, appendEntries.getEntries().get(0).getTerm());
}
};
}};
@Test
public void testHandleReplicateMessageSendAppendEntriesToFollower() {
new JavaTestKit(getSystem()) {{
-
new Within(duration("1 seconds")) {
@Override
protected void run() {
-
ActorRef followerActor = getTestActor();
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
+ MockRaftActorContext actorContext = (MockRaftActorContext) createActorContext();
Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ String followerId = "follower";
+ peerAddresses.put(followerId, followerActor.path().toString());
actorContext.setPeerAddresses(peerAddresses);
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
+
Leader leader = new Leader(actorContext);
- RaftActorBehavior raftBehavior = leader
- .handleMessage(senderActor, new Replicate(null, null,
- new MockRaftActorContext.MockReplicatedLogEntry(1,
- 100,
- new MockRaftActorContext.MockPayload("foo"))
- ));
+
+ // Leader will send an immediate heartbeat - ignore it.
+ expectMsgClass(duration("5 seconds"), AppendEntries.class);
+
+ // The follower would normally reply - simulate that explicitly here.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ followerId, term, true, lastIndex, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(followerId).isFollowerActive());
+
+ MockRaftActorContext.MockPayload payload = new MockRaftActorContext.MockPayload("foo");
+ MockRaftActorContext.MockReplicatedLogEntry newEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+ 1, lastIndex + 1, payload);
+ actorContext.getReplicatedLog().append(newEntry);
+ RaftActorBehavior raftBehavior = leader.handleMessage(senderActor,
+ new Replicate(null, null, newEntry));
// State should not change
assertTrue(raftBehavior instanceof Leader);
- final String out =
- new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- Object msg = fromSerializableMessage(in);
- if (msg instanceof AppendEntries) {
- if (((AppendEntries)msg).getTerm() == 0) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
+ AppendEntries appendEntries = expectMsgClass(duration("5 seconds"), AppendEntries.class);
+ assertEquals("getPrevLogIndex", lastIndex, appendEntries.getPrevLogIndex());
+ assertEquals("getPrevLogTerm", term, appendEntries.getPrevLogTerm());
+ assertEquals("Entries size", 1, appendEntries.getEntries().size());
+ assertEquals("Entry getIndex", lastIndex + 1, appendEntries.getEntries().get(0).getIndex());
+ assertEquals("Entry getTerm", term, appendEntries.getEntries().get(0).getTerm());
+ assertEquals("Entry payload", payload, appendEntries.getEntries().get(0).getData());
}
};
}};
@Test
public void testHandleReplicateMessageWhenThereAreNoFollowers() {
new JavaTestKit(getSystem()) {{
-
new Within(duration("1 seconds")) {
@Override
protected void run() {
leader.getFollowerToSnapshot().getNextChunk();
leader.getFollowerToSnapshot().incrementChunkIndex();
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
leader.handleMessage(leaderActor, new SendHeartBeat());
- AppendEntries aeproto = (AppendEntries)MessageCollectorActor.getFirstMatching(
+ AppendEntries aeproto = MessageCollectorActor.getFirstMatching(
followerActor, AppendEntries.class);
assertNotNull("AppendEntries should be sent even if InstallSnapshotReply is not " +
leader.handleMessage(senderActor, new SendHeartBeat());
- InstallSnapshotMessages.InstallSnapshot isproto = (InstallSnapshotMessages.InstallSnapshot)
- MessageCollectorActor.getFirstMatching(followerActor,
- InstallSnapshot.SERIALIZABLE_CLASS);
+ InstallSnapshotMessages.InstallSnapshot isproto = MessageCollectorActor.getFirstMatching(followerActor,
+ InstallSnapshot.SERIALIZABLE_CLASS);
assertNotNull("Installsnapshot should get called for sending the next chunk of snapshot",
isproto);
//update follower timestamp
leader.markFollowerActive(followerActor.path().toString());
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
// this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
RaftActorBehavior raftBehavior = leader.handleMessage(
senderActor, new Replicate(null, "state-id", entry));
RaftActorBehavior raftBehavior = leader.handleMessage(
leaderActor, new InitiateInstallSnapshot());
- CaptureSnapshot cs = (CaptureSnapshot) MessageCollectorActor.
+ CaptureSnapshot cs = MessageCollectorActor.
getFirstMatching(leaderActor, CaptureSnapshot.class);
assertNotNull(cs);
assertEquals(1, cs.getLastAppliedTerm());
assertEquals(4, cs.getLastIndex());
assertEquals(2, cs.getLastTerm());
+
+ // If another InitiateInstallSnapshot arrives while the first is still in progress, it shouldn't trigger another CaptureSnapshot.
+ raftBehavior = leader.handleMessage(leaderActor, new InitiateInstallSnapshot());
+ List<Object> captureSnapshots = MessageCollectorActor.getAllMatching(leaderActor, CaptureSnapshot.class);
+ assertEquals("CaptureSnapshot should not get invoked when initiate is in progress", 1, captureSnapshots.size());
+
}};
}
Leader leader = new Leader(actorContext);
+ // Ignore initial heartbeat.
+ expectMsgClass(duration("5 seconds"), AppendEntries.class);
+
// new entry
ReplicatedLogImplEntry entry =
new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
MockLeader leader = new MockLeader(actorContext);
+ // Ignore initial heartbeat.
+ expectMsgClass(duration("5 seconds"), AppendEntries.class);
+
Map<String, String> leadersSnapshot = new HashMap<>();
leadersSnapshot.put("1", "A");
leadersSnapshot.put("2", "B");
assertEquals(snapshotIndex + 1, fli.getNextIndex());
}};
}
+ @Test
+ public void testSendSnapshotfromInstallSnapshotReply() throws Exception {
+ // Verifies chunked snapshot transfer: each successful InstallSnapshotReply from the
+ // follower causes the leader to send the next chunk, and after the final (3rd) chunk
+ // a further reply does not produce any additional InstallSnapshot message.
+ new JavaTestKit(getSystem()) {{
+
+ TestActorRef<MessageCollectorActor> followerActor =
+ TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), "follower-reply");
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-reply",
+ followerActor.path().toString());
+
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
+
+ MockRaftActorContext actorContext =
+ (MockRaftActorContext) createActorContext();
+ // Chunk size of 50 bytes splits the serialized snapshot into 3 chunks
+ // (the assertions below expect getTotalChunks() == 3).
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl(){
+ @Override
+ public int getSnapshotChunkSize() {
+ return 50;
+ }
+ };
+ // Long intervals so heartbeat/isolation timers stay quiet during the test.
+ configParams.setHeartBeatInterval(new FiniteDuration(9, TimeUnit.SECONDS));
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(10, TimeUnit.SECONDS));
+
+ actorContext.setConfigParams(configParams);
+ actorContext.setPeerAddresses(peerAddresses);
+ actorContext.setCommitIndex(followersLastIndex);
+
+ MockLeader leader = new MockLeader(actorContext);
+
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
+
+ // Set the snapshot variables in the replicated log.
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
+
+ leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
+
+ List<Object> objectList = MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
+
+ // Only the first chunk should have been sent so far.
+ assertEquals(1, objectList.size());
+
+ Object o = objectList.get(0);
+ assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+
+ InstallSnapshotMessages.InstallSnapshot installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+
+ assertEquals(1, installSnapshot.getChunkIndex());
+ assertEquals(3, installSnapshot.getTotalChunks());
+
+ // Ack chunk 1 -> leader should send chunk 2.
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ "follower-reply", installSnapshot.getChunkIndex(), true));
+
+ objectList = MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
+
+ assertEquals(2, objectList.size());
+
+ installSnapshot = (InstallSnapshotMessages.InstallSnapshot) objectList.get(1);
+
+ // Ack chunk 2 -> leader should send chunk 3 (the last one).
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ "follower-reply", installSnapshot.getChunkIndex(), true));
+
+ objectList = MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
+
+ assertEquals(3, objectList.size());
+
+ installSnapshot = (InstallSnapshotMessages.InstallSnapshot) objectList.get(2);
+
+ // Send snapshot reply one more time and make sure that a new snapshot message should not be sent to follower
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ "follower-reply", installSnapshot.getChunkIndex(), true));
+
+ objectList = MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
+
+ // Count should still stay at 3
+ assertEquals(3, objectList.size());
+ }};
+ }
+
@Test
- public void testHandleInstallSnapshotReplyWithInvalidChunkIndex() throws Exception {
+ public void testHandleInstallSnapshotReplyWithInvalidChunkIndex() throws Exception{
new JavaTestKit(getSystem()) {{
TestActorRef<MessageCollectorActor> followerActor =
leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
- Object o = MessageCollectorActor.getAllMessages(followerActor).get(0);
-
- assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+ MessageCollectorActor.getAllMatching(followerActor,
+ InstallSnapshotMessages.InstallSnapshot.class);
- InstallSnapshotMessages.InstallSnapshot installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+ InstallSnapshotMessages.InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ assertNotNull(installSnapshot);
assertEquals(1, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
+ followerActor.underlyingActor().clear();
- leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(), followerActor.path().toString(), -1, false));
-
- leader.handleMessage(leaderActor, new SendHeartBeat());
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ followerActor.path().toString(), -1, false));
- o = MessageCollectorActor.getAllMessages(followerActor).get(1);
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
- assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+ leader.handleMessage(leaderActor, new SendHeartBeat());
- installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+ installSnapshot = MessageCollectorActor.getFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ assertNotNull(installSnapshot);
assertEquals(1, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
public void testHandleSnapshotSendsPreviousChunksHashCodeWhenSendingNextChunk() throws Exception {
new JavaTestKit(getSystem()) {
{
-
TestActorRef<MessageCollectorActor> followerActor =
- TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), "follower");
+ TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), "follower-chunk");
Map<String, String> peerAddresses = new HashMap<>();
peerAddresses.put(followerActor.path().toString(),
leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
- Object o = MessageCollectorActor.getAllMessages(followerActor).get(0);
-
- assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
-
- InstallSnapshotMessages.InstallSnapshot installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+ InstallSnapshotMessages.InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ assertNotNull(installSnapshot);
assertEquals(1, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
int hashCode = installSnapshot.getData().hashCode();
- leader.handleMessage(followerActor, new InstallSnapshotReply(installSnapshot.getTerm(),followerActor.path().toString(),1,true ));
-
- leader.handleMessage(leaderActor, new SendHeartBeat());
-
- Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
-
- o = MessageCollectorActor.getAllMessages(followerActor).get(1);
+ followerActor.underlyingActor().clear();
- assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+ leader.handleMessage(followerActor, new InstallSnapshotReply(installSnapshot.getTerm(),followerActor.path().toString(),1,true ));
- installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+ installSnapshot = MessageCollectorActor.getFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ assertNotNull(installSnapshot);
assertEquals(2, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
@Override
protected RaftActorContext createActorContext(ActorRef actorRef) {
- return new MockRaftActorContext("test", getSystem(), actorRef);
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(50, TimeUnit.MILLISECONDS));
+ configParams.setElectionTimeoutFactor(100000);
+ MockRaftActorContext context = new MockRaftActorContext("test", getSystem(), actorRef);
+ context.setConfigParams(configParams);
+ return context;
}
private ByteString toByteString(Map<String, String> state) {
}
public static class ForwardMessageToBehaviorActor extends MessageCollectorActor {
- private static AbstractRaftActorBehavior behavior;
-
- public ForwardMessageToBehaviorActor(){
-
- }
+ // Per-instance behavior replaces the former static field, so concurrent tests
+ // no longer share (and clobber) one another's behavior.
+ AbstractRaftActorBehavior behavior;
@Override public void onReceive(Object message) throws Exception {
+ // Forward to the configured behavior first (if any), then let
+ // MessageCollectorActor record the message. The null guard avoids an NPE
+ // for messages received before a behavior is assigned.
+ if(behavior != null) {
+ behavior.handleMessage(sender(), message);
+ }
+
super.onReceive(message);
- behavior.handleMessage(sender(), message);
}
- public static void setBehavior(AbstractRaftActorBehavior behavior){
- ForwardMessageToBehaviorActor.behavior = behavior;
+ // Factory for Props so tests can create instances via TestActorRef.
+ public static Props props() {
+ return Props.create(ForwardMessageToBehaviorActor.class);
}
}
@Test
public void testLeaderCreatedWithCommitIndexLessThanLastIndex() throws Exception {
new JavaTestKit(getSystem()) {{
-
- ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ TestActorRef<ForwardMessageToBehaviorActor> leaderActor = TestActorRef.create(getSystem(),
+ Props.create(ForwardMessageToBehaviorActor.class));
MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
- ActorRef followerActor = getSystem().actorOf(Props.create(ForwardMessageToBehaviorActor.class));
+ TestActorRef<ForwardMessageToBehaviorActor> followerActor = TestActorRef.create(getSystem(),
+ ForwardMessageToBehaviorActor.props());
MockRaftActorContext followerActorContext =
- new MockRaftActorContext("follower", getSystem(), followerActor);
+ new MockRaftActorContext("follower", getSystem(), followerActor);
Follower follower = new Follower(followerActorContext);
-
- ForwardMessageToBehaviorActor.setBehavior(follower);
+ followerActor.underlyingActor().behavior = follower;
Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ peerAddresses.put("follower", followerActor.path().toString());
leaderActorContext.setPeerAddresses(peerAddresses);
//create 3 entries
leaderActorContext.setReplicatedLog(
- new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
leaderActorContext.setCommitIndex(1);
// follower too has the exact same log entries and has the same commit index
followerActorContext.setReplicatedLog(
- new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
followerActorContext.setCommitIndex(1);
Leader leader = new Leader(leaderActorContext);
- leader.markFollowerActive(followerActor.path().toString());
-
- leader.handleMessage(leaderActor, new SendHeartBeat());
-
- AppendEntries appendEntries = (AppendEntries) MessageCollectorActor
- .getFirstMatching(followerActor, AppendEntries.class);
+ AppendEntries appendEntries = MessageCollectorActor.getFirstMatching(followerActor, AppendEntries.class);
assertNotNull(appendEntries);
assertEquals(1, appendEntries.getLeaderCommit());
- assertEquals(1, appendEntries.getEntries().get(0).getIndex());
+ assertEquals(0, appendEntries.getEntries().size());
assertEquals(0, appendEntries.getPrevLogIndex());
- AppendEntriesReply appendEntriesReply =
- (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
+ AppendEntriesReply appendEntriesReply = MessageCollectorActor.getFirstMatching(
leaderActor, AppendEntriesReply.class);
-
assertNotNull(appendEntriesReply);
- // follower returns its next index
assertEquals(2, appendEntriesReply.getLogLastIndex());
assertEquals(1, appendEntriesReply.getLogLastTerm());
+ // follower returns its next index
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
}};
}
@Test
public void testLeaderCreatedWithCommitIndexLessThanFollowersCommitIndex() throws Exception {
new JavaTestKit(getSystem()) {{
-
- ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ TestActorRef<ForwardMessageToBehaviorActor> leaderActor = TestActorRef.create(getSystem(),
+ Props.create(ForwardMessageToBehaviorActor.class));
MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
- ActorRef followerActor = getSystem().actorOf(
- Props.create(ForwardMessageToBehaviorActor.class));
+ TestActorRef<ForwardMessageToBehaviorActor> followerActor = TestActorRef.create(getSystem(),
+ ForwardMessageToBehaviorActor.props());
MockRaftActorContext followerActorContext =
- new MockRaftActorContext("follower", getSystem(), followerActor);
+ new MockRaftActorContext("follower", getSystem(), followerActor);
Follower follower = new Follower(followerActorContext);
-
- ForwardMessageToBehaviorActor.setBehavior(follower);
+ followerActor.underlyingActor().behavior = follower;
Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ peerAddresses.put("follower", followerActor.path().toString());
leaderActorContext.setPeerAddresses(peerAddresses);
leaderActorContext.getReplicatedLog().removeFrom(0);
leaderActorContext.setReplicatedLog(
- new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
leaderActorContext.setCommitIndex(1);
followerActorContext.getReplicatedLog().removeFrom(0);
followerActorContext.setReplicatedLog(
- new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
// follower has the same log entries but its commit index > leaders commit index
followerActorContext.setCommitIndex(2);
Leader leader = new Leader(leaderActorContext);
- leader.markFollowerActive(followerActor.path().toString());
-
- leader.handleMessage(leaderActor, new SendHeartBeat());
-
- AppendEntries appendEntries = (AppendEntries) MessageCollectorActor
- .getFirstMatching(followerActor, AppendEntries.class);
+ // Initial heartbeat
+ AppendEntries appendEntries = MessageCollectorActor.getFirstMatching(followerActor, AppendEntries.class);
assertNotNull(appendEntries);
assertEquals(1, appendEntries.getLeaderCommit());
- assertEquals(1, appendEntries.getEntries().get(0).getIndex());
+ assertEquals(0, appendEntries.getEntries().size());
assertEquals(0, appendEntries.getPrevLogIndex());
- AppendEntriesReply appendEntriesReply =
- (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
+ AppendEntriesReply appendEntriesReply = MessageCollectorActor.getFirstMatching(
leaderActor, AppendEntriesReply.class);
+ assertNotNull(appendEntriesReply);
+
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
+
+ leaderActor.underlyingActor().behavior = leader;
+ leader.handleMessage(followerActor, appendEntriesReply);
+
+ leaderActor.underlyingActor().clear();
+ followerActor.underlyingActor().clear();
+
+ Uninterruptibles.sleepUninterruptibly(leaderActorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+ appendEntries = MessageCollectorActor.getFirstMatching(followerActor, AppendEntries.class);
+ assertNotNull(appendEntries);
+
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(0, appendEntries.getEntries().size());
+ assertEquals(2, appendEntries.getPrevLogIndex());
+
+ appendEntriesReply = MessageCollectorActor.getFirstMatching(leaderActor, AppendEntriesReply.class);
assertNotNull(appendEntriesReply);
assertEquals(2, appendEntriesReply.getLogLastIndex());
assertEquals(1, appendEntriesReply.getLogLastTerm());
+ assertEquals(1, followerActorContext.getCommitIndex());
}};
}
assertEquals(2, leaderActorContext.getCommitIndex());
ApplyLogEntries applyLogEntries =
- (ApplyLogEntries) MessageCollectorActor.getFirstMatching(leaderActor,
- ApplyLogEntries.class);
+ MessageCollectorActor.getFirstMatching(leaderActor,
+ ApplyLogEntries.class);
assertNotNull(applyLogEntries);
}};
}
+
+ @Test
+ public void testAppendEntryCallAtEndofAppendEntryReply() throws Exception {
+ // Verifies that handling an AppendEntriesReply immediately triggers the next
+ // AppendEntries (containing the follower's next log entry) without waiting for
+ // the next heartbeat tick.
+ new JavaTestKit(getSystem()) {{
+ TestActorRef<MessageCollectorActor> leaderActor = TestActorRef.create(getSystem(),
+ Props.create(MessageCollectorActor.class));
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ // Default heartbeat interval; long isolation interval so the isolated-leader
+ // check never fires during the test.
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(10, TimeUnit.SECONDS));
+
+ leaderActorContext.setConfigParams(configParams);
+
+ TestActorRef<ForwardMessageToBehaviorActor> followerActor = TestActorRef.create(getSystem(),
+ ForwardMessageToBehaviorActor.props());
+
+ MockRaftActorContext followerActorContext =
+ new MockRaftActorContext("follower-reply", getSystem(), followerActor);
+
+ followerActorContext.setConfigParams(configParams);
+
+ Follower follower = new Follower(followerActorContext);
+ followerActor.underlyingActor().behavior = follower;
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-reply",
+ followerActor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ // Start both sides with empty logs and no committed/applied entries.
+ leaderActorContext.getReplicatedLog().removeFrom(0);
+ leaderActorContext.setCommitIndex(-1);
+ leaderActorContext.setLastApplied(-1);
+
+ followerActorContext.getReplicatedLog().removeFrom(0);
+ followerActorContext.setCommitIndex(-1);
+ followerActorContext.setLastApplied(-1);
+
+ Leader leader = new Leader(leaderActorContext);
+
+ // The leader sends an immediate heartbeat; the follower's reply establishes
+ // the follower as active.
+ AppendEntriesReply appendEntriesReply = MessageCollectorActor.getFirstMatching(
+ leaderActor, AppendEntriesReply.class);
+ assertNotNull(appendEntriesReply);
+ leader.handleMessage(followerActor, appendEntriesReply);
+
+ // Clear initial heartbeat messages
+
+ leaderActor.underlyingActor().clear();
+ followerActor.underlyingActor().clear();
+
+ // create 3 entries
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ leaderActorContext.setCommitIndex(1);
+ leaderActorContext.setLastApplied(1);
+
+ // Wait one heartbeat interval so the next SendHeartBeat actually emits AppendEntries.
+ Uninterruptibles.sleepUninterruptibly(leaderActorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
+ AppendEntries appendEntries = MessageCollectorActor.getFirstMatching(followerActor, AppendEntries.class);
+ assertNotNull(appendEntries);
+
+ // Should send first log entry
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(0, appendEntries.getEntries().get(0).getIndex());
+ assertEquals(-1, appendEntries.getPrevLogIndex());
+
+ appendEntriesReply = MessageCollectorActor.getFirstMatching(leaderActor, AppendEntriesReply.class);
+ assertNotNull(appendEntriesReply);
+
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
+ assertEquals(0, appendEntriesReply.getLogLastIndex());
+
+ followerActor.underlyingActor().clear();
+
+ // Handling the reply should immediately push the next entry — no heartbeat needed.
+ leader.handleAppendEntriesReply(followerActor, appendEntriesReply);
+
+ appendEntries = MessageCollectorActor.getFirstMatching(followerActor, AppendEntries.class);
+ assertNotNull(appendEntries);
+
+ // Should send second log entry
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(1, appendEntries.getEntries().get(0).getIndex());
+ }};
+ }
+
class MockLeader extends Leader {
FollowerToSnapshot fts;
package org.opendaylight.controller.cluster.raft.behaviors;
import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import akka.event.LoggingAdapter;
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
import java.io.ByteArrayOutputStream;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class SnapshotTrackerTest {
+ Logger logger = LoggerFactory.getLogger(getClass());
+
Map<String, String> data;
ByteString byteString;
ByteString chunk1;
@Test
public void testAddChunk() throws SnapshotTracker.InvalidChunkException {
- SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+ SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
tracker1.addChunk(1, chunk1, Optional.<Integer>absent());
tracker1.addChunk(2, chunk2, Optional.<Integer>absent());
tracker1.addChunk(3, chunk3, Optional.<Integer>absent());
// Verify that an InvalidChunkException is thrown when we try to add a chunk to a sealed tracker
- SnapshotTracker tracker2 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker2 = new SnapshotTracker(logger, 2);
tracker2.addChunk(1, chunk1, Optional.<Integer>absent());
tracker2.addChunk(2, chunk2, Optional.<Integer>absent());
}
// The first chunk's index must at least be FIRST_CHUNK_INDEX
- SnapshotTracker tracker3 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker3 = new SnapshotTracker(logger, 2);
try {
tracker3.addChunk(AbstractLeader.FIRST_CHUNK_INDEX - 1, chunk1, Optional.<Integer>absent());
}
// Out of sequence chunk indexes won't work
- SnapshotTracker tracker4 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker4 = new SnapshotTracker(logger, 2);
tracker4.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.<Integer>absent());
// No exceptions will be thrown when invalid chunk is added with the right sequence
// If the lastChunkHashCode is missing
- SnapshotTracker tracker5 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker5 = new SnapshotTracker(logger, 2);
tracker5.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.<Integer>absent());
// Look I can add the same chunk again
// An exception will be thrown when an invalid chunk is added with the right sequence
// when the lastChunkHashCode is present
- SnapshotTracker tracker6 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker6 = new SnapshotTracker(logger, 2);
tracker6.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.of(-1));
public void testGetSnapShot() throws SnapshotTracker.InvalidChunkException {
// Trying to get a snapshot before all chunks have been received will throw an exception
- SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+ SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
tracker1.addChunk(1, chunk1, Optional.<Integer>absent());
try {
}
- SnapshotTracker tracker2 = new SnapshotTracker(mock(LoggingAdapter.class), 3);
+ SnapshotTracker tracker2 = new SnapshotTracker(logger, 3);
tracker2.addChunk(1, chunk1, Optional.<Integer>absent());
tracker2.addChunk(2, chunk2, Optional.<Integer>absent());
@Test
public void testGetCollectedChunks() throws SnapshotTracker.InvalidChunkException {
- SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+ SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
ByteString chunks = chunk1.concat(chunk2);
import akka.pattern.Patterns;
import akka.util.Timeout;
import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Uninterruptibles;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
public class MessageCollectorActor extends UntypedActor {
- private List<Object> messages = new ArrayList<>();
+ private final List<Object> messages = new ArrayList<>();
@Override public void onReceive(Object message) throws Exception {
if(message instanceof String){
}
}
+ // Clears all messages collected so far; lets tests reset this actor between phases.
+ public void clear() {
+ messages.clear();
+ }
+
public static List<Object> getAllMessages(ActorRef actor) throws Exception {
FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
Timeout operationTimeout = new Timeout(operationDuration);
* @param clazz
* @return
*/
- public static Object getFirstMatching(ActorRef actor, Class<?> clazz) throws Exception {
- List<Object> allMessages = getAllMessages(actor);
+ public static <T> T getFirstMatching(ActorRef actor, Class<T> clazz) throws Exception {
+ for(int i = 0; i < 50; i++) {
+ List<Object> allMessages = getAllMessages(actor);
- for(Object message : allMessages){
- if(message.getClass().equals(clazz)){
- return message;
+ for(Object message : allMessages){
+ if(message.getClass().equals(clazz)){
+ return (T) message;
+ }
}
+
+ Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}
return null;
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-monitoring</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-binding-broker-impl</artifactId>
<groupId>org.opendaylight.yangtools.thirdparty</groupId>
<artifactId>antlr4-runtime-osgi-nohead</artifactId>
</dependency>
+
<!--Compile scopes for all testing dependencies are intentional-->
<!--This way, all testing dependencies can be transitively used by other integration test modules-->
<!--If the dependencies are test scoped, they are not visible to other maven modules depending on sal-binding-it-->
import static org.ops4j.pax.exam.CoreOptions.frameworkProperty;
import static org.ops4j.pax.exam.CoreOptions.junitBundles;
import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
+import static org.ops4j.pax.exam.CoreOptions.systemPackages;
import static org.ops4j.pax.exam.CoreOptions.systemProperty;
-
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.options.DefaultCompositeOption;
import org.ops4j.pax.exam.util.PathUtils;
bindingAwareSalBundles(),
mavenBundle("commons-codec", "commons-codec").versionAsInProject(),
- systemProperty("org.osgi.framework.system.packages.extra").value("sun.nio.ch"),
+ systemPackages("sun.nio.ch", "sun.misc"),
mavenBundle("io.netty", "netty-common").versionAsInProject(), //
mavenBundle("io.netty", "netty-buffer").versionAsInProject(), //
mavenBundle("io.netty", "netty-handler").versionAsInProject(), //
mavenBundle("org.eclipse.birt.runtime.3_7_1", "org.apache.xml.resolver", "1.2.0"),
mavenBundle(CONTROLLER, "config-netconf-connector").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "netconf-notifications-api").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "ietf-netconf").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "ietf-netconf-notifications").versionAsInProject(), //
mavenBundle(CONTROLLER, "netconf-impl").versionAsInProject(), //
mavenBundle(CONTROLLER, "config-persister-file-xml-adapter").versionAsInProject().noStart(),
mavenBundle(CONTROLLER, "sal-common-util").versionAsInProject(), // //
- mavenBundle(CONTROLLER, "sal-inmemory-datastore").versionAsInProject(), // /
+ mavenBundle("com.lmax", "disruptor").versionAsInProject(),
+ mavenBundle(CONTROLLER, "sal-inmemory-datastore").versionAsInProject(), //
mavenBundle(CONTROLLER, "sal-broker-impl").versionAsInProject(), // //
mavenBundle(CONTROLLER, "sal-core-spi").versionAsInProject().update(), //
package org.opendaylight.controller.cluster.common.actor;
import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public abstract class AbstractUntypedActor extends UntypedActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ protected final Logger LOG = LoggerFactory.getLogger(getClass());
public AbstractUntypedActor() {
if(LOG.isDebugEnabled()) {
package org.opendaylight.controller.cluster.common.actor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Procedure;
import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.UntypedPersistentActor;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ protected final Logger LOG = LoggerFactory.getLogger(getClass());
public AbstractUntypedPersistentActor() {
if(LOG.isDebugEnabled()) {
try {
procedure.apply(o);
} catch (Exception e) {
- LOG.error(e, "An unexpected error occurred");
+ LOG.error("An unexpected error occurred", e);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown from NormalizedNodeInputStreamReader when the input stream does not contain
+ * valid serialized data.
+ *
+ * @author Thomas Pantelis
+ */
+public class InvalidNormalizedNodeStreamException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * @param message detail describing why the stream contents are invalid
+ */
+ public InvalidNormalizedNodeStreamException(String message) {
+ super(message);
+ }
+}
private final StringBuilder reusableStringBuilder = new StringBuilder(50);
+ private boolean readSignatureMarker = true;
+
public NormalizedNodeInputStreamReader(InputStream stream) throws IOException {
Preconditions.checkNotNull(stream);
input = new DataInputStream(stream);
@Override
public NormalizedNode<?, ?> readNormalizedNode() throws IOException {
+ // First call validates the one-time stream header, then one node is read.
+ readSignatureMarkerAndVersionIfNeeded();
+ return readNormalizedNodeInternal();
+ }
+
+ // Consumes the one-time stream header on the first read: a signature byte that must
+ // match NormalizedNodeOutputStreamWriter.SIGNATURE_MARKER, followed by a version short
+ // (currently ignored). Subsequent calls are no-ops. A wrong marker means the stream was
+ // not produced by NormalizedNodeOutputStreamWriter, so fail fast with a clear exception.
+ private void readSignatureMarkerAndVersionIfNeeded() throws IOException {
+ if(readSignatureMarker) {
+ readSignatureMarker = false;
+
+ byte marker = input.readByte();
+ if(marker != NormalizedNodeOutputStreamWriter.SIGNATURE_MARKER) {
+ throw new InvalidNormalizedNodeStreamException(String.format(
+ "Invalid signature marker: %d", marker));
+ }
+
+ input.readShort(); // read the version - not currently used/needed.
+ }
+ }
+
+ private NormalizedNode<?, ?> readNormalizedNodeInternal() throws IOException {
// each node should start with a byte
byte nodeType = input.readByte();
return bytes;
case ValueTypes.YANG_IDENTIFIER_TYPE :
- return readYangInstanceIdentifier();
+ return readYangInstanceIdentifierInternal();
default :
return null;
}
public YangInstanceIdentifier readYangInstanceIdentifier() throws IOException {
+ // Public entry point: validate the stream header once, then delegate to the internal
+ // form (also used when an identifier appears nested inside a node value, headerless).
+ readSignatureMarkerAndVersionIfNeeded();
+ return readYangInstanceIdentifierInternal();
+ }
+
+ private YangInstanceIdentifier readYangInstanceIdentifierInternal() throws IOException {
int size = input.readInt();
List<PathArgument> pathArguments = new ArrayList<>(size);
lastLeafSetQName = nodeType;
- LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNode();
+ LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNodeInternal();
while(child != null) {
builder.withChild(child);
- child = (LeafSetEntryNode<Object>)readNormalizedNode();
+ child = (LeafSetEntryNode<Object>)readNormalizedNodeInternal();
}
return builder;
}
NormalizedNodeContainerBuilder builder) throws IOException {
LOG.debug("Reading data container (leaf nodes) nodes");
- NormalizedNode<?, ?> child = readNormalizedNode();
+ NormalizedNode<?, ?> child = readNormalizedNodeInternal();
while(child != null) {
builder.addChild(child);
- child = readNormalizedNode();
+ child = readNormalizedNodeInternal();
}
return builder;
}
private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodeOutputStreamWriter.class);
+ static final byte SIGNATURE_MARKER = (byte) 0xab;
+ static final short CURRENT_VERSION = (short) 1;
+
static final byte IS_CODE_VALUE = 1;
static final byte IS_STRING_VALUE = 2;
static final byte IS_NULL_VALUE = 3;
private NormalizedNodeWriter normalizedNodeWriter;
+ private boolean wroteSignatureMarker;
+
public NormalizedNodeOutputStreamWriter(OutputStream stream) throws IOException {
Preconditions.checkNotNull(stream);
output = new DataOutputStream(stream);
}
public void writeNormalizedNode(NormalizedNode<?, ?> node) throws IOException {
+ // Lazily emit the stream header before the first node so readers can validate the format.
+ writeSignatureMarkerAndVersionIfNeeded();
normalizedNodeWriter().write(node);
}
+ // Writes the header (SIGNATURE_MARKER byte + CURRENT_VERSION short) exactly once per
+ // stream, regardless of which public write method is invoked first.
+ private void writeSignatureMarkerAndVersionIfNeeded() throws IOException {
+ if(!wroteSignatureMarker) {
+ output.writeByte(SIGNATURE_MARKER);
+ output.writeShort(CURRENT_VERSION);
+ wroteSignatureMarker = true;
+ }
+ }
+
@Override
public void leafNode(YangInstanceIdentifier.NodeIdentifier name, Object value) throws IOException, IllegalArgumentException {
Preconditions.checkNotNull(name, "Node identifier should not be null");
private void startNode(final QName qName, byte nodeType) throws IOException {
Preconditions.checkNotNull(qName, "QName of node identifier should not be null.");
+
+ writeSignatureMarkerAndVersionIfNeeded();
+
// First write the type of node
output.writeByte(nodeType);
// Write Start Tag
}
public void writeYangInstanceIdentifier(YangInstanceIdentifier identifier) throws IOException {
+ // Public entry point: emit the header if needed, then delegate to the internal form
+ // (which is reused for identifiers written as nested values, without a header).
+ writeSignatureMarkerAndVersionIfNeeded();
+ writeYangInstanceIdentifierInternal(identifier);
+ }
+
+ private void writeYangInstanceIdentifierInternal(YangInstanceIdentifier identifier) throws IOException {
Iterable<YangInstanceIdentifier.PathArgument> pathArguments = identifier.getPathArguments();
int size = Iterables.size(pathArguments);
output.writeInt(size);
output.write(bytes);
break;
case ValueTypes.YANG_IDENTIFIER_TYPE:
- writeYangInstanceIdentifier((YangInstanceIdentifier) value);
+ writeYangInstanceIdentifierInternal((YangInstanceIdentifier) value);
break;
case ValueTypes.NULL_TYPE :
break;
import org.apache.commons.lang.SerializationUtils;
import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.cluster.datastore.util.TestModel;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
public class NormalizedNodeStreamReaderWriterTest {
@Test
- public void testNormalizedNodeStreamReaderWriter() throws IOException {
+ public void testNormalizedNodeStreaming() throws IOException {
- testNormalizedNodeStreamReaderWriter(createTestContainer());
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
+
+ NormalizedNode<?, ?> testContainer = createTestContainer();
+ writer.writeNormalizedNode(testContainer);
QName toaster = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","toaster");
QName darknessFactor = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","darknessFactor");
withNodeIdentifier(new NodeIdentifier(toaster)).
withChild(ImmutableNodes.leafNode(darknessFactor, "1000")).build();
- testNormalizedNodeStreamReaderWriter(Builders.containerBuilder().
+ ContainerNode toasterContainer = Builders.containerBuilder().
withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME)).
- withChild(toasterNode).build());
+ withChild(toasterNode).build();
+ writer.writeNormalizedNode(toasterContainer);
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
+
+ NormalizedNode<?,?> node = reader.readNormalizedNode();
+ Assert.assertEquals(testContainer, node);
+
+ node = reader.readNormalizedNode();
+ Assert.assertEquals(toasterContainer, node);
+
+ writer.close();
}
private NormalizedNode<?, ?> createTestContainer() {
build();
}
- private void testNormalizedNodeStreamReaderWriter(NormalizedNode<?, ?> input) throws IOException {
+ @Test
+ public void testYangInstanceIdentifierStreaming() throws IOException {
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).
+ node(TestModel.OUTER_LIST_QNAME).nodeWithKey(
+ TestModel.INNER_LIST_QNAME, TestModel.ID_QNAME, 10).build();
- byte[] byteData = null;
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer =
+ new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
+ writer.writeYangInstanceIdentifier(path);
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
+
+ YangInstanceIdentifier newPath = reader.readYangInstanceIdentifier();
+ Assert.assertEquals(path, newPath);
+
+ writer.close();
+ }
+
+ @Test
+ public void testNormalizedNodeAndYangInstanceIdentifierStreaming() throws IOException {
- try(ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
- NormalizedNodeStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream)) {
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
- NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(writer);
- normalizedNodeWriter.write(input);
- byteData = byteArrayOutputStream.toByteArray();
+ NormalizedNode<?, ?> testContainer = TestModel.createBaseTestContainerBuilder().build();
+ writer.writeNormalizedNode(testContainer);
- }
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).
+ node(TestModel.OUTER_LIST_QNAME).nodeWithKey(
+ TestModel.INNER_LIST_QNAME, TestModel.ID_QNAME, 10).build();
+
+ writer.writeYangInstanceIdentifier(path);
NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
- new ByteArrayInputStream(byteData));
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
NormalizedNode<?,?> node = reader.readNormalizedNode();
- Assert.assertEquals(input, node);
+ Assert.assertEquals(testContainer, node);
+
+ YangInstanceIdentifier newPath = reader.readYangInstanceIdentifier();
+ Assert.assertEquals(path, newPath);
+
+ writer.close();
+ }
+
+ @Test(expected=InvalidNormalizedNodeStreamException.class, timeout=10000)
+ public void testInvalidNormalizedNodeStream() throws IOException {
+ byte[] protobufBytes = new NormalizedNodeToNodeCodec(null).encode(
+ TestModel.createBaseTestContainerBuilder().build()).getNormalizedNode().toByteArray();
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(protobufBytes));
+
+ reader.readNormalizedNode();
+ }
+
+ @Test(expected=InvalidNormalizedNodeStreamException.class, timeout=10000)
+ public void testInvalidYangInstanceIdentifierStream() throws IOException {
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).build();
+
+ byte[] protobufBytes = ShardTransactionMessages.DeleteData.newBuilder().setInstanceIdentifierPathArguments(
+ InstanceIdentifierUtils.toSerializable(path)).build().toByteArray();
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(protobufBytes));
+
+ reader.readYangInstanceIdentifier();
}
@Test
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-impl</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </dependency>
+
</dependencies>
<build>
import akka.util.Timeout;
import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang3.text.WordUtils;
import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
import org.opendaylight.controller.cluster.datastore.config.FileConfigurationReader;
import org.opendaylight.controller.cluster.raft.ConfigParams;
*/
public class DatastoreContext {
- private final InMemoryDOMDataStoreConfigProperties dataStoreProperties;
- private final Duration shardTransactionIdleTimeout;
- private final int operationTimeoutInSeconds;
- private final String dataStoreMXBeanType;
- private final ConfigParams shardRaftConfig;
- private final int shardTransactionCommitTimeoutInSeconds;
- private final int shardTransactionCommitQueueCapacity;
- private final Timeout shardInitializationTimeout;
- private final Timeout shardLeaderElectionTimeout;
- private final boolean persistent;
- private final ConfigurationReader configurationReader;
- private final long shardElectionTimeoutFactor;
-
- private DatastoreContext(InMemoryDOMDataStoreConfigProperties dataStoreProperties,
- ConfigParams shardRaftConfig, String dataStoreMXBeanType, int operationTimeoutInSeconds,
- Duration shardTransactionIdleTimeout, int shardTransactionCommitTimeoutInSeconds,
- int shardTransactionCommitQueueCapacity, Timeout shardInitializationTimeout,
- Timeout shardLeaderElectionTimeout,
- boolean persistent, ConfigurationReader configurationReader, long shardElectionTimeoutFactor) {
- this.dataStoreProperties = dataStoreProperties;
- this.shardRaftConfig = shardRaftConfig;
- this.dataStoreMXBeanType = dataStoreMXBeanType;
- this.operationTimeoutInSeconds = operationTimeoutInSeconds;
- this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
- this.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
- this.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
- this.shardInitializationTimeout = shardInitializationTimeout;
- this.shardLeaderElectionTimeout = shardLeaderElectionTimeout;
- this.persistent = persistent;
- this.configurationReader = configurationReader;
- this.shardElectionTimeoutFactor = shardElectionTimeoutFactor;
+ public static final Duration DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT = Duration.create(10, TimeUnit.MINUTES);
+ public static final int DEFAULT_OPERATION_TIMEOUT_IN_SECONDS = 5;
+ public static final int DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS = 30;
+ public static final int DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE = 1000;
+ public static final int DEFAULT_SNAPSHOT_BATCH_COUNT = 20000;
+ public static final int DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS = 500;
+ public static final int DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS = DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS * 10;
+ public static final int DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY = 20000;
+ public static final Timeout DEFAULT_SHARD_INITIALIZATION_TIMEOUT = new Timeout(5, TimeUnit.MINUTES);
+ public static final Timeout DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT = new Timeout(30, TimeUnit.SECONDS);
+ public static final boolean DEFAULT_PERSISTENT = true;
+ public static final FileConfigurationReader DEFAULT_CONFIGURATION_READER = new FileConfigurationReader();
+ public static final int DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE = 12;
+ public static final int DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR = 2;
+ public static final int DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT = 100;
+ public static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
+
+ private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
+ private Duration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+ private int operationTimeoutInSeconds = DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
+ private String dataStoreMXBeanType;
+ private int shardTransactionCommitTimeoutInSeconds = DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
+ private int shardTransactionCommitQueueCapacity = DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY;
+ private Timeout shardInitializationTimeout = DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+ private Timeout shardLeaderElectionTimeout = DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT;
+ private boolean persistent = DEFAULT_PERSISTENT;
+ private ConfigurationReader configurationReader = DEFAULT_CONFIGURATION_READER;
+ private long transactionCreationInitialRateLimit = DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
+ private DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
+ private String dataStoreType = UNKNOWN_DATA_STORE_TYPE;
+
+ // Private: instances are created only through the Builder. Seeds the embedded raft
+ // config with the raft-related defaults; the remaining fields carry their defaults
+ // via field initializers above.
+ private DatastoreContext(){
+ setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
+ setSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT);
+ setHeartbeatInterval(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS);
+ setIsolatedLeaderCheckInterval(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS);
+ setSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE);
}
public static Builder newBuilder() {
}
public ConfigParams getShardRaftConfig() {
- return shardRaftConfig;
+ return raftConfig;
}
public int getShardTransactionCommitTimeoutInSeconds() {
}
public long getShardElectionTimeoutFactor(){
- return this.shardElectionTimeoutFactor;
+ return raftConfig.getElectionTimeoutFactor();
+ }
+
+ // Type tag for this datastore instance; defaults to UNKNOWN_DATA_STORE_TYPE until
+ // set via Builder.dataStoreType().
+ public String getDataStoreType(){
+ return dataStoreType;
+ }
+
+ // Initial rate limit for transaction creation; defaults to
+ // DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT.
+ public long getTransactionCreationInitialRateLimit() {
+ return transactionCreationInitialRateLimit;
+ }
+
+ // Stores the shard heartbeat interval (millis) into the embedded raft config.
+ private void setHeartbeatInterval(long shardHeartbeatIntervalInMillis){
+ raftConfig.setHeartBeatInterval(new FiniteDuration(shardHeartbeatIntervalInMillis,
+ TimeUnit.MILLISECONDS));
+ }
+
+ // Forwards the journal recovery batch size to the embedded raft config.
+ private void setShardJournalRecoveryLogBatchSize(int shardJournalRecoveryLogBatchSize){
+ raftConfig.setJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
+ }
+
+
+ // Forwards the isolated-leader check interval (millis) to the embedded raft config.
+ private void setIsolatedLeaderCheckInterval(long shardIsolatedLeaderCheckIntervalInMillis) {
+ raftConfig.setIsolatedLeaderCheckInterval(
+ new FiniteDuration(shardIsolatedLeaderCheckIntervalInMillis, TimeUnit.MILLISECONDS));
+ }
+
+ // Forwards the election timeout factor to the embedded raft config.
+ private void setElectionTimeoutFactor(long shardElectionTimeoutFactor) {
+ raftConfig.setElectionTimeoutFactor(shardElectionTimeoutFactor);
+ }
+
+ // Forwards the snapshot data threshold percentage to the embedded raft config.
+ private void setSnapshotDataThresholdPercentage(int shardSnapshotDataThresholdPercentage) {
+ raftConfig.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
+ }
+
+ // Forwards the snapshot batch count to the embedded raft config.
+ private void setSnapshotBatchCount(int shardSnapshotBatchCount) {
+ raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
+ }
public static class Builder {
- private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
- private Duration shardTransactionIdleTimeout = Duration.create(10, TimeUnit.MINUTES);
- private int operationTimeoutInSeconds = 5;
- private String dataStoreMXBeanType;
- private int shardTransactionCommitTimeoutInSeconds = 30;
- private int shardJournalRecoveryLogBatchSize = 1000;
- private int shardSnapshotBatchCount = 20000;
- private int shardHeartbeatIntervalInMillis = 500;
- private int shardTransactionCommitQueueCapacity = 20000;
- private Timeout shardInitializationTimeout = new Timeout(5, TimeUnit.MINUTES);
- private Timeout shardLeaderElectionTimeout = new Timeout(30, TimeUnit.SECONDS);
- private boolean persistent = true;
- private ConfigurationReader configurationReader = new FileConfigurationReader();
- private int shardIsolatedLeaderCheckIntervalInMillis = shardHeartbeatIntervalInMillis * 10;
- private int shardSnapshotDataThresholdPercentage = 12;
- private long shardElectionTimeoutFactor = 2;
+ private DatastoreContext datastoreContext = new DatastoreContext();
public Builder shardTransactionIdleTimeout(Duration shardTransactionIdleTimeout) {
- this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
+ datastoreContext.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
return this;
}
public Builder operationTimeoutInSeconds(int operationTimeoutInSeconds) {
- this.operationTimeoutInSeconds = operationTimeoutInSeconds;
+ datastoreContext.operationTimeoutInSeconds = operationTimeoutInSeconds;
return this;
}
public Builder dataStoreMXBeanType(String dataStoreMXBeanType) {
- this.dataStoreMXBeanType = dataStoreMXBeanType;
+ datastoreContext.dataStoreMXBeanType = dataStoreMXBeanType;
return this;
}
public Builder dataStoreProperties(InMemoryDOMDataStoreConfigProperties dataStoreProperties) {
- this.dataStoreProperties = dataStoreProperties;
+ datastoreContext.dataStoreProperties = dataStoreProperties;
return this;
}
public Builder shardTransactionCommitTimeoutInSeconds(int shardTransactionCommitTimeoutInSeconds) {
- this.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
+ datastoreContext.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
return this;
}
public Builder shardJournalRecoveryLogBatchSize(int shardJournalRecoveryLogBatchSize) {
- this.shardJournalRecoveryLogBatchSize = shardJournalRecoveryLogBatchSize;
+ datastoreContext.setShardJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
return this;
}
public Builder shardSnapshotBatchCount(int shardSnapshotBatchCount) {
- this.shardSnapshotBatchCount = shardSnapshotBatchCount;
+ datastoreContext.setSnapshotBatchCount(shardSnapshotBatchCount);
return this;
}
public Builder shardSnapshotDataThresholdPercentage(int shardSnapshotDataThresholdPercentage) {
- this.shardSnapshotDataThresholdPercentage = shardSnapshotDataThresholdPercentage;
+ datastoreContext.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
return this;
}
-
public Builder shardHeartbeatIntervalInMillis(int shardHeartbeatIntervalInMillis) {
- this.shardHeartbeatIntervalInMillis = shardHeartbeatIntervalInMillis;
+ datastoreContext.setHeartbeatInterval(shardHeartbeatIntervalInMillis);
return this;
}
public Builder shardTransactionCommitQueueCapacity(int shardTransactionCommitQueueCapacity) {
- this.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
+ datastoreContext.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
return this;
}
public Builder shardInitializationTimeout(long timeout, TimeUnit unit) {
- this.shardInitializationTimeout = new Timeout(timeout, unit);
+ datastoreContext.shardInitializationTimeout = new Timeout(timeout, unit);
return this;
}
public Builder shardLeaderElectionTimeout(long timeout, TimeUnit unit) {
- this.shardLeaderElectionTimeout = new Timeout(timeout, unit);
+ datastoreContext.shardLeaderElectionTimeout = new Timeout(timeout, unit);
return this;
}
public Builder configurationReader(ConfigurationReader configurationReader){
- this.configurationReader = configurationReader;
+ datastoreContext.configurationReader = configurationReader;
return this;
}
public Builder persistent(boolean persistent){
- this.persistent = persistent;
+ datastoreContext.persistent = persistent;
return this;
}
public Builder shardIsolatedLeaderCheckIntervalInMillis(int shardIsolatedLeaderCheckIntervalInMillis) {
- this.shardIsolatedLeaderCheckIntervalInMillis = shardIsolatedLeaderCheckIntervalInMillis;
+ datastoreContext.setIsolatedLeaderCheckInterval(shardIsolatedLeaderCheckIntervalInMillis);
return this;
}
public Builder shardElectionTimeoutFactor(long shardElectionTimeoutFactor){
- this.shardElectionTimeoutFactor = shardElectionTimeoutFactor;
+ datastoreContext.setElectionTimeoutFactor(shardElectionTimeoutFactor);
return this;
}
+ /** Sets the initial allowed transaction-creation rate (tx/sec) before back-pressure adjusts it. */
+ public Builder transactionCreationInitialRateLimit(long initialRateLimit){
+ datastoreContext.transactionCreationInitialRateLimit = initialRateLimit;
+ return this;
+ }
- public DatastoreContext build() {
- DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
- raftConfig.setHeartBeatInterval(new FiniteDuration(shardHeartbeatIntervalInMillis,
- TimeUnit.MILLISECONDS));
- raftConfig.setJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
- raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
- raftConfig.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
- raftConfig.setElectionTimeoutFactor(shardElectionTimeoutFactor);
- raftConfig.setIsolatedLeaderCheckInterval(
- new FiniteDuration(shardIsolatedLeaderCheckIntervalInMillis, TimeUnit.MILLISECONDS));
+ /**
+  * Sets the data store type (e.g. "config" or "operational") and derives the
+  * MXBean type name from it ("Distributed" + capitalized type + "Datastore").
+  */
+ public Builder dataStoreType(String dataStoreType){
+ datastoreContext.dataStoreType = dataStoreType;
+ datastoreContext.dataStoreMXBeanType = "Distributed" + WordUtils.capitalize(dataStoreType) + "Datastore";
+ return this;
+ }
- return new DatastoreContext(dataStoreProperties, raftConfig, dataStoreMXBeanType,
- operationTimeoutInSeconds, shardTransactionIdleTimeout,
- shardTransactionCommitTimeoutInSeconds, shardTransactionCommitQueueCapacity,
- shardInitializationTimeout, shardLeaderElectionTimeout,
- persistent, configurationReader, shardElectionTimeoutFactor);
+ public DatastoreContext build() {
+ return datastoreContext;
}
}
}
private final ActorContext actorContext;
- public DistributedDataStore(ActorSystem actorSystem, String type, ClusterWrapper cluster,
+ public DistributedDataStore(ActorSystem actorSystem, ClusterWrapper cluster,
Configuration configuration, DatastoreContext datastoreContext) {
Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
- Preconditions.checkNotNull(type, "type should not be null");
Preconditions.checkNotNull(cluster, "cluster should not be null");
Preconditions.checkNotNull(configuration, "configuration should not be null");
Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
+ String type = datastoreContext.getDataStoreType();
+
String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
LOG.info("Creating ShardManager : {}", shardManagerId);
actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
- ShardManager.props(type, cluster, configuration, datastoreContext)
+ ShardManager.props(cluster, configuration, datastoreContext)
.withMailbox(ActorContext.MAILBOX), shardManagerId ),
cluster, configuration, datastoreContext);
}
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
+ actorContext.acquireTxCreationPermit();
return new TransactionProxy(actorContext, TransactionProxy.TransactionType.WRITE_ONLY);
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
+ actorContext.acquireTxCreationPermit();
return new TransactionProxy(actorContext, TransactionProxy.TransactionType.READ_WRITE);
}
private static volatile ActorSystem persistentActorSystem = null;
- public static DistributedDataStore createInstance(String name, SchemaService schemaService,
+ public static DistributedDataStore createInstance(SchemaService schemaService,
DatastoreContext datastoreContext, BundleContext bundleContext) {
ActorSystem actorSystem = getOrCreateInstance(bundleContext, datastoreContext.getConfigurationReader());
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
final DistributedDataStore dataStore =
- new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
+ new DistributedDataStore(actorSystem, new ClusterWrapperImpl(actorSystem),
config, datastoreContext);
ShardStrategyFactory.setConfiguration(config);
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
// The state of this Shard
private final InMemoryDOMDataStore store;
- private final LoggingAdapter LOG = Logging.getLogger(getContext().system(), this);
-
/// The name of this shard
private final ShardIdentifier name;
}
if (message instanceof RecoveryFailure){
- LOG.error(((RecoveryFailure) message).cause(), "{}: Recovery failed because of this cause",
- persistenceId());
+ LOG.error("{}: Recovery failed because of this cause",
+ persistenceId(), ((RecoveryFailure) message).cause());
// Even though recovery failed, we still need to finish our recovery, eg send the
// ActorInitialized message and start the txCommitTimeoutCheckSchedule.
if(cohortEntry != null) {
long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
if(elapsed > transactionCommitTimeout) {
- LOG.warning("{}: Current transaction {} has timed out after {} ms - aborting",
+ LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);
doAbortTransaction(cohortEntry.getTransactionID(), null);
new ModificationPayload(cohortEntry.getModification()));
}
} catch (Exception e) {
- LOG.error(e, "{} An exception occurred while preCommitting transaction {}",
- persistenceId(), cohortEntry.getTransactionID());
+ LOG.error("{} An exception occurred while preCommitting transaction {}",
+ persistenceId(), cohortEntry.getTransactionID(), e);
shardMBean.incrementFailedTransactionsCount();
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
}
} catch (Exception e) {
sender.tell(new akka.actor.Status.Failure(e), getSelf());
- LOG.error(e, "{}, An exception occurred while committing transaction {}", persistenceId(), transactionID);
+ LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
+ transactionID, e);
shardMBean.incrementFailedTransactionsCount();
} finally {
commitCoordinator.currentTransactionComplete(transactionID, true);
@Override
public void onFailure(final Throwable t) {
- LOG.error(t, "{}: An exception happened during abort", persistenceId());
+ LOG.error("{}: An exception happened during abort", persistenceId(), t);
if(sender != null) {
sender.tell(new akka.actor.Status.Failure(t), self);
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
} catch (InterruptedException | ExecutionException e) {
shardMBean.incrementFailedTransactionsCount();
- LOG.error(e, "{}: Failed to commit", persistenceId());
+ LOG.error("{}: Failed to commit", persistenceId(), e);
}
}
try {
currentLogRecoveryBatch.add(((ModificationPayload) data).getModification());
} catch (ClassNotFoundException | IOException e) {
- LOG.error(e, "{}: Error extracting ModificationPayload", persistenceId());
+ LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
}
} else if (data instanceof CompositeModificationPayload) {
currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
shardMBean.incrementCommittedTransactionCount();
} catch (InterruptedException | ExecutionException e) {
shardMBean.incrementFailedTransactionsCount();
- LOG.error(e, "{}: Failed to commit", persistenceId());
+ LOG.error("{}: Failed to commit", persistenceId(), e);
}
}
}
try {
applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
} catch (ClassNotFoundException | IOException e) {
- LOG.error(e, "{}: Error extracting ModificationPayload", persistenceId());
+ LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
}
}
else if (data instanceof CompositeModificationPayload) {
transaction.write(DATASTORE_ROOT, node);
syncCommitTransaction(transaction);
} catch (InterruptedException | ExecutionException e) {
- LOG.error(e, "{}: An exception occurred when applying snapshot", persistenceId());
+ LOG.error("{}: An exception occurred when applying snapshot", persistenceId(), e);
} finally {
LOG.info("{}: Done applying snapshot", persistenceId());
}
import akka.actor.ActorRef;
import akka.actor.Status;
-import akka.event.LoggingAdapter;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.LinkedList;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.slf4j.Logger;
/**
* Coordinates commits for a shard ensuring only one concurrent 3-phase commit.
private final int queueCapacity;
- private final LoggingAdapter log;
+ private final Logger log;
private final String name;
- public ShardCommitCoordinator(long cacheExpiryTimeoutInSec, int queueCapacity, LoggingAdapter log,
+ public ShardCommitCoordinator(long cacheExpiryTimeoutInSec, int queueCapacity, Logger log,
String name) {
cohortCache = CacheBuilder.newBuilder().expireAfterAccess(
cacheExpiryTimeoutInSec, TimeUnit.SECONDS).build();
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.cluster.ClusterEvent;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.japi.Function;
import akka.japi.Procedure;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
/**
*/
public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ private final Logger LOG = LoggerFactory.getLogger(getClass());
// Stores a mapping between a member name and the address of the member
// Member names look like "member-1", "member-2" etc and are as specified
private final DataPersistenceProvider dataPersistenceProvider;
/**
- * @param type defines the kind of data that goes into shards created by this shard manager. Examples of type would be
- * configuration or operational
*/
- protected ShardManager(String type, ClusterWrapper cluster, Configuration configuration,
+ protected ShardManager(ClusterWrapper cluster, Configuration configuration,
DatastoreContext datastoreContext) {
- this.type = Preconditions.checkNotNull(type, "type should not be null");
this.cluster = Preconditions.checkNotNull(cluster, "cluster should not be null");
this.configuration = Preconditions.checkNotNull(configuration, "configuration should not be null");
this.datastoreContext = datastoreContext;
this.dataPersistenceProvider = createDataPersistenceProvider(datastoreContext.isPersistent());
+ this.type = datastoreContext.getDataStoreType();
// Subscribe this actor to cluster member events
cluster.subscribeToMemberEvents(getSelf());
return (persistent) ? new PersistentDataProvider() : new NonPersistentDataProvider();
}
- public static Props props(final String type,
+ public static Props props(
final ClusterWrapper cluster,
final Configuration configuration,
final DatastoreContext datastoreContext) {
- Preconditions.checkNotNull(type, "type should not be null");
Preconditions.checkNotNull(cluster, "cluster should not be null");
Preconditions.checkNotNull(configuration, "configuration should not be null");
- return Props.create(new ShardManagerCreator(type, cluster, configuration, datastoreContext));
+ return Props.create(new ShardManagerCreator(cluster, configuration, datastoreContext));
}
@Override
knownModules = ImmutableSet.copyOf(msg.getModules());
} else if (message instanceof RecoveryFailure) {
RecoveryFailure failure = (RecoveryFailure) message;
- LOG.error(failure.cause(), "Recovery failed");
+ LOG.error("Recovery failed", failure.cause());
} else if (message instanceof RecoveryCompleted) {
LOG.info("Recovery complete : {}", persistenceId());
new Function<Throwable, SupervisorStrategy.Directive>() {
@Override
public SupervisorStrategy.Directive apply(Throwable t) {
- StringBuilder sb = new StringBuilder();
- for(StackTraceElement element : t.getStackTrace()) {
- sb.append("\n\tat ")
- .append(element.toString());
- }
- LOG.warning("Supervisor Strategy of resume applied {}",sb.toString());
+ LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
return SupervisorStrategy.resume();
}
}
private static class ShardManagerCreator implements Creator<ShardManager> {
private static final long serialVersionUID = 1L;
- final String type;
final ClusterWrapper cluster;
final Configuration configuration;
final DatastoreContext datastoreContext;
- ShardManagerCreator(String type, ClusterWrapper cluster,
+ ShardManagerCreator(ClusterWrapper cluster,
Configuration configuration, DatastoreContext datastoreContext) {
- this.type = type;
this.cluster = cluster;
this.configuration = configuration;
this.datastoreContext = datastoreContext;
@Override
public ShardManager create() throws Exception {
- return new ShardManager(type, cluster, configuration, datastoreContext);
+ return new ShardManager(cluster, configuration, datastoreContext);
}
}
*/
package org.opendaylight.controller.cluster.datastore;
-import akka.event.LoggingAdapter;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.Collection;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
/**
* Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
private final SchemaContext schemaContext;
private final String shardName;
private final ExecutorService executor;
- private final LoggingAdapter log;
+ private final Logger log;
private final String name;
- ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext, LoggingAdapter log,
+ ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext, Logger log,
String name) {
this.schemaContext = schemaContext;
this.shardName = shardName;
import akka.actor.Terminated;
import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.datastore.messages.Monitor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TerminationMonitor extends UntypedActor{
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ private static final Logger LOG = LoggerFactory.getLogger(TerminationMonitor.class);
public TerminationMonitor(){
- LOG.info("Created TerminationMonitor");
+ LOG.debug("Created TerminationMonitor");
}
@Override public void onReceive(Object message) throws Exception {
import akka.actor.ActorSelection;
import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
private final List<Future<ActorSelection>> cohortFutures;
private volatile List<ActorSelection> cohorts;
private final String transactionId;
+ // Callback that does nothing; used when no timing/rate-limit feedback is needed
+ // (e.g. when there are no cohorts to commit against).
+ private static final OperationCallback NO_OP_CALLBACK = new OperationCallback() {
+ @Override
+ public void run() {
+ }
+
+ @Override
+ public void success() {
+ }
+
+ @Override
+ public void failure() {
+ }
+ };
public ThreePhaseCommitCohortProxy(ActorContext actorContext,
List<Future<ActorSelection>> cohortFutures, String transactionId) {
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, cohort);
}
-
- futureList.add(actorContext.executeOperationAsync(cohort, message));
+ futureList.add(actorContext.executeOperationAsync(cohort, message, actorContext.getTransactionCommitOperationTimeout()));
}
return Futures.sequence(futureList, actorContext.getActorSystem().dispatcher());
@Override
public ListenableFuture<Void> commit() {
- return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
- CommitTransactionReply.SERIALIZABLE_CLASS, true);
+ OperationCallback operationCallback = (cohortFutures.size() == 0) ? NO_OP_CALLBACK :
+ new CommitCallback(actorContext);
+
+ return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
+ CommitTransactionReply.SERIALIZABLE_CLASS, true, operationCallback);
+ }
+
+ private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
+ final Class<?> expectedResponseClass, final boolean propagateException) {
+ return voidOperation(operationName, message, expectedResponseClass, propagateException, NO_OP_CALLBACK);
}
private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
- final Class<?> expectedResponseClass, final boolean propagateException) {
+ final Class<?> expectedResponseClass, final boolean propagateException, final OperationCallback callback) {
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {} {}", transactionId, operationName);
if(cohorts != null) {
finishVoidOperation(operationName, message, expectedResponseClass, propagateException,
- returnFuture);
+ returnFuture, callback);
} else {
buildCohortList().onComplete(new OnComplete<Void>() {
@Override
}
} else {
finishVoidOperation(operationName, message, expectedResponseClass,
- propagateException, returnFuture);
+ propagateException, returnFuture, callback);
}
}
}, actorContext.getActorSystem().dispatcher());
}
private void finishVoidOperation(final String operationName, final Object message,
- final Class<?> expectedResponseClass, final boolean propagateException,
- final SettableFuture<Void> returnFuture) {
+ final Class<?> expectedResponseClass, final boolean propagateException,
+ final SettableFuture<Void> returnFuture, final OperationCallback callback) {
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {} finish {}", transactionId, operationName);
}
+
+ callback.run();
+
Future<Iterable<Object>> combinedFuture = invokeCohorts(message);
combinedFuture.onComplete(new OnComplete<Iterable<Object>>() {
}
if(exceptionToPropagate != null) {
+
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
operationName, exceptionToPropagate);
}
returnFuture.set(null);
}
+
+ callback.failure();
} else {
+
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
}
returnFuture.set(null);
+
+ callback.success();
}
}
}, actorContext.getActorSystem().dispatcher());
List<Future<ActorSelection>> getCohortFutures() {
return Collections.unmodifiableList(cohortFutures);
}
+
+ /**
+  * Lifecycle hooks around a cohort operation: run() is invoked just before the
+  * operation is started; then exactly one of success() or failure() is invoked
+  * when the combined cohort future completes.
+  */
+ private static interface OperationCallback {
+ void run();
+ void success();
+ void failure();
+ }
+
+ /**
+  * OperationCallback that times commit operations and uses the observed latency
+  * to auto-adjust the transaction-creation rate limit on the ActorContext.
+  */
+ private static class CommitCallback implements OperationCallback{
+
+ private static final Logger LOG = LoggerFactory.getLogger(CommitCallback.class);
+ private static final String COMMIT = "commit";
+
+ private final Timer commitTimer;
+ private final ActorContext actorContext;
+ private Timer.Context timerContext;
+
+ CommitCallback(ActorContext actorContext){
+ this.actorContext = actorContext;
+ commitTimer = actorContext.getOperationTimer(COMMIT);
+ }
+
+ @Override
+ public void run() {
+ // Start timing the commit; stopped in success(). Note: not stopped on failure().
+ timerContext = commitTimer.time();
+ }
+
+ @Override
+ public void success() {
+ timerContext.stop();
+
+ // Use the 98th-percentile commit latency as the "allowed" latency baseline.
+ Snapshot timerSnapshot = commitTimer.getSnapshot();
+ double allowedLatencyInNanos = timerSnapshot.get98thPercentile();
+
+ long commitTimeoutInSeconds = actorContext.getDatastoreContext()
+ .getShardTransactionCommitTimeoutInSeconds();
+ long commitTimeoutInNanos = TimeUnit.SECONDS.toNanos(commitTimeoutInSeconds);
+
+ // Derive how many transactions per second can be sustained: the timeout terms
+ // cancel, so this is effectively 1e9 / p98-latency-in-nanos = tx/sec at p98 latency.
+ double newRateLimit = ((double) commitTimeoutInNanos / allowedLatencyInNanos) / commitTimeoutInSeconds;
+
+ LOG.debug("Data Store {} commit rateLimit adjusted to {} allowedLatencyInNanos = {}",
+ actorContext.getDataStoreType(), newRateLimit, allowedLatencyInNanos);
+
+ actorContext.setTxCreationLimit(newRateLimit);
+ }
+
+ @Override
+ public void failure() {
+ // Reaching here means a commit did not complete within the transaction commit
+ // timeout (30 seconds by default). Timing data from a timed-out commit is not
+ // useful for adjusting the rate limit, so the current limit is left unchanged.
+ }
+ }
+
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
+ actorContext.acquireTxCreationPermit();
return allocateWriteTransaction(TransactionProxy.TransactionType.READ_WRITE);
}
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
+ actorContext.acquireTxCreationPermit();
return allocateWriteTransaction(TransactionProxy.TransactionType.WRITE_ONLY);
}
*/
package org.opendaylight.controller.cluster.datastore.compat;
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.japi.Creator;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
-import akka.japi.Creator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An actor to maintain backwards compatibility for the base Helium version where the 3-phase commit
*/
public class BackwardsCompatibleThreePhaseCommitCohort extends AbstractUntypedActor {
- private final LoggingAdapter LOG = Logging.getLogger(getContext().system(), this);
+ private static final Logger LOG = LoggerFactory.getLogger(BackwardsCompatibleThreePhaseCommitCohort.class);
private final String transactionId;
import akka.dispatch.Mapper;
import akka.pattern.AskTimeoutException;
import akka.util.Timeout;
+import com.codahale.metrics.JmxReporter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
+import com.google.common.util.concurrent.RateLimiter;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
* but should not be passed to actors especially remote actors
*/
public class ActorContext {
- private static final Logger
- LOG = LoggerFactory.getLogger(ActorContext.class);
-
- public static final String MAILBOX = "bounded-mailbox";
-
+ private static final Logger LOG = LoggerFactory.getLogger(ActorContext.class);
+ private static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
+ private static final String DISTRIBUTED_DATA_STORE_METRIC_REGISTRY = "distributed-data-store";
+ private static final String METRIC_RATE = "rate";
+ private static final String DOMAIN = "org.opendaylight.controller.cluster.datastore";
private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
new Mapper<Throwable, Throwable>() {
@Override
return actualFailure;
}
};
+ public static final String MAILBOX = "bounded-mailbox";
private final ActorSystem actorSystem;
private final ActorRef shardManager;
private final ClusterWrapper clusterWrapper;
private final Configuration configuration;
private final DatastoreContext datastoreContext;
- private volatile SchemaContext schemaContext;
private final FiniteDuration operationDuration;
private final Timeout operationTimeout;
private final String selfAddressHostPort;
+ private final RateLimiter txRateLimiter;
+ private final MetricRegistry metricRegistry = new MetricRegistry();
+ private final JmxReporter jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(DOMAIN).build();
private final int transactionOutstandingOperationLimit;
+ private final Timeout transactionCommitOperationTimeout;
+
+ private volatile SchemaContext schemaContext;
public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
ClusterWrapper clusterWrapper, Configuration configuration) {
this.clusterWrapper = clusterWrapper;
this.configuration = configuration;
this.datastoreContext = datastoreContext;
+ this.txRateLimiter = RateLimiter.create(datastoreContext.getTransactionCreationInitialRateLimit());
- operationDuration = Duration.create(datastoreContext.getOperationTimeoutInSeconds(),
- TimeUnit.SECONDS);
+ operationDuration = Duration.create(datastoreContext.getOperationTimeoutInSeconds(), TimeUnit.SECONDS);
operationTimeout = new Timeout(operationDuration);
+ transactionCommitOperationTimeout = new Timeout(Duration.create(getDatastoreContext().getShardTransactionCommitTimeoutInSeconds(),
+ TimeUnit.SECONDS));
+
Address selfAddress = clusterWrapper.getSelfAddress();
if (selfAddress != null && !selfAddress.host().isEmpty()) {
}
transactionOutstandingOperationLimit = new CommonConfig(this.getActorSystem().settings().config()).getMailBoxCapacity();
+ jmxReporter.start();
}
public DatastoreContext getDatastoreContext() {
public int getTransactionOutstandingOperationLimit(){
return transactionOutstandingOperationLimit;
}
+
+ /**
+ * This is a utility method that lets us get a Timer object for any operation. This is a little open-ended to allow
+ * us to create a timer for pretty much anything.
+ *
+ * @param operationName the name of the operation to time; becomes part of the metric name
+ * @return a Timer for the given operation from this context's metric registry
+ */
+ public Timer getOperationTimer(String operationName){
+ final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, datastoreContext.getDataStoreType(), operationName, METRIC_RATE);
+ return metricRegistry.timer(rate);
+ }
+
+ /**
+ * Get the type of the data store to which this ActorContext belongs
+ *
+ * @return the data store type (e.g. "config" or "operational")
+ */
+ public String getDataStoreType() {
+ return datastoreContext.getDataStoreType();
+ }
+
+ /**
+ * Set the number of transaction creation permits that are to be allowed
+ *
+ * @param permitsPerSecond the new transaction creation rate limit, in permits per second
+ */
+ public void setTxCreationLimit(double permitsPerSecond){
+ txRateLimiter.setRate(permitsPerSecond);
+ }
+
+ /**
+ * Get the current transaction creation rate limit
+ * @return the current transaction creation rate limit, in permits per second
+ */
+ public double getTxCreationLimit(){
+ return txRateLimiter.getRate();
+ }
+
+ /**
+ * Try to acquire a transaction creation permit. Will block if no permits are available.
+ */
+ public void acquireTxCreationPermit(){
+ txRateLimiter.acquire();
+ }
+
+ /**
+ * Return the operation timeout to be used when committing transactions
+ * @return the timeout used for transaction commit operations
+ */
+ public Timeout getTransactionCommitOperationTimeout(){
+ return transactionCommitOperationTimeout;
+ }
+
+
}
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.node.utils.stream.InvalidNormalizedNodeStreamException;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputStreamReader;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeOutputStreamWriter;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
}
public static NormalizedNode<?, ?> deserializeNormalizedNode(DataInput in) {
- try {
- boolean present = in.readBoolean();
- if(present) {
- NormalizedNodeInputStreamReader streamReader = streamReader(in);
- return streamReader.readNormalizedNode();
- }
- } catch (IOException e) {
- throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
- }
+ try {
+ return tryDeserializeNormalizedNode(in);
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
+ }
+ }
+
+ private static NormalizedNode<?, ?> tryDeserializeNormalizedNode(DataInput in) throws IOException {
+ boolean present = in.readBoolean();
+ if(present) {
+ NormalizedNodeInputStreamReader streamReader = streamReader(in);
+ return streamReader.readNormalizedNode();
+ }
return null;
}
public static NormalizedNode<?, ?> deserializeNormalizedNode(byte [] bytes) {
NormalizedNode<?, ?> node = null;
try {
- node = deserializeNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)));
- } catch(Exception e) {
- }
-
- if(node == null) {
- // Must be from legacy protobuf serialization - try that.
+ node = tryDeserializeNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)));
+ } catch(InvalidNormalizedNodeStreamException e) {
+ // Probably from legacy protobuf serialization - try that.
try {
NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(bytes);
node = new NormalizedNodeToNodeCodec(null).decode(serializedNode);
- } catch (InvalidProtocolBufferException e) {
+ } catch (InvalidProtocolBufferException e2) {
throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
}
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
}
return node;
}
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
- .dataStoreMXBeanType("DistributedConfigDatastore")
+ .dataStoreType("config")
.dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
.shardIsolatedLeaderCheckIntervalInMillis(
props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.shardElectionTimeoutFactor(props.getShardElectionTimeoutFactor().getValue())
+ .transactionCreationInitialRateLimit(props.getTxCreationInitialRateLimit().getValue())
.build();
- return DistributedDataStoreFactory.createInstance("config", getConfigSchemaServiceDependency(),
+ return DistributedDataStoreFactory.createInstance(getConfigSchemaServiceDependency(),
datastoreContext, bundleContext);
}
}
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
- .dataStoreMXBeanType("DistributedOperationalDatastore")
+ .dataStoreType("operational")
.dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
.shardIsolatedLeaderCheckIntervalInMillis(
props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.shardElectionTimeoutFactor(props.getShardElectionTimeoutFactor().getValue())
+ .transactionCreationInitialRateLimit(props.getTxCreationInitialRateLimit().getValue())
.build();
- return DistributedDataStoreFactory.createInstance("operational",
- getOperationalSchemaServiceDependency(), datastoreContext, bundleContext);
+ return DistributedDataStoreFactory.createInstance(getOperationalSchemaServiceDependency(),
+ datastoreContext, bundleContext);
}
public void setBundleContext(BundleContext bundleContext) {
description "The interval at which the leader of the shard will check if its majority
followers are active and term itself as isolated";
}
+
+ leaf tx-creation-initial-rate-limit {
+ default 100;
+ type non-zero-uint32-type;
+ description "The initial number of transactions per second that are allowed before the data store
+ should begin applying back pressure. This number is used only as initial guidance;
+ subsequently the data store measures commit latency and auto-adjusts the rate limit";
+ }
}
// Augments the 'configuration' choice node under modules/module.
--- /dev/null
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DatastoreContextTest {
+
+ private DatastoreContext.Builder builder;
+
+ @Before
+ public void setUp(){
+ builder = new DatastoreContext.Builder();
+ }
+
+ @Test
+ public void testDefaults(){
+ DatastoreContext build = builder.build();
+
+ assertEquals(DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT , build.getShardTransactionIdleTimeout());
+ assertEquals(DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, build.getOperationTimeoutInSeconds());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, build.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(DatastoreContext.DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE, build.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DatastoreContext.DEFAULT_SNAPSHOT_BATCH_COUNT, build.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, build.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY, build.getShardTransactionCommitQueueCapacity());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT, build.getShardInitializationTimeout());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT, build.getShardLeaderElectionTimeout());
+ assertEquals(DatastoreContext.DEFAULT_PERSISTENT, build.isPersistent());
+ assertEquals(DatastoreContext.DEFAULT_CONFIGURATION_READER, build.getConfigurationReader());
+ assertEquals(DatastoreContext.DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS, build.getShardRaftConfig().getIsolatedCheckInterval().length());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE, build.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR, build.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(DatastoreContext.DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT, build.getTransactionCreationInitialRateLimit());
+ }
+
+}
\ No newline at end of file
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
ShardStrategyFactory.setConfiguration(config);
+ datastoreContextBuilder.dataStoreType(typeName);
+
DatastoreContext datastoreContext = datastoreContextBuilder.build();
- DistributedDataStore dataStore = new DistributedDataStore(getSystem(), typeName, cluster,
+
+ DistributedDataStore dataStore = new DistributedDataStore(getSystem(), cluster,
config, datastoreContext);
SchemaContext schemaContext = SchemaContextHelper.full();
--- /dev/null
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class DistributedDataStoreTest extends AbstractActorTest {
+
+ private SchemaContext schemaContext;
+
+ @Mock
+ private ActorContext actorContext;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+
+ schemaContext = TestModel.createTestContext();
+
+ doReturn(schemaContext).when(actorContext).getSchemaContext();
+ }
+
+ @Test
+ public void testRateLimitingUsedInReadWriteTxCreation(){
+ DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext);
+
+ distributedDataStore.newReadWriteTransaction();
+
+ verify(actorContext, times(1)).acquireTxCreationPermit();
+ }
+
+ @Test
+ public void testRateLimitingUsedInWriteOnlyTxCreation(){
+ DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext);
+
+ distributedDataStore.newWriteOnlyTransaction();
+
+ verify(actorContext, times(1)).acquireTxCreationPermit();
+ }
+
+
+ @Test
+ public void testRateLimitingNotUsedInReadOnlyTxCreation(){
+ DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext);
+
+ distributedDataStore.newReadOnlyTransaction();
+ distributedDataStore.newReadOnlyTransaction();
+ distributedDataStore.newReadOnlyTransaction();
+
+ verify(actorContext, times(0)).acquireTxCreationPermit();
+ }
+
+}
\ No newline at end of file
package org.opendaylight.controller.cluster.datastore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.japi.Creator;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
+import java.net.URI;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import scala.concurrent.Await;
import scala.concurrent.Future;
-import java.net.URI;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
public class ShardManagerTest extends AbstractActorTest {
private static int ID_COUNTER = 1;
}
private Props newShardMgrProps() {
- return ShardManager.props(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(),
- DatastoreContext.newBuilder().build());
+
+ DatastoreContext.Builder builder = DatastoreContext.newBuilder();
+ builder.dataStoreType(shardMrgIDSuffix);
+ return ShardManager.props(new MockClusterWrapper(), new MockConfiguration(), builder.build());
}
@Test
public void testRecoveryApplicable(){
new JavaTestKit(getSystem()) {
{
- final Props persistentProps = ShardManager.props(shardMrgIDSuffix,
- new MockClusterWrapper(),
- new MockConfiguration(),
- DatastoreContext.newBuilder().persistent(true).build());
+ final Props persistentProps = ShardManager.props(new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().persistent(true).dataStoreType(shardMrgIDSuffix).build());
final TestActorRef<ShardManager> persistentShardManager =
TestActorRef.create(getSystem(), persistentProps);
assertTrue("Recovery Applicable", dataPersistenceProvider1.isRecoveryApplicable());
- final Props nonPersistentProps = ShardManager.props(shardMrgIDSuffix,
- new MockClusterWrapper(),
- new MockConfiguration(),
- DatastoreContext.newBuilder().persistent(false).build());
+ final Props nonPersistentProps = ShardManager.props(new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().persistent(false).dataStoreType(shardMrgIDSuffix).build());
final TestActorRef<ShardManager> nonPersistentShardManager =
TestActorRef.create(getSystem(), nonPersistentProps);
private static final long serialVersionUID = 1L;
@Override
public ShardManager create() throws Exception {
- return new ShardManager(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(), DatastoreContext.newBuilder().build()) {
+ return new ShardManager(new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().dataStoreType(shardMrgIDSuffix).build()) {
@Override
protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
DataPersistenceProviderMonitor dataPersistenceProviderMonitor
private final CountDownLatch recoveryComplete = new CountDownLatch(1);
TestShardManager(String shardMrgIDSuffix) {
- super(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(),
- DatastoreContext.newBuilder().build());
+ super(new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().dataStoreType(shardMrgIDSuffix).build());
}
@Override
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import akka.actor.ActorPath;
import akka.actor.ActorSelection;
import akka.actor.Props;
import akka.dispatch.Futures;
+import akka.util.Timeout;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
@Mock
private ActorContext actorContext;
+ @Mock
+ private DatastoreContext datastoreContext;
+
+ @Mock
+ private Timer commitTimer;
+
+ @Mock
+ private Timer.Context commitTimerContext;
+
+ @Mock
+ private Snapshot commitSnapshot;
+
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
doReturn(getSystem()).when(actorContext).getActorSystem();
+ doReturn(datastoreContext).when(actorContext).getDatastoreContext();
+ doReturn(100).when(datastoreContext).getShardTransactionCommitTimeoutInSeconds();
+ doReturn(commitTimer).when(actorContext).getOperationTimer("commit");
+ doReturn(commitTimerContext).when(commitTimer).time();
+ doReturn(commitSnapshot).when(commitTimer).getSnapshot();
+ doReturn(TimeUnit.MILLISECONDS.toNanos(2000) * 1.0).when(commitSnapshot).get98thPercentile();
+ doReturn(10.0).when(actorContext).getTxCreationLimit();
}
private Future<ActorSelection> newCohort() {
}
stubber.when(actorContext).executeOperationAsync(any(ActorSelection.class),
- isA(requestType));
+ isA(requestType), any(Timeout.class));
}
private void verifyCohortInvocations(int nCohorts, Class<?> requestType) {
verify(actorContext, times(nCohorts)).executeOperationAsync(
- any(ActorSelection.class), isA(requestType));
+ any(ActorSelection.class), isA(requestType), any(Timeout.class));
}
private void propagateExecutionExceptionCause(ListenableFuture<?> future) throws Throwable {
try {
propagateExecutionExceptionCause(proxy.commit());
} finally {
+
+ verify(actorContext, never()).setTxCreationLimit(anyLong());
verifyCohortInvocations(0, CommitTransaction.SERIALIZABLE_CLASS);
}
+
}
@Test
setupMockActorContext(CommitTransaction.SERIALIZABLE_CLASS,
new CommitTransactionReply(), new CommitTransactionReply());
+ assertEquals(10.0, actorContext.getTxCreationLimit(), 1e-15);
+
proxy.canCommit().get(5, TimeUnit.SECONDS);
proxy.preCommit().get(5, TimeUnit.SECONDS);
proxy.commit().get(5, TimeUnit.SECONDS);
verifyCohortInvocations(2, CanCommitTransaction.SERIALIZABLE_CLASS);
verifyCohortInvocations(2, CommitTransaction.SERIALIZABLE_CLASS);
+
+ // Verify that the creation limit was changed to 0.5 (based on setup)
+ verify(actorContext, timeout(5000)).setTxCreationLimit(0.5);
+ }
+
+ @Test
+ public void testDoNotChangeTxCreationLimitWhenCommittingEmptyTxn() throws Exception {
+
+ ThreePhaseCommitCohortProxy proxy = setupProxy(0);
+
+ assertEquals(10.0, actorContext.getTxCreationLimit(), 1e-15);
+
+ proxy.canCommit().get(5, TimeUnit.SECONDS);
+ proxy.preCommit().get(5, TimeUnit.SECONDS);
+ proxy.commit().get(5, TimeUnit.SECONDS);
+
+ verify(actorContext, never()).setTxCreationLimit(anyLong());
}
}
package org.opendaylight.controller.cluster.datastore;
import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.MockActorContext;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
ActorContext actorContext = null;
SchemaContext schemaContext = mock(SchemaContext.class);
+ @Mock
+ ActorContext mockActorContext;
+
@Before
public void setUp() {
+ MockitoAnnotations.initMocks(this);
+
actorContext = new MockActorContext(getSystem());
actorContext.setSchemaContext(schemaContext);
+
+ doReturn(schemaContext).when(mockActorContext).getSchemaContext();
}
@SuppressWarnings("resource")
Assert.assertNotEquals(one.getTransactionChainId(), two.getTransactionChainId());
}
+
+ @Test
+ public void testRateLimitingUsedInReadWriteTxCreation(){
+ TransactionChainProxy txChainProxy = new TransactionChainProxy(mockActorContext);
+
+ txChainProxy.newReadWriteTransaction();
+
+ verify(mockActorContext, times(1)).acquireTxCreationPermit();
+ }
+
+ @Test
+ public void testRateLimitingUsedInWriteOnlyTxCreation(){
+ TransactionChainProxy txChainProxy = new TransactionChainProxy(mockActorContext);
+
+ txChainProxy.newWriteOnlyTransaction();
+
+ verify(mockActorContext, times(1)).acquireTxCreationPermit();
+ }
+
+
+ @Test
+ public void testRateLimitingNotUsedInReadOnlyTxCreation(){
+ TransactionChainProxy txChainProxy = new TransactionChainProxy(mockActorContext);
+
+ txChainProxy.newReadOnlyTransaction();
+
+ verify(mockActorContext, times(0)).acquireTxCreationPermit();
+ }
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.testkit.JavaTestKit;
import com.google.common.base.Optional;
import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang.time.StopWatch;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.Configuration;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
assertEquals(expected, actual);
}
+ @Test
+ public void testRateLimiting(){
+ DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
+
+ doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
+ doReturn("config").when(mockDataStoreContext).getDataStoreType();
+
+ ActorContext actorContext =
+ new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
+ mock(Configuration.class), mockDataStoreContext);
+
+ // Check that the initial value is being picked up from DataStoreContext
+ assertEquals(mockDataStoreContext.getTransactionCreationInitialRateLimit(), actorContext.getTxCreationLimit(), 1e-15);
+
+ actorContext.setTxCreationLimit(1.0);
+
+ assertEquals(1.0, actorContext.getTxCreationLimit(), 1e-15);
+
+
+ StopWatch watch = new StopWatch();
+
+ watch.start();
+
+ actorContext.acquireTxCreationPermit();
+ actorContext.acquireTxCreationPermit();
+ actorContext.acquireTxCreationPermit();
+
+ watch.stop();
+
+ assertTrue("did not take as much time as expected", watch.getTime() > 1000);
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Collection;
+import java.util.EventListener;
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Interface implemented by classes interested in receiving notifications about
+ * data tree changes. This interface differs from {@link DOMDataChangeListener}
+ * in that it provides a cursor-based view of the change, which has potentially
+ * lower overhead.
+ */
+public interface DOMDataTreeChangeListener extends EventListener {
+ /**
+ * Invoked when there was data change for the supplied path, which was used
+ * to register this listener.
+ *
+ * <p>
+ * This method may be also invoked during registration of the listener if
+ * there is any pre-existing data in the conceptual data tree for supplied
+ * path. This initial event will contain all pre-existing data as created.
+ *
+ * <p>
+ * A data change event may be triggered spuriously, e.g. such that data before
+ * and after compare as equal. Implementations of this interface are expected
+ * to recover from such events. Event producers are expected to exert reasonable
+ * effort to suppress such events.
+ *
+ * In other words, it is completely acceptable to observe
+ * a {@link org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode},
+ * which reports a {@link org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType}
+ * other than UNMODIFIED, while the before- and after- data items compare as
+ * equal.
+ *
+ * @param changes Collection of change events, may not be null or empty.
+ */
+ void onDataTreeChanged(@Nonnull Collection<DataTreeCandidate> changes);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * A {@link DOMService} which allows users to register for changes to a
+ * subtree.
+ */
+public interface DOMDataTreeChangeService extends DOMService {
+ /**
+ * Registers a {@link DOMDataTreeChangeListener} to receive
+ * notifications when data changes under a given path in the conceptual data
+ * tree.
+ * <p>
+ * You are able to register for notifications for any node or subtree
+ * which can be represented using {@link DOMDataTreeIdentifier}.
+ * <p>
+ *
+ * You are able to register for data change notifications for a subtree or leaf
+ * even if it does not exist. You will receive notification once that node is
+ * created.
+ * <p>
+ * If there is any pre-existing data in the data tree for the path for which you are
+ * registering, you will receive an initial data change event, which will
+ * contain all pre-existing data, marked as created.
+ *
+ * <p>
+ * This method returns a {@link ListenerRegistration} object. To
+ * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
+ * method on the returned object.
+ * <p>
+ * You MUST explicitly unregister your listener when you no longer want to receive
+ * notifications. This is especially true in OSGi environments, where failure to
+ * do so during bundle shutdown can lead to stale listeners being still registered.
+ *
+ * @param treeId
+ * Data tree identifier of the subtree which should be watched for
+ * changes.
+ * @param listener
+ * Listener instance which is being registered
+ * @return Listener registration object, which may be used to unregister
+ * your listener using {@link ListenerRegistration#close()} to stop
+ * delivery of change events.
+ */
+ @Nonnull <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DOMDataTreeIdentifier treeId, @Nonnull L listener);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import com.google.common.base.Preconditions;
+import java.io.Serializable;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * A unique identifier for a particular subtree. It is composed of the logical
+ * data store type and the instance identifier of the root node.
+ */
+public final class DOMDataTreeIdentifier implements Immutable, Path<DOMDataTreeIdentifier>, Serializable {
+ private static final long serialVersionUID = 1L;
+ private final YangInstanceIdentifier rootIdentifier;
+ private final LogicalDatastoreType datastoreType;
+
+ public DOMDataTreeIdentifier(final LogicalDatastoreType datastoreType, final YangInstanceIdentifier rootIdentifier) {
+ this.datastoreType = Preconditions.checkNotNull(datastoreType);
+ this.rootIdentifier = Preconditions.checkNotNull(rootIdentifier);
+ }
+
+ /**
+ * Return the logical data store type.
+ *
+ * @return Logical data store type. Guaranteed to be non-null.
+ */
+ public @Nonnull LogicalDatastoreType getDatastoreType() {
+ return datastoreType;
+ }
+
+ /**
+ * Return the {@link YangInstanceIdentifier} of the root node.
+ *
+ * @return Instance identifier corresponding to the root node.
+ */
+ public @Nonnull YangInstanceIdentifier getRootIdentifier() {
+ return rootIdentifier;
+ }
+
+ @Override
+ public boolean contains(final DOMDataTreeIdentifier other) {
+ return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + datastoreType.hashCode();
+ result = prime * result + rootIdentifier.hashCode();
+ return result;
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof DOMDataTreeIdentifier)) {
+ return false;
+ }
+ DOMDataTreeIdentifier other = (DOMDataTreeIdentifier) obj;
+ if (datastoreType != other.datastoreType) {
+ return false;
+ }
+ return rootIdentifier.equals(other.rootIdentifier);
+ }
+}
<artifactId>guava</artifactId>
</dependency>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
+ <groupId>com.lmax</groupId>
+ <artifactId>disruptor</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-yang-types</artifactId>
</dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableMultimap;
+import com.google.common.collect.ImmutableMultimap.Builder;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Multimaps;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.lmax.disruptor.EventHandler;
+import com.lmax.disruptor.InsufficientCapacityException;
+import com.lmax.disruptor.SleepingWaitStrategy;
+import com.lmax.disruptor.WaitStrategy;
+import com.lmax.disruptor.dsl.Disruptor;
+import com.lmax.disruptor.dsl.ProducerType;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Joint implementation of {@link DOMNotificationPublishService} and {@link DOMNotificationService}. Provides
+ * routing of notifications from publishers to subscribers.
+ *
+ * Internal implementation works by allocating a two-handler Disruptor. The first handler delivers notifications
+ * to subscribed listeners and the second one notifies whoever may be listening on the returned future. Registration
+ * state tracking is performed by a simple immutable multimap -- when a registration or unregistration occurs we
+ * re-generate the entire map from scratch and set it atomically. While registrations/unregistrations synchronize
+ * on this instance, notifications do not take any locks here.
+ *
+ * The fully-blocking {@link #putNotification(DOMNotification)} and non-blocking {@link #offerNotification(DOMNotification)}
+ * are realized using the Disruptor's native operations. The bounded-blocking {@link #offerNotification(DOMNotification, long, TimeUnit)}
+ * is intended to be realized by arming a background wakeup interrupt, but that part is not implemented yet:
+ * once the initial non-blocking attempt is rejected it currently fails with {@link UnsupportedOperationException}.
+ */
+public final class DOMNotificationRouter implements AutoCloseable, DOMNotificationPublishService, DOMNotificationService {
+ private static final ListenableFuture<Void> NO_LISTENERS = Futures.immediateFuture(null);
+ private static final WaitStrategy DEFAULT_STRATEGY = new SleepingWaitStrategy();
+ private static final EventHandler<DOMNotificationRouterEvent> DISPATCH_NOTIFICATIONS = new EventHandler<DOMNotificationRouterEvent>() {
+ @Override
+ public void onEvent(final DOMNotificationRouterEvent event, final long sequence, final boolean endOfBatch) throws Exception {
+ event.deliverNotification();
+
+ }
+ };
+ private static final EventHandler<DOMNotificationRouterEvent> NOTIFY_FUTURE = new EventHandler<DOMNotificationRouterEvent>() {
+ @Override
+ public void onEvent(final DOMNotificationRouterEvent event, final long sequence, final boolean endOfBatch) {
+ event.setFuture();
+ }
+ };
+
+ private final Disruptor<DOMNotificationRouterEvent> disruptor;
+ private final ExecutorService executor;
+ private volatile Multimap<SchemaPath, ListenerRegistration<? extends DOMNotificationListener>> listeners = ImmutableMultimap.of();
+
+ private DOMNotificationRouter(final ExecutorService executor, final Disruptor<DOMNotificationRouterEvent> disruptor) {
+ this.executor = Preconditions.checkNotNull(executor);
+ this.disruptor = Preconditions.checkNotNull(disruptor);
+ }
+
+ /**
+  * Create a new, started router instance backed by a two-handler Disruptor:
+  * the first handler delivers notifications to subscribed listeners, the
+  * second completes the futures handed back to publishers.
+  *
+  * @param queueDepth size of the backing ring buffer
+  * @return a started DOMNotificationRouter
+  */
+ @SuppressWarnings("unchecked")
+ public static DOMNotificationRouter create(final int queueDepth) {
+ final ExecutorService executor = Executors.newCachedThreadPool();
+ final Disruptor<DOMNotificationRouterEvent> disruptor = new Disruptor<>(DOMNotificationRouterEvent.FACTORY, queueDepth, executor, ProducerType.MULTI, DEFAULT_STRATEGY);
+
+ // DISPATCH_NOTIFICATIONS must be registered via handleEventsWith() before
+ // after() can reference it as a dependency; the Disruptor DSL rejects
+ // after() on a handler that is not consuming, so without this line the
+ // NOTIFY_FUTURE stage is never wired and create() fails at runtime.
+ disruptor.handleEventsWith(DISPATCH_NOTIFICATIONS);
+ disruptor.after(DISPATCH_NOTIFICATIONS).handleEventsWith(NOTIFY_FUTURE);
+ disruptor.start();
+
+ return new DOMNotificationRouter(executor, disruptor);
+ }
+
+ @Override
+ public synchronized <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener, final Collection<SchemaPath> types) {
+ final ListenerRegistration<T> reg = new AbstractListenerRegistration<T>(listener) {
+ @Override
+ protected void removeRegistration() {
+ final ListenerRegistration<T> me = this;
+
+ synchronized (DOMNotificationRouter.this) {
+ listeners = ImmutableMultimap.copyOf(Multimaps.filterValues(listeners, new Predicate<ListenerRegistration<? extends DOMNotificationListener>>() {
+ @Override
+ public boolean apply(final ListenerRegistration<? extends DOMNotificationListener> input) {
+ return input != me;
+ }
+ }));
+ }
+ }
+ };
+
+ if (!types.isEmpty()) {
+ final Builder<SchemaPath, ListenerRegistration<? extends DOMNotificationListener>> b = ImmutableMultimap.builder();
+ b.putAll(listeners);
+
+ for (SchemaPath t : types) {
+ b.put(t, reg);
+ }
+
+ listeners = b.build();
+ }
+
+ return reg;
+ }
+
+ @Override
+ public <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener, final SchemaPath... types) {
+ return registerNotificationListener(listener, Arrays.asList(types));
+ }
+
+ private ListenableFuture<Void> publish(final long seq, final DOMNotification notification, final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers) {
+ final DOMNotificationRouterEvent event = disruptor.get(seq);
+ final ListenableFuture<Void> future = event.initialize(notification, subscribers);
+ disruptor.getRingBuffer().publish(seq);
+ return future;
+ }
+
+ @Override
+ public ListenableFuture<? extends Object> putNotification(final DOMNotification notification) throws InterruptedException {
+ final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers = listeners.get(notification.getType());
+ if (subscribers.isEmpty()) {
+ return NO_LISTENERS;
+ }
+
+ final long seq = disruptor.getRingBuffer().next();
+ return publish(seq, notification, subscribers);
+ }
+
+ private ListenableFuture<? extends Object> tryPublish(final DOMNotification notification, final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers) {
+ final long seq;
+ try {
+ seq = disruptor.getRingBuffer().tryNext();
+ } catch (InsufficientCapacityException e) {
+ return DOMNotificationPublishService.REJECTED;
+ }
+
+ return publish(seq, notification, subscribers);
+ }
+
+ @Override
+ public ListenableFuture<? extends Object> offerNotification(final DOMNotification notification) {
+ final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers = listeners.get(notification.getType());
+ if (subscribers.isEmpty()) {
+ return NO_LISTENERS;
+ }
+
+ return tryPublish(notification, subscribers);
+ }
+
+ @Override
+ public ListenableFuture<? extends Object> offerNotification(final DOMNotification notification, final long timeout,
+ final TimeUnit unit) throws InterruptedException {
+ final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers = listeners.get(notification.getType());
+ if (subscribers.isEmpty()) {
+ return NO_LISTENERS;
+ }
+
+ // Attempt to perform a non-blocking publish first
+ final ListenableFuture<? extends Object> noBlock = tryPublish(notification, subscribers);
+ if (!DOMNotificationPublishService.REJECTED.equals(noBlock)) {
+ return noBlock;
+ }
+
+ /*
+ * FIXME: we need a background thread, which will watch out for blocking too long. Here
+ * we will arm a tasklet for it and synchronize delivery of interrupt properly.
+ */
+ throw new UnsupportedOperationException("Not implemented yet");
+ }
+
+ @Override
+ public void close() {
+ disruptor.shutdown();
+ executor.shutdown();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.lmax.disruptor.EventFactory;
+import java.util.Collection;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * A single notification event in the disruptor ringbuffer. These objects are reused,
+ * so they do have mutable state.
+ */
+final class DOMNotificationRouterEvent {
+ // Factory used by the disruptor to pre-populate every ring buffer slot.
+ public static final EventFactory<DOMNotificationRouterEvent> FACTORY = new EventFactory<DOMNotificationRouterEvent>() {
+ @Override
+ public DOMNotificationRouterEvent newInstance() {
+ return new DOMNotificationRouterEvent();
+ }
+ };
+
+ // Mutable per-slot state, overwritten by each initialize() call.
+ // NOTE(review): references are retained until the slot is reused, which
+ // delays garbage collection of the last notification -- confirm acceptable.
+ private Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers;
+ private DOMNotification notification;
+ private SettableFuture<Void> future;
+
+ private DOMNotificationRouterEvent() {
+ // Hidden on purpose, initialized in initialize()
+ }
+
+ /**
+  * Populate this slot for a new publication.
+  *
+  * @param notification notification to deliver, must not be null
+  * @param subscribers listener registrations to deliver to, must not be null
+  * @return future completed (via setFuture()) once delivery has finished
+  */
+ ListenableFuture<Void> initialize(final DOMNotification notification, final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers) {
+ this.notification = Preconditions.checkNotNull(notification);
+ this.subscribers = Preconditions.checkNotNull(subscribers);
+ this.future = SettableFuture.create();
+ return this.future;
+ }
+
+ /**
+  * Deliver the notification to every still-live subscriber. Registrations
+  * whose instance is already null (closed) are skipped.
+  */
+ void deliverNotification() {
+ for (ListenerRegistration<? extends DOMNotificationListener> r : subscribers) {
+ final DOMNotificationListener l = r.getInstance();
+ if (l != null) {
+ l.onNotification(notification);
+ }
+ }
+ }
+
+ /**
+  * Complete the future handed out by initialize(), signalling that delivery
+  * of this event has finished.
+  */
+ void setFuture() {
+ future.set(null);
+ }
+
+}
\ No newline at end of file
*/
final boolean success = READY_UPDATER.compareAndSet(this, null, tx);
Preconditions.checkState(success, "Transaction %s collided on ready state", tx, readyTx);
- LOG.debug("Transaction {} readied");
+ LOG.debug("Transaction {} readied", tx);
/*
* We do not see a transaction being in-flight, so we need to take care of dispatching
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Interface implemented by DOMStore implementations which allow registration
+ * of {@link DOMDataTreeChangeListener} instances.
+ */
+public interface DOMStoreTreeChangePublisher {
+ /**
+ * Registers a {@link DOMDataTreeChangeListener} to receive
+ * notifications when data changes under a given path in the conceptual data
+ * tree.
+ * <p>
+ * You are able to register for notifications for any node or subtree
+ * which can be represented using {@link YangInstanceIdentifier}.
+ * <p>
+ * You are able to register for data change notifications for a subtree or leaf
+ * even if it does not exist. You will receive notification once that node is
+ * created.
+ * <p>
+ * If there is any pre-existing data in the data tree at the path for which you are
+ * registering, you will receive an initial data change event, which will
+ * contain all pre-existing data, marked as created.
+ * <p>
+ * This method returns a {@link ListenerRegistration} object. To
+ * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
+ * method on this returned object.
+ * <p>
+ * You MUST explicitly unregister your listener when you no longer want to receive
+ * notifications. This is especially true in OSGi environments, where failure to
+ * do so during bundle shutdown can lead to stale listeners being still registered.
+ *
+ * @param treeId
+ *          Data tree identifier of the subtree which should be watched for
+ *          changes.
+ * @param listener
+ *          Listener instance which is being registered
+ * @return Listener registration object, which may be used to unregister
+ *         your listener using {@link ListenerRegistration#close()} to stop
+ *         delivery of change events.
+ */
+ @Nonnull <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(@Nonnull YangInstanceIdentifier treeId, @Nonnull L listener);
+}
<version>1.2.0-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-distributed-datastore</artifactId>
+ </dependency>
<!-- Test Dependencies -->
<dependency>
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
-import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Walker;
+import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerWalker;
import org.opendaylight.yangtools.util.concurrent.NotificationManager;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
* Resolves and submits notification tasks to the specified manager.
*/
public synchronized void resolve(final NotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> manager) {
- try (final Walker w = listenerRoot.getWalker()) {
+ try (final ListenerWalker w = listenerRoot.getWalker()) {
// Defensive: reset internal state
collectedEvents = ArrayListMultimap.create();
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Multimap;
-
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
-import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Node;
+import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Multimap;
+
/**
* Recursion state used in {@link ResolveDataChangeEventsTask}. Instances of this
* method track which listeners are affected by a particular change node. It takes
*/
private final Collection<Builder> inheritedOne;
private final YangInstanceIdentifier nodeId;
- private final Collection<Node> nodes;
+ private final Collection<ListenerNode> nodes;
private final Map<DataChangeListenerRegistration<?>, Builder> subBuilders;
private final Map<DataChangeListenerRegistration<?>, Builder> oneBuilders;
private ResolveDataChangeState(final YangInstanceIdentifier nodeId,
final Iterable<Builder> inheritedSub, final Collection<Builder> inheritedOne,
- final Collection<Node> nodes) {
+ final Collection<ListenerNode> nodes) {
this.nodeId = Preconditions.checkNotNull(nodeId);
this.nodes = Preconditions.checkNotNull(nodes);
this.inheritedSub = Preconditions.checkNotNull(inheritedSub);
final Map<DataChangeListenerRegistration<?>, Builder> sub = new HashMap<>();
final Map<DataChangeListenerRegistration<?>, Builder> one = new HashMap<>();
final Map<DataChangeListenerRegistration<?>, Builder> base = new HashMap<>();
- for (Node n : nodes) {
+ for (ListenerNode n : nodes) {
for (DataChangeListenerRegistration<?> l : n.getListeners()) {
final Builder b = DOMImmutableDataChangeEvent.builder(DataChangeScope.BASE);
switch (l.getScope()) {
* @param root root node
* @return
*/
- public static ResolveDataChangeState initial(final YangInstanceIdentifier rootId, final Node root) {
+ public static ResolveDataChangeState initial(final YangInstanceIdentifier rootId, final ListenerNode root) {
return new ResolveDataChangeState(rootId, Collections.<Builder>emptyList(),
Collections.<Builder>emptyList(), Collections.singletonList(root));
}
LOG.trace("Collected events {}", map);
}
- private static Collection<Node> getListenerChildrenWildcarded(final Collection<Node> parentNodes,
+ private static Collection<ListenerNode> getListenerChildrenWildcarded(final Collection<ListenerNode> parentNodes,
final PathArgument child) {
if (parentNodes.isEmpty()) {
return Collections.emptyList();
}
- final List<Node> result = new ArrayList<>();
+ final List<ListenerNode> result = new ArrayList<>();
if (child instanceof NodeWithValue || child instanceof NodeIdentifierWithPredicates) {
NodeIdentifier wildcardedIdentifier = new NodeIdentifier(child.getNodeType());
addChildNodes(result, parentNodes, wildcardedIdentifier);
return result;
}
- private static void addChildNodes(final List<Node> result, final Collection<Node> parentNodes, final PathArgument childIdentifier) {
- for (Node node : parentNodes) {
- Optional<Node> child = node.getChild(childIdentifier);
+ private static void addChildNodes(final List<ListenerNode> result, final Collection<ListenerNode> parentNodes, final PathArgument childIdentifier) {
+ for (ListenerNode node : parentNodes) {
+ Optional<ListenerNode> child = node.getChild(childIdentifier);
if (child.isPresent()) {
result.add(child.get());
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.impl.tree;
+
+import com.google.common.base.Optional;
+import java.lang.ref.Reference;
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.dom.store.impl.DataChangeListenerRegistration;
+import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.DataChangeListenerRegistrationImpl;
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.StoreTreeNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This is a single node within the listener tree. Note that the data returned from
+ * an instance of this class is guaranteed to have any relevance or consistency
+ * only as long as the {@link ListenerWalker} instance through which it is reached remains
+ * unclosed.
+ *
+ * @author Robert Varga
+ */
+public class ListenerNode implements StoreTreeNode<ListenerNode>, Identifiable<PathArgument> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(ListenerNode.class);
+
+ private final Collection<DataChangeListenerRegistration<?>> listeners = new ArrayList<>();
+ private final Map<PathArgument, ListenerNode> children = new HashMap<>();
+ private final PathArgument identifier;
+ // Weak reference so an abandoned subtree does not pin its ancestors in memory.
+ private final Reference<ListenerNode> parent;
+
+ ListenerNode(final ListenerNode parent, final PathArgument identifier) {
+ this.parent = new WeakReference<>(parent);
+ this.identifier = identifier;
+ }
+
+ @Override
+ public PathArgument getIdentifier() {
+ return identifier;
+ }
+
+ @Override
+ public Optional<ListenerNode> getChild(final PathArgument child) {
+ return Optional.fromNullable(children.get(child));
+ }
+
+ /**
+ * Return the list of current listeners. This collection is guaranteed
+ * to be immutable only while the walker, through which this node is
+ * reachable remains unclosed.
+ *
+ * @return the list of current listeners
+ */
+ public Collection<DataChangeListenerRegistration<?>> getListeners() {
+ return listeners;
+ }
+
+ /**
+  * Return the child for the given path argument, creating it if it does
+  * not exist yet. NOTE(review): callers appear to invoke this with the
+  * tree's write-lock held -- confirm, as children is not thread-safe.
+  */
+ ListenerNode ensureChild(final PathArgument child) {
+ ListenerNode potential = children.get(child);
+ if (potential == null) {
+ potential = new ListenerNode(this, child);
+ children.put(child, potential);
+ }
+ return potential;
+ }
+
+ /**
+  * Attach a listener registration to this node.
+  */
+ void addListener(final DataChangeListenerRegistration<?> listener) {
+ listeners.add(listener);
+ LOG.debug("Listener {} registered", listener);
+ }
+
+ /**
+  * Detach a listener registration from this node and prune the node (and
+  * any now-empty ancestors) if it no longer carries listeners or children.
+  */
+ void removeListener(final DataChangeListenerRegistrationImpl<?> listener) {
+ listeners.remove(listener);
+ LOG.debug("Listener {} unregistered", listener);
+
+ // We have been called with the write-lock held, so we can perform some cleanup.
+ removeThisIfUnused();
+ }
+
+ // Remove this node from its parent when it has no listeners and no children.
+ private void removeThisIfUnused() {
+ final ListenerNode p = parent.get();
+ if (p != null && listeners.isEmpty() && children.isEmpty()) {
+ p.removeChild(identifier);
+ }
+ }
+
+ // Drop a child and recursively prune upwards if this node becomes unused.
+ private void removeChild(final PathArgument arg) {
+ children.remove(arg);
+ removeThisIfUnused();
+ }
+
+ @Override
+ public String toString() {
+ return "Node [identifier=" + identifier + ", listeners=" + listeners.size() + ", children=" + children.size() + "]";
+ }
+}
*/
package org.opendaylight.controller.md.sal.dom.store.impl.tree;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-
-import java.lang.ref.Reference;
-import java.lang.ref.WeakReference;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import javax.annotation.concurrent.GuardedBy;
-
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.dom.store.impl.DataChangeListenerRegistration;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.StoreTreeNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A set of listeners organized as a tree by node to which they listen. This class
* allows for efficient lookup of listeners when we walk the DataTreeCandidate.
+ *
+ * @author Robert Varga
*/
public final class ListenerTree {
private static final Logger LOG = LoggerFactory.getLogger(ListenerTree.class);
private final ReadWriteLock rwLock = new ReentrantReadWriteLock(true);
- private final Node rootNode = new Node(null, null);
+ private final ListenerNode rootNode = new ListenerNode(null, null);
private ListenerTree() {
// Private to disallow direct instantiation
rwLock.writeLock().lock();
try {
- Node walkNode = rootNode;
+ ListenerNode walkNode = rootNode;
for (final PathArgument arg : path.getPathArguments()) {
walkNode = walkNode.ensureChild(arg);
}
- final Node node = walkNode;
+ final ListenerNode node = walkNode;
DataChangeListenerRegistration<L> reg = new DataChangeListenerRegistrationImpl<L>(listener) {
@Override
public DataChangeScope getScope() {
*
* @return A walker instance.
*/
- public Walker getWalker() {
+ public ListenerWalker getWalker() {
/*
* TODO: The only current user of this method is local to the datastore.
* Since this class represents a read-lock, losing a reference to
* external user exist, make the Walker a phantom reference, which
* will cleanup the lock if not told to do so.
*/
- final Walker ret = new Walker(rwLock.readLock(), rootNode);
+ final ListenerWalker ret = new ListenerWalker(rwLock.readLock(), rootNode);
rwLock.readLock().lock();
return ret;
}
- /**
- * A walking context, pretty much equivalent to an iterator, but it
- * exposes the underlying tree structure.
- */
- /*
- * FIXME: BUG-1511: split this class out as ListenerWalker.
- */
- public static final class Walker implements AutoCloseable {
- private final Lock lock;
- private final Node node;
-
- @GuardedBy("this")
- private boolean valid = true;
-
- private Walker(final Lock lock, final Node node) {
- this.lock = Preconditions.checkNotNull(lock);
- this.node = Preconditions.checkNotNull(node);
- }
-
- public Node getRootNode() {
- return node;
- }
-
- @Override
- public synchronized void close() {
- if (valid) {
- lock.unlock();
- valid = false;
- }
- }
- }
-
- /**
- * This is a single node within the listener tree. Note that the data returned from
- * and instance of this class is guaranteed to have any relevance or consistency
- * only as long as the {@link org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Walker} instance through which it is reached remains
- * unclosed.
- */
- /*
- * FIXME: BUG-1511: split this class out as ListenerNode.
- */
- public static final class Node implements StoreTreeNode<Node>, Identifiable<PathArgument> {
- private final Collection<DataChangeListenerRegistration<?>> listeners = new ArrayList<>();
- private final Map<PathArgument, Node> children = new HashMap<>();
- private final PathArgument identifier;
- private final Reference<Node> parent;
-
- private Node(final Node parent, final PathArgument identifier) {
- this.parent = new WeakReference<>(parent);
- this.identifier = identifier;
- }
-
- @Override
- public PathArgument getIdentifier() {
- return identifier;
- }
-
- @Override
- public Optional<Node> getChild(final PathArgument child) {
- return Optional.fromNullable(children.get(child));
- }
-
- /**
- * Return the list of current listeners. This collection is guaranteed
- * to be immutable only while the walker, through which this node is
- * reachable remains unclosed.
- *
- * @return the list of current listeners
- */
- public Collection<DataChangeListenerRegistration<?>> getListeners() {
- return listeners;
- }
-
- private Node ensureChild(final PathArgument child) {
- Node potential = children.get(child);
- if (potential == null) {
- potential = new Node(this, child);
- children.put(child, potential);
- }
- return potential;
- }
-
- private void addListener(final DataChangeListenerRegistration<?> listener) {
- listeners.add(listener);
- LOG.debug("Listener {} registered", listener);
- }
-
- private void removeListener(final DataChangeListenerRegistrationImpl<?> listener) {
- listeners.remove(listener);
- LOG.debug("Listener {} unregistered", listener);
-
- // We have been called with the write-lock held, so we can perform some cleanup.
- removeThisIfUnused();
- }
-
- private void removeThisIfUnused() {
- final Node p = parent.get();
- if (p != null && listeners.isEmpty() && children.isEmpty()) {
- p.removeChild(identifier);
- }
- }
-
- private void removeChild(final PathArgument arg) {
- children.remove(arg);
- removeThisIfUnused();
- }
-
- @Override
- public String toString() {
- return "Node [identifier=" + identifier + ", listeners=" + listeners.size() + ", children=" + children.size() + "]";
- }
-
-
- }
-
- private abstract static class DataChangeListenerRegistrationImpl<T extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> extends AbstractListenerRegistration<T> //
+ abstract static class DataChangeListenerRegistrationImpl<T extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> extends AbstractListenerRegistration<T> //
implements DataChangeListenerRegistration<T> {
public DataChangeListenerRegistrationImpl(final T listener) {
super(listener);
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.impl.tree;
+
+import com.google.common.base.Preconditions;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.locks.Lock;
+
+/**
+ * A walking context, pretty much equivalent to an iterator, but it
+ * exposes the underlying tree structure.
+ *
+ * Holds the listener tree's read-lock (acquired by the creator after
+ * construction); close() releases it exactly once.
+ *
+ * @author Robert Varga
+ */
+public class ListenerWalker implements AutoCloseable {
+ private static final AtomicIntegerFieldUpdater<ListenerWalker> CLOSED_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ListenerWalker.class, "closed");
+ private final Lock lock;
+ private final ListenerNode node;
+
+ // Used via CLOSED_UPDATER
+ @SuppressWarnings("unused")
+ private volatile int closed = 0;
+
+ ListenerWalker(final Lock lock, final ListenerNode node) {
+ this.lock = Preconditions.checkNotNull(lock);
+ this.node = Preconditions.checkNotNull(node);
+ }
+
+ public ListenerNode getRootNode() {
+ return node;
+ }
+
+ // Idempotent: the CAS guarantees the lock is released by exactly one caller.
+ @Override
+ public void close() {
+ if (CLOSED_UPDATER.compareAndSet(this, 0, 1)) {
+ lock.unlock();
+ }
+ }
+}
\ No newline at end of file
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-netconf-monitoring</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-client</artifactId>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-inventory</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-topology</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-broker-impl</artifactId>
import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkCondition;
import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkNotNull;
+
import com.google.common.base.Optional;
import io.netty.util.concurrent.EventExecutor;
import java.math.BigDecimal;
import org.opendaylight.controller.sal.connect.netconf.NetconfDevice;
import org.opendaylight.controller.sal.connect.netconf.NetconfStateSchemas;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceSalFacade;
import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
private static final Logger logger = LoggerFactory.getLogger(NetconfConnectorModule.class);
private BundleContext bundleContext;
- private Optional<NetconfSessionCapabilities> userCapabilities;
+ private Optional<NetconfSessionPreferences> userCapabilities;
private SchemaSourceRegistry schemaRegistry;
private SchemaContextFactory schemaContextFactory;
}
userCapabilities = getUserCapabilities();
-
}
private boolean isHostAddressPresent(final Host address) {
@Override
public java.lang.AutoCloseable createInstance() {
- final RemoteDeviceId id = new RemoteDeviceId(getIdentifier());
+ final RemoteDeviceId id = new RemoteDeviceId(getIdentifier(), getSocketAddress());
final ExecutorService globalProcessingExecutor = getProcessingExecutorDependency().getExecutor();
final Broker domBroker = getDomRegistryDependency();
final BindingAwareBroker bindingBroker = getBindingRegistryDependency();
- final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade
+ final RemoteDeviceHandler<NetconfSessionPreferences> salFacade
= new NetconfDeviceSalFacade(id, domBroker, bindingBroker, bundleContext, globalProcessingExecutor);
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO =
new NetconfDevice.SchemaResourcesDTO(schemaRegistry, schemaContextFactory, new NetconfStateSchemas.NetconfStateSchemasResolverImpl());
final NetconfDevice device =
- new NetconfDevice(schemaResourcesDTO, id, salFacade, globalProcessingExecutor, new NetconfMessageTransformer());
+ new NetconfDevice(schemaResourcesDTO, id, salFacade, globalProcessingExecutor, new NetconfMessageTransformer(), true);
final NetconfDeviceCommunicator listener = userCapabilities.isPresent() ?
new NetconfDeviceCommunicator(id, device, userCapabilities.get()) : new NetconfDeviceCommunicator(id, device);
final NetconfReconnectingClientConfiguration clientConfig = getClientConfig(listener);
-
final NetconfClientDispatcher dispatcher = getClientDispatcherDependency();
+
listener.initializeRemoteConnection(dispatcher, clientConfig);
- return new MyAutoCloseable(listener, salFacade);
+ return new SalConnectorCloseable(listener, salFacade);
}
- private Optional<NetconfSessionCapabilities> getUserCapabilities() {
+ private Optional<NetconfSessionPreferences> getUserCapabilities() {
if(getYangModuleCapabilities() == null) {
return Optional.absent();
}
return Optional.absent();
}
- final NetconfSessionCapabilities parsedOverrideCapabilities = NetconfSessionCapabilities.fromStrings(capabilities);
+ final NetconfSessionPreferences parsedOverrideCapabilities = NetconfSessionPreferences.fromStrings(capabilities);
JmxAttributeValidationException.checkCondition(
parsedOverrideCapabilities.getNonModuleCaps().isEmpty(),
"Capabilities to override can only contain module based capabilities, non-module capabilities will be retrieved from the device," +
final InetSocketAddress socketAddress = getSocketAddress();
final long clientConnectionTimeoutMillis = getConnectionTimeoutMillis();
- final ReconnectStrategyFactory sf = new MyReconnectStrategyFactory(
+ final ReconnectStrategyFactory sf = new TimedReconnectStrategyFactory(
getEventExecutorDependency(), getMaxConnectionAttempts(), getBetweenAttemptsTimeoutMillis(), getSleepFactor());
final ReconnectStrategy strategy = sf.createReconnectStrategy();
.withAddress(socketAddress)
.withConnectionTimeoutMillis(clientConnectionTimeoutMillis)
.withReconnectStrategy(strategy)
- .withSessionListener(listener)
.withAuthHandler(new LoginPassword(getUsername(),getPassword()))
.withProtocol(getTcpOnly() ?
NetconfClientConfiguration.NetconfClientProtocol.TCP :
NetconfClientConfiguration.NetconfClientProtocol.SSH)
.withConnectStrategyFactory(sf)
+ .withSessionListener(listener)
.build();
}
- private static final class MyAutoCloseable implements AutoCloseable {
- private final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade;
+ private static final class SalConnectorCloseable implements AutoCloseable {
+ private final RemoteDeviceHandler<NetconfSessionPreferences> salFacade;
private final NetconfDeviceCommunicator listener;
- public MyAutoCloseable(final NetconfDeviceCommunicator listener,
- final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade) {
+ public SalConnectorCloseable(final NetconfDeviceCommunicator listener,
+ final RemoteDeviceHandler<NetconfSessionPreferences> salFacade) {
this.listener = listener;
this.salFacade = salFacade;
}
}
}
- private static final class MyReconnectStrategyFactory implements ReconnectStrategyFactory {
+ private static final class TimedReconnectStrategyFactory implements ReconnectStrategyFactory {
private final Long connectionAttempts;
private final EventExecutor executor;
private final double sleepFactor;
private final int minSleep;
- MyReconnectStrategyFactory(final EventExecutor executor, final Long maxConnectionAttempts, final int minSleep, final BigDecimal sleepFactor) {
+ TimedReconnectStrategyFactory(final EventExecutor executor, final Long maxConnectionAttempts, final int minSleep, final BigDecimal sleepFactor) {
if (maxConnectionAttempts != null && maxConnectionAttempts > 0) {
connectionAttempts = maxConnectionAttempts;
} else {
/**
*
*/
-public interface RemoteDevice<PREF, M> {
+public interface RemoteDevice<PREF, M, LISTENER extends RemoteDeviceCommunicator<M>> {
- void onRemoteSessionUp(PREF remoteSessionCapabilities, RemoteDeviceCommunicator<M> listener);
+ void onRemoteSessionUp(PREF remoteSessionCapabilities, LISTENER listener);
void onRemoteSessionDown();
+ void onRemoteSessionFailed(Throwable throwable);
+
void onNotification(M notification);
}
void onDeviceDisconnected();
+ void onDeviceFailed(Throwable throwable);
+
void onNotification(CompositeNode domNotification);
void close();
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collection;
+import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.controller.sal.connect.netconf.schema.NetconfRemoteSchemaYangSourceProvider;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapability;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.repo.api.MissingSchemaSourceException;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
/**
* This is a mediator between NetconfDeviceCommunicator and NetconfDeviceSalFacade
*/
-public final class NetconfDevice implements RemoteDevice<NetconfSessionCapabilities, NetconfMessage> {
+public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> {
private static final Logger logger = LoggerFactory.getLogger(NetconfDevice.class);
};
private final RemoteDeviceId id;
+ private final boolean reconnectOnSchemasChange;
private final SchemaContextFactory schemaContextFactory;
- private final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade;
+ private final RemoteDeviceHandler<NetconfSessionPreferences> salFacade;
private final ListeningExecutorService processingExecutor;
private final SchemaSourceRegistry schemaRegistry;
private final MessageTransformer<NetconfMessage> messageTransformer;
private final NotificationHandler notificationHandler;
private final List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations = Lists.newArrayList();
- public NetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade,
+ public NetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionPreferences> salFacade,
final ExecutorService globalProcessingExecutor, final MessageTransformer<NetconfMessage> messageTransformer) {
+ this(schemaResourcesDTO, id, salFacade, globalProcessingExecutor, messageTransformer, false);
+ }
+
+ // FIXME reduce parameters
+ public NetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionPreferences> salFacade,
+ final ExecutorService globalProcessingExecutor, final MessageTransformer<NetconfMessage> messageTransformer, final boolean reconnectOnSchemasChange) {
this.id = id;
+ this.reconnectOnSchemasChange = reconnectOnSchemasChange;
this.schemaRegistry = schemaResourcesDTO.getSchemaRegistry();
this.messageTransformer = messageTransformer;
this.schemaContextFactory = schemaResourcesDTO.getSchemaContextFactory();
}
@Override
- public void onRemoteSessionUp(final NetconfSessionCapabilities remoteSessionCapabilities,
- final RemoteDeviceCommunicator<NetconfMessage> listener) {
+ public void onRemoteSessionUp(final NetconfSessionPreferences remoteSessionCapabilities,
+ final NetconfDeviceCommunicator listener) {
// SchemaContext setup has to be performed in a dedicated thread since
// we are in a netty thread in this method
// Yang models are being downloaded in this method and it would cause a
final DeviceSourcesResolver task = new DeviceSourcesResolver(deviceRpc, remoteSessionCapabilities, id, stateSchemasResolver);
final ListenableFuture<DeviceSources> sourceResolverFuture = processingExecutor.submit(task);
+ if(shouldListenOnSchemaChange(remoteSessionCapabilities)) {
+ registerToBaseNetconfStream(deviceRpc, listener);
+ }
+
final FutureCallback<DeviceSources> resolvedSourceCallback = new FutureCallback<DeviceSources>() {
@Override
public void onSuccess(final DeviceSources result) {
};
Futures.addCallback(sourceResolverFuture, resolvedSourceCallback);
+
}
- private void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionCapabilities remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc) {
+ /**
+ * Subscribes to the device's base netconf notification stream (create-subscription RPC)
+ * and, once the subscription succeeds, installs a notification filter that watches for
+ * netconf-capability-change notifications. On such a notification the session is simply
+ * disconnected; the reconnecting connector re-establishes it and re-reads the schemas.
+ */
+ private void registerToBaseNetconfStream(final NetconfDeviceRpc deviceRpc, final NetconfDeviceCommunicator listener) {
+ final ListenableFuture<RpcResult<CompositeNode>> rpcResultListenableFuture =
+ deviceRpc.invokeRpc(NetconfMessageTransformUtil.CREATE_SUBSCRIPTION_RPC_QNAME, NetconfMessageTransformUtil.CREATE_SUBSCRIPTION_RPC_CONTENT);
+
+ // Filter consumes (returns absent for) capability-change notifications so they are
+ // not forwarded to the sal facade; everything else passes through untouched.
+ final NotificationHandler.NotificationFilter filter = new NotificationHandler.NotificationFilter() {
+ @Override
+ public Optional<CompositeNode> filterNotification(final CompositeNode notification) {
+ if (isCapabilityChanged(notification)) {
+ logger.info("{}: Schemas change detected, reconnecting", id);
+ // Only disconnect is enough, the reconnecting nature of the connector will take care of reconnecting
+ listener.disconnect();
+ return Optional.absent();
+ }
+ return Optional.of(notification);
+ }
+
+ private boolean isCapabilityChanged(final CompositeNode notification) {
+ return notification.getNodeType().equals(NetconfCapabilityChange.QNAME);
+ }
+ };
+
+ // The filter is only installed after the subscription RPC succeeds; on failure the
+ // feature is silently degraded — schemas just will not be reloaded on the fly.
+ Futures.addCallback(rpcResultListenableFuture, new FutureCallback<RpcResult<CompositeNode>>() {
+ @Override
+ public void onSuccess(final RpcResult<CompositeNode> result) {
+ notificationHandler.addNotificationFilter(filter);
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ logger.warn("Unable to subscribe to base notification stream. Schemas will not be reloaded on the fly", t);
+ }
+ });
+ }
+
+ // Schema-change listening requires both the connector-level switch and device support.
+ private boolean shouldListenOnSchemaChange(final NetconfSessionPreferences remoteSessionCapabilities) {
+ // Operands reordered relative to a plain capability check: the cheap local flag is
+ // consulted first; both sides are pure lookups so short-circuit order is immaterial.
+ return reconnectOnSchemasChange && remoteSessionCapabilities.isNotificationsSupported();
+ }
+
+ private void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionPreferences remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc) {
updateMessageTransformer(result);
salFacade.onDeviceConnected(result, remoteSessionCapabilities, deviceRpc);
notificationHandler.onRemoteSchemaUp();
- logger.debug("{}: Initialization in sal successful", id);
logger.info("{}: Netconf connector initialized successfully", id);
}
/**
* Update initial message transformer to use retrieved schema
- * @param currentSchemaContext
*/
private void updateMessageTransformer(final SchemaContext currentSchemaContext) {
messageTransformer.onGlobalContextUpdated(currentSchemaContext);
resetMessageTransformer();
}
+ /**
+ * Connection attempt failed before a session came up; propagate the cause
+ * straight to the sal facade so the device can be marked as failed.
+ * (Parameter made final for consistency with every other method in this file.)
+ */
+ @Override
+ public void onRemoteSessionFailed(final Throwable throwable) {
+ salFacade.onDeviceFailed(throwable);
+ }
+
@Override
public void onNotification(final NetconfMessage notification) {
notificationHandler.handleNotification(notification);
*/
private static class DeviceSourcesResolver implements Callable<DeviceSources> {
private final NetconfDeviceRpc deviceRpc;
- private final NetconfSessionCapabilities remoteSessionCapabilities;
+ private final NetconfSessionPreferences remoteSessionCapabilities;
private final RemoteDeviceId id;
private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
- public DeviceSourcesResolver(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id, final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
+ public DeviceSourcesResolver(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id, final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
this.deviceRpc = deviceRpc;
this.remoteSessionCapabilities = remoteSessionCapabilities;
this.id = id;
*/
private final class RecursiveSchemaSetup implements Runnable {
private final DeviceSources deviceSources;
- private final NetconfSessionCapabilities remoteSessionCapabilities;
+ private final NetconfSessionPreferences remoteSessionCapabilities;
private final NetconfDeviceRpc deviceRpc;
private final RemoteDeviceCommunicator<NetconfMessage> listener;
+ private NetconfDeviceCapabilities capabilities;
- public RecursiveSchemaSetup(final DeviceSources deviceSources, final NetconfSessionCapabilities remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc, final RemoteDeviceCommunicator<NetconfMessage> listener) {
+ public RecursiveSchemaSetup(final DeviceSources deviceSources, final NetconfSessionPreferences remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc, final RemoteDeviceCommunicator<NetconfMessage> listener) {
this.deviceSources = deviceSources;
this.remoteSessionCapabilities = remoteSessionCapabilities;
this.deviceRpc = deviceRpc;
this.listener = listener;
+ this.capabilities = remoteSessionCapabilities.getNetconfDeviceCapabilities();
}
@Override
/**
* Recursively build schema context, in case of success or final failure notify device
*/
+ // FIXME reimplement without recursion
private void setUpSchema(final Collection<SourceIdentifier> requiredSources) {
logger.trace("{}: Trying to build schema context from {}", id, requiredSources);
@Override
public void onSuccess(final SchemaContext result) {
logger.debug("{}: Schema context built successfully from {}", id, requiredSources);
+ Collection<QName> filteredQNames = Sets.difference(remoteSessionCapabilities.getModuleBasedCaps(), capabilities.getUnresolvedCapabilites().keySet());
+ capabilities.addCapabilities(filteredQNames);
+ capabilities.addNonModuleBasedCapabilities(remoteSessionCapabilities.getNonModuleCaps());
handleSalInitializationSuccess(result, remoteSessionCapabilities, deviceRpc);
}
if (t instanceof MissingSchemaSourceException) {
final SourceIdentifier missingSource = ((MissingSchemaSourceException) t).getSourceId();
logger.warn("{}: Unable to build schema context, missing source {}, will reattempt without it", id, missingSource);
+ capabilities.addUnresolvedCapabilities(getQNameFromSourceIdentifiers(Sets.newHashSet(missingSource)), UnavailableCapability.FailureReason.MissingSource);
setUpSchema(stripMissingSource(requiredSources, missingSource));
// In case resolution error, try only with resolved sources
} else if (t instanceof SchemaResolutionException) {
// TODO check for infinite loop
final SchemaResolutionException resolutionException = (SchemaResolutionException) t;
+ final Set<SourceIdentifier> unresolvedSources = resolutionException.getUnsatisfiedImports().keySet();
+ capabilities.addUnresolvedCapabilities(getQNameFromSourceIdentifiers(unresolvedSources), UnavailableCapability.FailureReason.UnableToResolve);
logger.warn("{}: Unable to build schema context, unsatisfied imports {}, will reattempt with resolved only", id, resolutionException.getUnsatisfiedImports());
setUpSchema(resolutionException.getResolvedSources());
// unknown error, fail
Preconditions.checkState(removed, "{}: Trying to remove {} from {} failed", id, sIdToRemove, requiredSources);
return sourceIdentifiers;
}
+
+ private Collection<QName> getQNameFromSourceIdentifiers(Collection<SourceIdentifier> identifiers) {
+ Collection<QName> qNames = new HashSet<>();
+ for (SourceIdentifier source : identifiers) {
+ Optional<QName> qname = getQNameFromSourceIdentifier(source);
+ if (qname.isPresent()) {
+ qNames.add(qname.get());
+ }
+ }
+ if (qNames.isEmpty()) {
+ logger.debug("Unable to map any source identfiers to a capability reported by device : " + identifiers);
+ }
+ return qNames;
+ }
+
+ /**
+ * Finds the advertised capability QName matching this source identifier
+ * (same local name and formatted revision), or absent when none matches.
+ *
+ * Fixed: the original threw IllegalArgumentException on a miss, which made the
+ * caller's Optional handling (qname.isPresent() / "qNames.isEmpty()" logging)
+ * unreachable and aborted mapping of all remaining identifiers on the first
+ * unmappable one. A miss is now non-fatal and reported via absent().
+ */
+ private Optional<QName> getQNameFromSourceIdentifier(final SourceIdentifier identifier) {
+ for (final QName qname : remoteSessionCapabilities.getModuleBasedCaps()) {
+ if (qname.getLocalName().equals(identifier.getName())
+ && qname.getFormattedRevision().equals(identifier.getRevision())) {
+ return Optional.of(qname);
+ }
+ }
+ logger.warn("Unable to map identifier to a devices reported capability: {}", identifier);
+ return Optional.absent();
+ }
}
}
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ExecutionException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
* Factory for NetconfStateSchemas
*/
public interface NetconfStateSchemasResolver {
- NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id);
+ NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id);
}
/**
public static final class NetconfStateSchemasResolverImpl implements NetconfStateSchemasResolver {
@Override
- public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
+ public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
return NetconfStateSchemas.create(deviceRpc, remoteSessionCapabilities, id);
}
}
/**
* Issue get request to remote device and parse response to find all schemas under netconf-state/schemas
*/
- private static NetconfStateSchemas create(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
+ private static NetconfStateSchemas create(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
if(remoteSessionCapabilities.isMonitoringSupported() == false) {
logger.warn("{}: Netconf monitoring not supported on device, cannot detect provided schemas");
return EMPTY;
*/
package org.opendaylight.controller.sal.connect.netconf;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.util.LinkedList;
import java.util.List;
private final MessageTransformer<NetconfMessage> messageTransformer;
private final RemoteDeviceId id;
private boolean passNotifications = false;
+ private NotificationFilter filter;
NotificationHandler(final RemoteDeviceHandler<?> salFacade, final MessageTransformer<NetconfMessage> messageTransformer, final RemoteDeviceId id) {
this.salFacade = Preconditions.checkNotNull(salFacade);
queue.add(notification);
}
- private void passNotification(final CompositeNode parsedNotification) {
+ private synchronized void passNotification(final CompositeNode parsedNotification) {
logger.debug("{}: Forwarding notification {}", id, parsedNotification);
Preconditions.checkNotNull(parsedNotification);
- salFacade.onNotification(parsedNotification);
+
+ if(filter == null || filter.filterNotification(parsedNotification).isPresent()) {
+ salFacade.onNotification(parsedNotification);
+ }
+ }
+
+ // Installs (or replaces) the single notification filter consulted by
+ // passNotification(); synchronized against concurrent notification delivery.
+ synchronized void addNotificationFilter(final NotificationFilter filter) {
+ this.filter = filter;
+ }
+
+ /**
+ * Hook invoked for every parsed notification; returning absent() consumes the
+ * notification so it is not forwarded to the sal facade.
+ * (Dropped the redundant "static" modifier — member interfaces are implicitly static.)
+ */
+ interface NotificationFilter {
+
+ Optional<CompositeNode> filterNotification(CompositeNode notification);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.listener;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapability.FailureReason;
+import org.opendaylight.yangtools.yang.common.QName;
+
+/**
+ * Mutable record of how a device's advertised capabilities were resolved:
+ * capabilities whose schemas resolved successfully, capabilities that failed
+ * (keyed to a FailureReason), and capabilities that are not module/schema based.
+ *
+ * NOTE(review): backed by plain HashMap/HashSet and the getters expose the
+ * internal collections directly, so this class is not thread-safe and callers
+ * can mutate its state — confirm that is acceptable for all consumers.
+ */
+public final class NetconfDeviceCapabilities {
+ // NOTE(review): "Capabilites" is misspelled here and in the public getter
+ // getUnresolvedCapabilites(); the getter is already called elsewhere in this
+ // change, so renaming it would break callers.
+ private final Map<QName, FailureReason> unresolvedCapabilites;
+ private final Set<QName> resolvedCapabilities;
+
+ private final Set<String> nonModuleBasedCapabilities;
+
+ public NetconfDeviceCapabilities() {
+ this.unresolvedCapabilites = new HashMap<>();
+ this.resolvedCapabilities = new HashSet<>();
+ this.nonModuleBasedCapabilities = new HashSet<>();
+ }
+
+ // Records a single capability whose schema could not be resolved.
+ public void addUnresolvedCapability(QName source, FailureReason reason) {
+ unresolvedCapabilites.put(source, reason);
+ }
+
+ // Records many unresolved capabilities sharing one failure reason.
+ public void addUnresolvedCapabilities(Collection<QName> capabilities, FailureReason reason) {
+ for (QName s : capabilities) {
+ unresolvedCapabilites.put(s, reason);
+ }
+ }
+
+ // Records capabilities whose schemas resolved successfully.
+ public void addCapabilities(Collection<QName> availableSchemas) {
+ resolvedCapabilities.addAll(availableSchemas);
+ }
+
+ public void addNonModuleBasedCapabilities(Collection<String> nonModuleCapabilities) {
+ this.nonModuleBasedCapabilities.addAll(nonModuleCapabilities);
+ }
+
+ public Set<String> getNonModuleBasedCapabilities() {
+ return nonModuleBasedCapabilities;
+ }
+
+ public Map<QName, FailureReason> getUnresolvedCapabilites() {
+ return unresolvedCapabilites;
+ }
+
+ public Set<QName> getResolvedCapabilities() {
+ return resolvedCapabilities;
+ }
+
+}
import com.google.common.util.concurrent.ListenableFuture;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
+import io.netty.util.concurrent.GenericFutureListener;
import java.util.ArrayDeque;
import java.util.Iterator;
import java.util.List;
private static final Logger logger = LoggerFactory.getLogger(NetconfDeviceCommunicator.class);
- private final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice;
- private final Optional<NetconfSessionCapabilities> overrideNetconfCapabilities;
+ private final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice;
+ private final Optional<NetconfSessionPreferences> overrideNetconfCapabilities;
private final RemoteDeviceId id;
private final Lock sessionLock = new ReentrantLock();
private NetconfClientSession session;
private Future<?> initFuture;
- public NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice,
- final NetconfSessionCapabilities netconfSessionCapabilities) {
- this(id, remoteDevice, Optional.of(netconfSessionCapabilities));
+ // Fixed: the parameter was named "NetconfSessionPreferences", identical to its type,
+ // shadowing the class name (static references would resolve to the variable) and
+ // violating Java naming conventions; renamed to lowerCamelCase.
+ public NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice,
+ final NetconfSessionPreferences netconfSessionPreferences) {
+ this(id, remoteDevice, Optional.of(netconfSessionPreferences));
}
public NetconfDeviceCommunicator(final RemoteDeviceId id,
- final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice) {
- this(id, remoteDevice, Optional.<NetconfSessionCapabilities>absent());
+ final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice) {
+ this(id, remoteDevice, Optional.<NetconfSessionPreferences>absent());
}
- private NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice,
- final Optional<NetconfSessionCapabilities> overrideNetconfCapabilities) {
+ private NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice,
+ final Optional<NetconfSessionPreferences> overrideNetconfCapabilities) {
this.id = id;
this.remoteDevice = remoteDevice;
this.overrideNetconfCapabilities = overrideNetconfCapabilities;
logger.debug("{}: Session established", id);
this.session = session;
- NetconfSessionCapabilities netconfSessionCapabilities =
- NetconfSessionCapabilities.fromNetconfSession(session);
- logger.trace("{}: Session advertised capabilities: {}", id, netconfSessionCapabilities);
+ NetconfSessionPreferences netconfSessionPreferences =
+ NetconfSessionPreferences.fromNetconfSession(session);
+ logger.trace("{}: Session advertised capabilities: {}", id, netconfSessionPreferences);
if(overrideNetconfCapabilities.isPresent()) {
- netconfSessionCapabilities = netconfSessionCapabilities.replaceModuleCaps(overrideNetconfCapabilities.get());
- logger.debug("{}: Session capabilities overridden, capabilities that will be used: {}", id, netconfSessionCapabilities);
+ netconfSessionPreferences = netconfSessionPreferences.replaceModuleCaps(overrideNetconfCapabilities.get());
+ logger.debug("{}: Session capabilities overridden, capabilities that will be used: {}", id, netconfSessionPreferences);
}
- remoteDevice.onRemoteSessionUp(netconfSessionCapabilities, this);
+ remoteDevice.onRemoteSessionUp(netconfSessionPreferences, this);
}
finally {
sessionLock.unlock();
}
}
- public void initializeRemoteConnection(final NetconfClientDispatcher dispatch,
- final NetconfClientConfiguration config) {
+ public void initializeRemoteConnection(final NetconfClientDispatcher dispatcher, final NetconfClientConfiguration config) {
+ // TODO 2313 extract listener from configuration
if(config instanceof NetconfReconnectingClientConfiguration) {
- initFuture = dispatch.createReconnectingClient((NetconfReconnectingClientConfiguration) config);
+ initFuture = dispatcher.createReconnectingClient((NetconfReconnectingClientConfiguration) config);
} else {
- initFuture = dispatch.createClient(config);
+ initFuture = dispatcher.createClient(config);
+ }
+
+
+ initFuture.addListener(new GenericFutureListener<Future<Object>>(){
+
+ @Override
+ public void operationComplete(Future<Object> future) throws Exception {
+ if (!future.isSuccess()) {
+ logger.debug("{}: Connection failed", id, future.cause());
+ NetconfDeviceCommunicator.this.remoteDevice.onRemoteSessionFailed(future.cause());
+ }
+ }
+ });
+
+ }
+
+ // Closes the current netconf session, if any. The reconnecting connector
+ // (when configured) brings the session back up on its own.
+ // NOTE(review): `session` is read here without taking sessionLock, unlike the
+ // other session accesses in this class — confirm whether a race with
+ // onSessionUp/onSessionDown matters here.
+ public void disconnect() {
+ if(session != null) {
+ session.close();
}
}
}
}
- private RpcResult<NetconfMessage> createSessionDownRpcResult()
- {
+ private RpcResult<NetconfMessage> createSessionDownRpcResult() {
return createErrorRpcResult( RpcError.ErrorType.TRANSPORT,
String.format( "The netconf session to %1$s is disconnected", id.getName() ) );
}
- private RpcResult<NetconfMessage> createErrorRpcResult( RpcError.ErrorType errorType, String message )
- {
+ private RpcResult<NetconfMessage> createErrorRpcResult( RpcError.ErrorType errorType, String message ) {
return RpcResultBuilder.<NetconfMessage>failed()
- .withError( errorType, NetconfDocumentedException.ErrorTag.operation_failed.getTagValue(),
- message )
- .build();
+ .withError(errorType, NetconfDocumentedException.ErrorTag.operation_failed.getTagValue(), message).build();
}
@Override
if(session != null) {
session.close();
}
+
tearDown(id + ": Netconf session closed");
}
logger.debug("{}: Message received {}", id, message);
if(logger.isTraceEnabled()) {
- logger.trace( "{}: Matched request: {} to response: {}", id,
- msgToS( request.request ), msgToS( message ) );
+ logger.trace( "{}: Matched request: {} to response: {}", id, msgToS( request.request ), msgToS( message ) );
}
try {
NetconfMessageTransformUtil.checkValidReply( request.request, message );
- }
- catch (final NetconfDocumentedException e) {
+ } catch (final NetconfDocumentedException e) {
logger.warn( "{}: Invalid request-reply match, reply message contains different message-id, request: {}, response: {}",
id, msgToS( request.request ), msgToS( message ), e );
try {
NetconfMessageTransformUtil.checkSuccessReply(message);
- }
- catch(final NetconfDocumentedException e) {
+ } catch(final NetconfDocumentedException e) {
logger.warn( "{}: Error reply from remote device, request: {}, response: {}", id,
msgToS( request.request ), msgToS( message ), e );
}
@Override
- public ListenableFuture<RpcResult<NetconfMessage>> sendRequest(
- final NetconfMessage message, final QName rpc) {
+ public ListenableFuture<RpcResult<NetconfMessage>> sendRequest(final NetconfMessage message, final QName rpc) {
sessionLock.lock();
try {
return sendRequestWithLock( message, rpc );
- }
- finally {
+ } finally {
sessionLock.unlock();
}
}
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public final class NetconfSessionCapabilities {
+public final class NetconfSessionPreferences {
private static final class ParameterMatcher {
private final Predicate<String> predicate;
}
}
- private static final Logger LOG = LoggerFactory.getLogger(NetconfSessionCapabilities.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfSessionPreferences.class);
private static final ParameterMatcher MODULE_PARAM = new ParameterMatcher("module=");
private static final ParameterMatcher REVISION_PARAM = new ParameterMatcher("revision=");
private static final ParameterMatcher BROKEN_REVISON_PARAM = new ParameterMatcher("amp;revision=");
private final Set<QName> moduleBasedCaps;
private final Set<String> nonModuleCaps;
- private NetconfSessionCapabilities(final Set<String> nonModuleCaps, final Set<QName> moduleBasedCaps) {
+ private NetconfSessionPreferences(final Set<String> nonModuleCaps, final Set<QName> moduleBasedCaps) {
this.nonModuleCaps = Preconditions.checkNotNull(nonModuleCaps);
this.moduleBasedCaps = Preconditions.checkNotNull(moduleBasedCaps);
}
return containsNonModuleCapability(NetconfMessageTransformUtil.NETCONF_RUNNING_WRITABLE_URI.toString());
}
+ // True when the device advertises notification support, either via the base
+ // notifications capability URI or the ietf-netconf-notifications module.
+ public boolean isNotificationsSupported() {
+ final boolean baseNotificationsCapability =
+ containsNonModuleCapability(NetconfMessageTransformUtil.NETCONF_NOTIFICATONS_URI.toString());
+ return baseNotificationsCapability
+ || containsModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_NOTIFICATIONS);
+ }
+
public boolean isMonitoringSupported() {
return containsModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING)
|| containsNonModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING.getNamespace().toString());
}
- public NetconfSessionCapabilities replaceModuleCaps(final NetconfSessionCapabilities netconfSessionModuleCapabilities) {
+ public NetconfSessionPreferences replaceModuleCaps(final NetconfSessionPreferences netconfSessionModuleCapabilities) {
final Set<QName> moduleBasedCaps = Sets.newHashSet(netconfSessionModuleCapabilities.getModuleBasedCaps());
// Preserve monitoring module, since it indicates support for ietf-netconf-monitoring
if(containsModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING)) {
moduleBasedCaps.add(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING);
}
- return new NetconfSessionCapabilities(getNonModuleCaps(), moduleBasedCaps);
+ return new NetconfSessionPreferences(getNonModuleCaps(), moduleBasedCaps);
}
- public static NetconfSessionCapabilities fromNetconfSession(final NetconfClientSession session) {
+ public static NetconfSessionPreferences fromNetconfSession(final NetconfClientSession session) {
return fromStrings(session.getServerCapabilities());
}
return QName.cachedReference(QName.create(URI.create(namespace), null, moduleName).withoutRevision());
}
- public static NetconfSessionCapabilities fromStrings(final Collection<String> capabilities) {
+ public static NetconfSessionPreferences fromStrings(final Collection<String> capabilities) {
final Set<QName> moduleBasedCaps = new HashSet<>();
final Set<String> nonModuleCaps = Sets.newHashSet(capabilities);
addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
}
- return new NetconfSessionCapabilities(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
+ return new NetconfSessionPreferences(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
}
moduleBasedCaps.add(qName);
nonModuleCaps.remove(capability);
}
+
+ // Per-session record of resolved/unresolved capabilities, populated during
+ // schema setup (see RecursiveSchemaSetup).
+ // NOTE(review): never reassigned in the code visible here — consider making it
+ // final. Also note replaceModuleCaps() constructs a new NetconfSessionPreferences,
+ // which starts with a fresh, empty NetconfDeviceCapabilities; confirm intended.
+ private NetconfDeviceCapabilities capabilities = new NetconfDeviceCapabilities();
+
+ public NetconfDeviceCapabilities getNetconfDeviceCapabilities() {
+ return capabilities;
+ }
+
+
}
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.tx.ReadOnlyTx;
import org.opendaylight.controller.sal.connect.netconf.sal.tx.ReadWriteTx;
import org.opendaylight.controller.sal.connect.netconf.sal.tx.WriteCandidateTx;
final class NetconfDeviceDataBroker implements DOMDataBroker {
private final RemoteDeviceId id;
private final NetconfBaseOps netconfOps;
- private final NetconfSessionCapabilities netconfSessionPreferences;
+ private final NetconfSessionPreferences netconfSessionPreferences;
private final DataNormalizer normalizer;
- public NetconfDeviceDataBroker(final RemoteDeviceId id, final RpcImplementation rpc, final SchemaContext schemaContext, final NetconfSessionCapabilities netconfSessionPreferences) {
+ public NetconfDeviceDataBroker(final RemoteDeviceId id, final RpcImplementation rpc, final SchemaContext schemaContext, final NetconfSessionPreferences netconfSessionPreferences) {
this.id = id;
this.netconfOps = new NetconfBaseOps(rpc);
this.netconfSessionPreferences = netconfSessionPreferences;
*
* All data changes are submitted to an ExecutorService to avoid Thread blocking while sal is waiting for schema.
*/
+@Deprecated
final class NetconfDeviceDatastoreAdapter implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(NetconfDeviceDatastoreAdapter.class);
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public final class NetconfDeviceSalFacade implements AutoCloseable, RemoteDeviceHandler<NetconfSessionCapabilities> {
+public final class NetconfDeviceSalFacade implements AutoCloseable, RemoteDeviceHandler<NetconfSessionPreferences> {
private static final Logger logger= LoggerFactory.getLogger(NetconfDeviceSalFacade.class);
@Override
public synchronized void onDeviceConnected(final SchemaContext schemaContext,
- final NetconfSessionCapabilities netconfSessionPreferences, final RpcImplementation deviceRpc) {
+ final NetconfSessionPreferences netconfSessionPreferences, final RpcImplementation deviceRpc) {
// TODO move SchemaAwareRpcBroker from sal-broker-impl, now we have depend on the whole sal-broker-impl
final RpcProvisionRegistry rpcRegistry = new SchemaAwareRpcBroker(id.getPath().toString(), new SchemaContextProvider() {
salProvider.getMountInstance().onDeviceConnected(schemaContext, domBroker, rpcRegistry, notificationService);
salProvider.getDatastoreAdapter().updateDeviceState(true, netconfSessionPreferences.getModuleBasedCaps());
+ salProvider.getMountInstance().onTopologyDeviceConnected(schemaContext, domBroker, rpcRegistry, notificationService);
+ salProvider.getTopologyDatastoreAdapter().updateDeviceData(true, netconfSessionPreferences.getNetconfDeviceCapabilities());
}
@Override
public synchronized void onDeviceDisconnected() {
salProvider.getDatastoreAdapter().updateDeviceState(false, Collections.<QName>emptySet());
+ salProvider.getTopologyDatastoreAdapter().updateDeviceData(false, new NetconfDeviceCapabilities());
salProvider.getMountInstance().onDeviceDisconnected();
+ salProvider.getMountInstance().onTopologyDeviceDisconnected();
+ }
+
+ @Override
+ public void onDeviceFailed(Throwable throwable) {
+ salProvider.getTopologyDatastoreAdapter().setDeviceAsFailed(throwable);
+ salProvider.getMountInstance().onDeviceDisconnected();
+ salProvider.getMountInstance().onTopologyDeviceDisconnected();
}
private void registerRpcsToSal(final SchemaContext schemaContext, final RpcProvisionRegistry rpcRegistry, final RpcImplementation deviceRpc) {
private volatile NetconfDeviceDatastoreAdapter datastoreAdapter;
private MountInstance mountInstance;
+ private volatile NetconfDeviceTopologyAdapter topologyDatastoreAdapter;
+
public NetconfDeviceSalProvider(final RemoteDeviceId deviceId, final ExecutorService executor) {
this.id = deviceId;
this.executor = executor;
return datastoreAdapter;
}
+ /**
+ * Returns the topology datastore adapter; fails fast if onSessionInitiated
+ * has not run yet.
+ * Fixed: the checkState template had two %s placeholders but only one
+ * argument (id), so the message would render with a dangling placeholder.
+ */
+ public NetconfDeviceTopologyAdapter getTopologyDatastoreAdapter() {
+ Preconditions.checkState(topologyDatastoreAdapter != null,
+ "%s: Sal provider was not initialized by sal. Cannot get topology datastore adapter", id);
+ return topologyDatastoreAdapter;
+ }
+
@Override
public void onSessionInitiated(final Broker.ProviderSession session) {
logger.debug("{}: (BI)Session with sal established {}", id, session);
final DataBroker dataBroker = session.getSALService(DataBroker.class);
datastoreAdapter = new NetconfDeviceDatastoreAdapter(id, dataBroker);
+
+ topologyDatastoreAdapter = new NetconfDeviceTopologyAdapter(id, dataBroker);
}
public void close() throws Exception {
private ObjectRegistration<DOMMountPoint> registration;
private NotificationPublishService notificationSerivce;
+ private ObjectRegistration<DOMMountPoint> topologyRegistration;
+
MountInstance(final DOMMountPointService mountService, final RemoteDeviceId id) {
+ // Both collaborators are mandatory; fail fast on misconfiguration.
this.mountService = Preconditions.checkNotNull(mountService);
this.id = Preconditions.checkNotNull(id);
}
+ @Deprecated
synchronized void onDeviceConnected(final SchemaContext initialCtx,
final DOMDataBroker broker, final RpcProvisionRegistry rpc,
final NotificationPublishService notificationSerivce) {
registration = mountBuilder.register();
}
+ @Deprecated
synchronized void onDeviceDisconnected() {
if(registration == null) {
return;
}
}
+ // Topology counterpart of the deprecated onDeviceConnected(): registers a
+ // mount point under the device's network-topology path (id.getTopologyPath())
+ // exposing the data broker, RPC registry and notification service.
+ // May be called at most once until onTopologyDeviceDisconnected() runs.
+ synchronized void onTopologyDeviceConnected(final SchemaContext initialCtx,
+ final DOMDataBroker broker, final RpcProvisionRegistry rpc,
+ final NotificationPublishService notificationSerivce) {
+
+ Preconditions.checkNotNull(mountService, "Closed");
+ Preconditions.checkState(topologyRegistration == null, "Already initialized");
+
+ final DOMMountPointService.DOMMountPointBuilder mountBuilder = mountService.createMountPoint(id.getTopologyPath());
+ mountBuilder.addInitialSchemaContext(initialCtx);
+
+ mountBuilder.addService(DOMDataBroker.class, broker);
+ mountBuilder.addService(RpcProvisionRegistry.class, rpc);
+ // NOTE(review): "notificationSerivce" is a typo of "notificationService"
+ // carried over from the existing field declaration; rename both together.
+ this.notificationSerivce = notificationSerivce;
+ mountBuilder.addService(NotificationPublishService.class, notificationSerivce);
+
+ topologyRegistration = mountBuilder.register();
+ }
+
+ // Unregisters the topology mount point. Idempotent: a no-op when no
+ // topology mount point is currently registered.
+ synchronized void onTopologyDeviceDisconnected() {
+ if(topologyRegistration == null) {
+ return;
+ }
+
+ try {
+ topologyRegistration.close();
+ } catch (final Exception e) {
+ // Only log and ignore
+ logger.warn("Unable to unregister mount instance for {}. Ignoring exception", id.getTopologyPath(), e);
+ } finally {
+ // Always clear so a subsequent connect can register again.
+ topologyRegistration = null;
+ }
+ }
+
@Override
synchronized public void close() throws Exception {
if(registration != null) {
onDeviceDisconnected();
+ onTopologyDeviceDisconnected();
}
mountService = null;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.sal;
+
+import com.google.common.base.Function;
+import com.google.common.collect.FluentIterable;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.PortNumber;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeFields.ConnectionStatus;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.AvailableCapabilitiesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.UnavailableCapabilities;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.UnavailableCapabilitiesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapability;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapability.FailureReason;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapabilityBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.network.topology.topology.topology.types.TopologyNetconf;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class NetconfDeviceTopologyAdapter implements AutoCloseable {
+
+ public static final Logger logger = LoggerFactory.getLogger(NetconfDeviceTopologyAdapter.class);
+ public static final Function<Entry<QName, FailureReason>, UnavailableCapability> UNAVAILABLE_CAPABILITY_TRANSFORMER = new Function<Entry<QName, FailureReason>, UnavailableCapability>() {
+ @Override
+ public UnavailableCapability apply(final Entry<QName, FailureReason> input) {
+ return new UnavailableCapabilityBuilder()
+ .setCapability(input.getKey().toString())
+ .setFailureReason(input.getValue()).build();
+ }
+ };
+ public static final Function<QName, String> AVAILABLE_CAPABILITY_TRANSFORMER = new Function<QName, String>() {
+ @Override
+ public String apply(QName qName) {
+ return qName.toString();
+ }
+ };
+
+ private final RemoteDeviceId id;
+ private final DataBroker dataService;
+
+ private final InstanceIdentifier<NetworkTopology> networkTopologyPath;
+ private final KeyedInstanceIdentifier<Topology, TopologyKey> topologyListPath;
+ private static final String UNKNOWN_REASON = "Unknown reason";
+
+ NetconfDeviceTopologyAdapter(final RemoteDeviceId id, final DataBroker dataService) {
+ this.id = id;
+ this.dataService = dataService;
+
+ this.networkTopologyPath = InstanceIdentifier.builder(NetworkTopology.class).build();
+ this.topologyListPath = networkTopologyPath.child(Topology.class, new TopologyKey(new TopologyId(TopologyNetconf.QNAME.getLocalName())));
+
+ initDeviceData();
+ }
+
+ private void initDeviceData() {
+ final WriteTransaction writeTx = dataService.newWriteOnlyTransaction();
+
+ createNetworkTopologyIfNotPresent(writeTx);
+
+ final InstanceIdentifier<Node> path = id.getTopologyBindingPath();
+ NodeBuilder nodeBuilder = getNodeIdBuilder(id);
+ NetconfNodeBuilder netconfNodeBuilder = new NetconfNodeBuilder();
+ netconfNodeBuilder.setConnectionStatus(ConnectionStatus.Connecting);
+ netconfNodeBuilder.setHost(id.getHost());
+ netconfNodeBuilder.setPort(new PortNumber(id.getAddress().getPort()));
+ nodeBuilder.addAugmentation(NetconfNode.class, netconfNodeBuilder.build());
+ Node node = nodeBuilder.build();
+
+ logger.trace("{}: Init device state transaction {} putting if absent operational data started.", id, writeTx.getIdentifier());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, path, node);
+ logger.trace("{}: Init device state transaction {} putting operational data ended.", id, writeTx.getIdentifier());
+
+ logger.trace("{}: Init device state transaction {} putting if absent config data started.", id, writeTx.getIdentifier());
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, path, getNodeWithId(id));
+ logger.trace("{}: Init device state transaction {} putting config data ended.", id, writeTx.getIdentifier());
+
+ commitTransaction(writeTx, "init");
+ }
+
+ public void updateDeviceData(boolean up, NetconfDeviceCapabilities capabilities) {
+ final Node data = buildDataForNetconfNode(up, capabilities);
+
+ final WriteTransaction writeTx = dataService.newWriteOnlyTransaction();
+ logger.trace("{}: Update device state transaction {} merging operational data started.", id, writeTx.getIdentifier());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, id.getTopologyBindingPath(), data);
+ logger.trace("{}: Update device state transaction {} merging operational data ended.", id, writeTx.getIdentifier());
+
+ commitTransaction(writeTx, "update");
+ }
+
+ public void setDeviceAsFailed(Throwable throwable) {
+ String reason = (throwable != null && throwable.getMessage() != null) ? throwable.getMessage() : UNKNOWN_REASON;
+
+ final NetconfNode netconfNode = new NetconfNodeBuilder().setConnectionStatus(ConnectionStatus.UnableToConnect).setConnectedMessage(reason).build();
+ final Node data = getNodeIdBuilder(id).addAugmentation(NetconfNode.class, netconfNode).build();
+
+ final WriteTransaction writeTx = dataService.newWriteOnlyTransaction();
+ logger.trace("{}: Setting device state as failed {} putting operational data started.", id, writeTx.getIdentifier());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, id.getTopologyBindingPath(), data);
+ logger.trace("{}: Setting device state as failed {} putting operational data ended.", id, writeTx.getIdentifier());
+
+ commitTransaction(writeTx, "update-failed-device");
+ }
+
+ private Node buildDataForNetconfNode(boolean up, NetconfDeviceCapabilities capabilities) {
+ List<String> capabilityList = new ArrayList<>();
+ capabilityList.addAll(capabilities.getNonModuleBasedCapabilities());
+ capabilityList.addAll(FluentIterable.from(capabilities.getResolvedCapabilities()).transform(AVAILABLE_CAPABILITY_TRANSFORMER).toList());
+ final AvailableCapabilitiesBuilder avCapabalitiesBuilder = new AvailableCapabilitiesBuilder();
+ avCapabalitiesBuilder.setAvailableCapability(capabilityList);
+
+ final UnavailableCapabilities unavailableCapabilities =
+ new UnavailableCapabilitiesBuilder().setUnavailableCapability(FluentIterable.from(capabilities.getUnresolvedCapabilites().entrySet())
+ .transform(UNAVAILABLE_CAPABILITY_TRANSFORMER).toList()).build();
+
+ final NetconfNodeBuilder netconfNodeBuilder = new NetconfNodeBuilder()
+ .setHost(id.getHost())
+ .setPort(new PortNumber(id.getAddress().getPort()))
+ .setConnectionStatus(up ? ConnectionStatus.Connected : ConnectionStatus.Connecting)
+ .setAvailableCapabilities(avCapabalitiesBuilder.build())
+ .setUnavailableCapabilities(unavailableCapabilities);
+
+ final NodeBuilder nodeBuilder = getNodeIdBuilder(id);
+ final Node node = nodeBuilder.addAugmentation(NetconfNode.class, netconfNodeBuilder.build()).build();
+
+ return node;
+ }
+
+ public void removeDeviceConfiguration() {
+ final WriteTransaction writeTx = dataService.newWriteOnlyTransaction();
+
+ logger.trace("{}: Close device state transaction {} removing all data started.", id, writeTx.getIdentifier());
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, id.getTopologyBindingPath());
+ writeTx.delete(LogicalDatastoreType.OPERATIONAL, id.getTopologyBindingPath());
+ logger.trace("{}: Close device state transaction {} removing all data ended.", id, writeTx.getIdentifier());
+
+ commitTransaction(writeTx, "close");
+ }
+
+ private void createNetworkTopologyIfNotPresent(final WriteTransaction writeTx) {
+
+ final NetworkTopology networkTopology = new NetworkTopologyBuilder().build();
+ logger.trace("{}: Merging {} container to ensure its presence", id, networkTopology.QNAME, writeTx.getIdentifier());
+ writeTx.merge(LogicalDatastoreType.CONFIGURATION, networkTopologyPath, networkTopology);
+ writeTx.merge(LogicalDatastoreType.OPERATIONAL, networkTopologyPath, networkTopology);
+
+ final Topology topology = new TopologyBuilder().setTopologyId(new TopologyId(TopologyNetconf.QNAME.getLocalName())).build();
+ logger.trace("{}: Merging {} container to ensure its presence", id, topology.QNAME, writeTx.getIdentifier());
+ writeTx.merge(LogicalDatastoreType.CONFIGURATION, topologyListPath, topology);
+ writeTx.merge(LogicalDatastoreType.OPERATIONAL, topologyListPath, topology);
+ }
+
+ private void commitTransaction(final WriteTransaction transaction, final String txType) {
+ logger.trace("{}: Committing Transaction {}:{}", id, txType, transaction.getIdentifier());
+ final CheckedFuture<Void, TransactionCommitFailedException> result = transaction.submit();
+
+ Futures.addCallback(result, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ logger.trace("{}: Transaction({}) {} SUCCESSFUL", id, txType, transaction.getIdentifier());
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ logger.error("{}: Transaction({}) {} FAILED!", id, txType, transaction.getIdentifier(), t);
+ throw new IllegalStateException(id + " Transaction(" + txType + ") not committed correctly", t);
+ }
+ });
+
+ }
+
+ private static Node getNodeWithId(final RemoteDeviceId id) {
+ final NodeBuilder builder = getNodeIdBuilder(id);
+ return builder.build();
+ }
+
+ private static NodeBuilder getNodeIdBuilder(final RemoteDeviceId id) {
+ final NodeBuilder nodeBuilder = new NodeBuilder();
+ nodeBuilder.setKey(new NodeKey(new NodeId(id.getName())));
+ return nodeBuilder;
+ }
+
+ @Override
+ public void close() throws Exception {
+ removeDeviceConfiguration();
+ }
+}
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.yangtools.yang.common.RpcResult;
protected final RemoteDeviceId id;
protected final NetconfBaseOps netOps;
protected final DataNormalizer normalizer;
- protected final NetconfSessionCapabilities netconfSessionPreferences;
+ protected final NetconfSessionPreferences netconfSessionPreferences;
// Allow commit to be called only once
protected boolean finished = false;
- public AbstractWriteTx(final NetconfBaseOps netOps, final RemoteDeviceId id, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ public AbstractWriteTx(final NetconfBaseOps netOps, final RemoteDeviceId id, final DataNormalizer normalizer, final NetconfSessionPreferences netconfSessionPreferences) {
this.netOps = netOps;
this.id = id;
this.normalizer = normalizer;
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
private static final Logger LOG = LoggerFactory.getLogger(WriteCandidateRunningTx.class);
- public WriteCandidateRunningTx(final RemoteDeviceId id, final NetconfBaseOps netOps, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ public WriteCandidateRunningTx(final RemoteDeviceId id, final NetconfBaseOps netOps, final DataNormalizer normalizer, final NetconfSessionPreferences netconfSessionPreferences) {
super(id, netOps, normalizer, netconfSessionPreferences);
}
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
}
};
- public WriteCandidateTx(final RemoteDeviceId id, final NetconfBaseOps rpc, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ public WriteCandidateTx(final RemoteDeviceId id, final NetconfBaseOps rpc, final DataNormalizer normalizer, final NetconfSessionPreferences netconfSessionPreferences) {
super(rpc, id, normalizer, netconfSessionPreferences);
}
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
private static final Logger LOG = LoggerFactory.getLogger(WriteRunningTx.class);
public WriteRunningTx(final RemoteDeviceId id, final NetconfBaseOps netOps,
- final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ final DataNormalizer normalizer, final NetconfSessionPreferences netconfSessionPreferences) {
super(netOps, id, normalizer, netconfSessionPreferences);
}
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.CreateSubscriptionInput;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.NetconfState;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
public class NetconfMessageTransformUtil {
public static final String MESSAGE_ID_ATTR = "message-id";
+ public static final QName CREATE_SUBSCRIPTION_RPC_QNAME = QName.cachedReference(QName.create(CreateSubscriptionInput.QNAME, "create-subscription"));
private NetconfMessageTransformUtil() {}
public static final QName IETF_NETCONF_MONITORING_SCHEMA_VERSION = QName.create(IETF_NETCONF_MONITORING, "version");
public static final QName IETF_NETCONF_MONITORING_SCHEMA_NAMESPACE = QName.create(IETF_NETCONF_MONITORING, "namespace");
+ public static final QName IETF_NETCONF_NOTIFICATIONS = QName.create(NetconfCapabilityChange.QNAME, "ietf-netconf-notifications");
+
public static URI NETCONF_URI = URI.create("urn:ietf:params:xml:ns:netconf:base:1.0");
public static QName NETCONF_QNAME = QName.create(NETCONF_URI, null, "netconf");
public static QName NETCONF_DATA_QNAME = QName.create(NETCONF_QNAME, "data");
public static URI NETCONF_CANDIDATE_URI = URI
.create("urn:ietf:params:netconf:capability:candidate:1.0");
+ public static URI NETCONF_NOTIFICATONS_URI = URI
+ .create("urn:ietf:params:netconf:capability:notification:1.0");
+
public static URI NETCONF_RUNNING_WRITABLE_URI = URI
.create("urn:ietf:params:netconf:capability:writable-running:1.0");
public static final CompositeNode COMMIT_RPC_CONTENT =
NodeFactory.createImmutableCompositeNode(NETCONF_COMMIT_QNAME, null, Collections.<Node<?>>emptyList());
+ // Create-subscription changes message
+ public static final CompositeNode CREATE_SUBSCRIPTION_RPC_CONTENT =
+ NodeFactory.createImmutableCompositeNode(CREATE_SUBSCRIPTION_RPC_QNAME, null, Collections.<Node<?>>emptyList());
+
public static Node<?> toFilterStructure(final YangInstanceIdentifier identifier) {
Node<?> previous = null;
if (Iterables.isEmpty(identifier.getPathArguments())) {
*/
package org.opendaylight.controller.sal.connect.util;
+import com.google.common.base.Preconditions;
+import java.net.InetSocketAddress;
import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Host;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.HostBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.network.topology.topology.topology.types.TopologyNetconf;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-import com.google.common.base.Preconditions;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-public class RemoteDeviceId {
+public final class RemoteDeviceId {
private final String name;
private final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier path;
private final InstanceIdentifier<Node> bindingPath;
private final NodeKey key;
+ private final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier topologyPath;
+ private final InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node> topologyBindingPath;
+ private InetSocketAddress address;
+ private Host host;
+ // Deprecated in favour of the constructors that also carry the device's
+ // Host/InetSocketAddress (needed by the topology datastore adapter).
+ @Deprecated
public RemoteDeviceId(final ModuleIdentifier identifier) {
this(Preconditions.checkNotNull(identifier).getInstanceName());
}
+ // NOTE(review): this constructor leaves "address" null, so getAddress()
+ // returns null for ids built this way — callers must tolerate that.
+ public RemoteDeviceId(final ModuleIdentifier identifier, Host host) {
+ this(identifier);
+ this.host = host;
+ }
+
+ // Derives the Host from the given socket address (see buildHost()).
+ public RemoteDeviceId(final ModuleIdentifier identifier, InetSocketAddress address) {
+ this(identifier);
+ this.address = address;
+ this.host = buildHost();
+ }
+
+ // Base constructor: precomputes the inventory and topology paths (both
+ // binding and binding-independent) for this device name. Deprecated for
+ // external use in favour of the address-aware constructors.
+ @Deprecated
public RemoteDeviceId(final String name) {
Preconditions.checkNotNull(name);
this.name = name;
this.key = new NodeKey(new NodeId(name));
this.path = createBIPath(name);
this.bindingPath = createBindingPath(key);
+ this.topologyPath = createBIPathForTopology(name);
+ this.topologyBindingPath = createBindingPathForTopology(key);
+ }
+
+ // Name + socket address variant; derives the Host from the address.
+ public RemoteDeviceId(final String name, InetSocketAddress address) {
+ this(name);
+ this.address = address;
+ this.host = buildHost();
+ }
private static InstanceIdentifier<Node> createBindingPath(final NodeKey key) {
return builder.build();
}
+ // Binding path of this device's node under
+ // /network-topology/topology[topology-id='topology-netconf']/node[node-id=<name>].
+ private static InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node> createBindingPathForTopology(final NodeKey key) {
+ final InstanceIdentifier<NetworkTopology> networkTopology = InstanceIdentifier.builder(NetworkTopology.class).build();
+ final KeyedInstanceIdentifier<Topology, TopologyKey> topology = networkTopology.child(Topology.class, new TopologyKey(new TopologyId(TopologyNetconf.QNAME.getLocalName())));
+ return topology
+ .child(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node.class,
+ new org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey
+ (new org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId(key.getId().getValue())));
+ }
+
+ // Binding-independent (YangInstanceIdentifier) equivalent of
+ // createBindingPathForTopology(), keyed by topology-id/node-id leaves.
+ private static org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier createBIPathForTopology(final String name) {
+ final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.InstanceIdentifierBuilder builder =
+ org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder();
+ builder
+ .node(NetworkTopology.QNAME)
+ .nodeWithKey(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), TopologyNetconf.QNAME.getLocalName())
+ .nodeWithKey(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node.QNAME,
+ QName.create(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node.QNAME, "node-id"), name);
+ return builder.build();
+ }
+
+ /**
+  * Builds a Host from the socket address: the literal IP when the address is
+  * resolved, the host name otherwise.
+  *
+  * Fixed: InetAddress.getHostAddress() never returns null, so the original
+  * null-check could not select the fallback branch — and for an unresolved
+  * InetSocketAddress, address.getAddress() itself is null, which made the
+  * original expression throw NPE. Check getAddress() instead and fall back
+  * to InetSocketAddress.getHostName().
+  */
+ private Host buildHost() {
+ return address.getAddress() != null
+ ? HostBuilder.getDefaultInstance(address.getAddress().getHostAddress())
+ : HostBuilder.getDefaultInstance(address.getHostName());
+ }
+
+ // Device instance name; also used as the node-id key in both paths.
public String getName() {
return name;
}
return key;
}
+ // Binding path of this device's node in the netconf topology.
+ public InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node> getTopologyBindingPath() {
+ return topologyBindingPath;
+ }
+
+ // Binding-independent path of this device's node in the netconf topology.
+ public YangInstanceIdentifier getTopologyPath() {
+ return topologyPath;
+ }
+
+ // NOTE(review): may be null when this id was created via a constructor
+ // that does not take an InetSocketAddress.
+ public InetSocketAddress getAddress() {
+ return address;
+ }
+
+ // NOTE(review): may be null when created via the deprecated constructors.
+ public Host getHost() {
+ return host;
+ }
+
+
@Override
public String toString() {
return "RemoteDevice{" + name +'}';
--- /dev/null
+module netconf-node-topology {
+    namespace "urn:opendaylight:netconf-node-topology";
+    prefix "nettop";
+
+    import network-topology { prefix nt; revision-date 2013-10-21; }
+    import yang-ext { prefix ext; revision-date "2013-07-09";}
+    import ietf-inet-types { prefix inet; revision-date "2010-09-24"; }
+
+    revision "2015-01-14" {
+        description "Initial revision of Topology model";
+    }
+
+    // Marks a topology instance as a netconf topology via topology-types.
+    augment "/nt:network-topology/nt:topology/nt:topology-types" {
+        container topology-netconf {
+        }
+    }
+
+    grouping netconf-node-fields {
+        leaf connection-status {
+            type enumeration {
+                enum connecting;
+                enum connected;
+                enum unable-to-connect;
+            }
+        }
+
+        leaf host {
+            type inet:host;
+        }
+
+        leaf port {
+            type inet:port-number;
+        }
+
+        leaf connected-message {
+            type string;
+        }
+
+        container available-capabilities {
+            leaf-list available-capability {
+                type string;
+            }
+        }
+
+        container unavailable-capabilities {
+            list unavailable-capability {
+                leaf capability {
+                    type string;
+                }
+
+                leaf failure-reason {
+                    type enumeration {
+                        enum missing-source;
+                        enum unable-to-resolve;
+                    }
+                }
+            }
+        }
+
+        container pass-through {
+            // Fixed: XPath string literals must be quoted. A bare "connected"
+            // is a location path selecting a (non-existent) child node named
+            // "connected", so the comparison was always false and pass-through
+            // could never become valid.
+            when "../connection-status = 'connected'";
+            description
+                "When the underlying node is connected, its NETCONF context
+                is available verbatim under this container through the
+                mount extension.";
+        }
+    }
+
+    augment "/nt:network-topology/nt:topology/nt:node" {
+        when "../../nt:topology-types/topology-netconf";
+        ext:augment-identifier "netconf-node";
+
+        uses netconf-node-fields;
+    }
+}
}
}
+ leaf reconnect-on-changed-schema {
+ type boolean;
+ default false;
+ description "If true, the connector would auto disconnect/reconnect when schemas are changed in the remote device.
+ The connector subscribes (right after connect) to base netconf notifications and listens for netconf-capability-change notification";
+ }
+
container dom-registry {
uses config:service-ref {
refine type {
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
+
import com.google.common.base.Optional;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Lists;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.sal.connect.api.MessageTransformer;
-import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
import org.opendaylight.controller.sal.connect.api.SchemaSourceProviderFactory;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
private static final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver = new NetconfStateSchemas.NetconfStateSchemasResolver() {
@Override
- public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
+ public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
return NetconfStateSchemas.EMPTY;
}
};
public void testNetconfDeviceFailFirstSchemaFailSecondEmpty() throws Exception {
final ArrayList<String> capList = Lists.newArrayList(TEST_CAPABILITY);
- final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
- final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+ final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
+ final NetconfDeviceCommunicator listener = getListener();
final SchemaContextFactory schemaFactory = getSchemaFactory();
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
= new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
- final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer());
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer(), true);
// Monitoring not supported
- final NetconfSessionCapabilities sessionCaps = getSessionCaps(false, capList);
+ final NetconfSessionPreferences sessionCaps = getSessionCaps(false, capList);
device.onRemoteSessionUp(sessionCaps, listener);
Mockito.verify(facade, Mockito.timeout(5000)).onDeviceDisconnected();
@Test
public void testNetconfDeviceMissingSource() throws Exception {
- final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
- final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+ final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
+ final NetconfDeviceCommunicator listener = getListener();
final SchemaContextFactory schemaFactory = getSchemaFactory();
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
= new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
- final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer());
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer(), true);
// Monitoring supported
- final NetconfSessionCapabilities sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
+ final NetconfSessionPreferences sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
device.onRemoteSessionUp(sessionCaps, listener);
- Mockito.verify(facade, Mockito.timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ Mockito.verify(facade, Mockito.timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionPreferences.class), any(RpcImplementation.class));
Mockito.verify(schemaFactory, times(2)).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
}
@Test
public void testNotificationBeforeSchema() throws Exception {
- final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
- final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+ final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
+ final NetconfDeviceCommunicator listener = getListener();
final MessageTransformer<NetconfMessage> messageTransformer = getMessageTransformer();
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
= new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), getSchemaFactory(), stateSchemasResolver);
- final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer);
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer, true);
device.onNotification(netconfMessage);
device.onNotification(netconfMessage);
verify(facade, times(0)).onNotification(any(CompositeNode.class));
- final NetconfSessionCapabilities sessionCaps = getSessionCaps(true,
+ final NetconfSessionPreferences sessionCaps = getSessionCaps(true,
Lists.newArrayList(TEST_CAPABILITY));
device.onRemoteSessionUp(sessionCaps, listener);
@Test
public void testNetconfDeviceReconnect() throws Exception {
- final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
- final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+ final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
+ final NetconfDeviceCommunicator listener = getListener();
final SchemaContextFactory schemaContextProviderFactory = getSchemaFactory();
final MessageTransformer<NetconfMessage> messageTransformer = getMessageTransformer();
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
= new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaContextProviderFactory, stateSchemasResolver);
- final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer);
- final NetconfSessionCapabilities sessionCaps = getSessionCaps(true,
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer, true);
+ final NetconfSessionPreferences sessionCaps = getSessionCaps(true,
Lists.newArrayList(TEST_NAMESPACE + "?module=" + TEST_MODULE + "&revision=" + TEST_REVISION));
device.onRemoteSessionUp(sessionCaps, listener);
verify(schemaContextProviderFactory, timeout(5000)).createSchemaContext(any(Collection.class));
verify(messageTransformer, timeout(5000)).onGlobalContextUpdated(any(SchemaContext.class));
- verify(facade, timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ verify(facade, timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionPreferences.class), any(RpcImplementation.class));
device.onRemoteSessionDown();
verify(facade, timeout(5000)).onDeviceDisconnected();
verify(schemaContextProviderFactory, timeout(5000).times(2)).createSchemaContext(any(Collection.class));
verify(messageTransformer, timeout(5000).times(3)).onGlobalContextUpdated(any(SchemaContext.class));
- verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionPreferences.class), any(RpcImplementation.class));
}
private SchemaContextFactory getSchemaFactory() {
return parser.resolveSchemaContext(models);
}
- private RemoteDeviceHandler<NetconfSessionCapabilities> getFacade() throws Exception {
- final RemoteDeviceHandler<NetconfSessionCapabilities> remoteDeviceHandler = mockCloseableClass(RemoteDeviceHandler.class);
- doNothing().when(remoteDeviceHandler).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ private RemoteDeviceHandler<NetconfSessionPreferences> getFacade() throws Exception {
+ final RemoteDeviceHandler<NetconfSessionPreferences> remoteDeviceHandler = mockCloseableClass(RemoteDeviceHandler.class);
+ doNothing().when(remoteDeviceHandler).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionPreferences.class), any(RpcImplementation.class));
doNothing().when(remoteDeviceHandler).onDeviceDisconnected();
doNothing().when(remoteDeviceHandler).onNotification(any(CompositeNode.class));
return remoteDeviceHandler;
return messageTransformer;
}
- public NetconfSessionCapabilities getSessionCaps(final boolean addMonitor, final Collection<String> additionalCapabilities) {
+ public NetconfSessionPreferences getSessionCaps(final boolean addMonitor, final Collection<String> additionalCapabilities) {
final ArrayList<String> capabilities = Lists.newArrayList(
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0,
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
capabilities.addAll(additionalCapabilities);
- return NetconfSessionCapabilities.fromStrings(
+ return NetconfSessionPreferences.fromStrings(
capabilities);
}
- public RemoteDeviceCommunicator<NetconfMessage> getListener() throws Exception {
- final RemoteDeviceCommunicator<NetconfMessage> remoteDeviceCommunicator = mockCloseableClass(RemoteDeviceCommunicator.class);
+ public NetconfDeviceCommunicator getListener() throws Exception {
+ final NetconfDeviceCommunicator remoteDeviceCommunicator = mockCloseableClass(NetconfDeviceCommunicator.class);
doReturn(Futures.immediateFuture(rpcResult)).when(remoteDeviceCommunicator).sendRequest(any(NetconfMessage.class), any(QName.class));
return remoteDeviceCommunicator;
}
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.NetconfClientSession;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
-import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.protocol.framework.ReconnectStrategy;
NetconfClientSession mockSession;
@Mock
- RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockDevice;
+ RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> mockDevice;
NetconfDeviceCommunicator communicator;
public void setUp() throws Exception {
MockitoAnnotations.initMocks( this );
- communicator = new NetconfDeviceCommunicator( new RemoteDeviceId( "test" ), mockDevice );
+ communicator = new NetconfDeviceCommunicator( new RemoteDeviceId( "test" ), mockDevice);
}
@SuppressWarnings("unchecked")
- void setupSession()
- {
- doReturn( Collections.<String>emptySet() ).when( mockSession ).getServerCapabilities();
- doNothing().when( mockDevice ).onRemoteSessionUp( any( NetconfSessionCapabilities.class ),
- any( RemoteDeviceCommunicator.class ) );
- communicator.onSessionUp( mockSession );
+ void setupSession() {
+ doReturn(Collections.<String>emptySet()).when(mockSession).getServerCapabilities();
+ doNothing().when(mockDevice).onRemoteSessionUp(any(NetconfSessionPreferences.class),
+ any(NetconfDeviceCommunicator.class));
+ communicator.onSessionUp(mockSession);
}
private ListenableFuture<RpcResult<NetconfMessage>> sendRequest() throws Exception {
testCapability );
doReturn( serverCapabilities ).when( mockSession ).getServerCapabilities();
- ArgumentCaptor<NetconfSessionCapabilities> netconfSessionCapabilities =
- ArgumentCaptor.forClass( NetconfSessionCapabilities.class );
- doNothing().when( mockDevice ).onRemoteSessionUp( netconfSessionCapabilities.capture(), eq( communicator ) );
+ ArgumentCaptor<NetconfSessionPreferences> NetconfSessionPreferences =
+ ArgumentCaptor.forClass( NetconfSessionPreferences.class );
+ doNothing().when( mockDevice ).onRemoteSessionUp( NetconfSessionPreferences.capture(), eq( communicator ) );
communicator.onSessionUp( mockSession );
verify( mockSession ).getServerCapabilities();
- verify( mockDevice ).onRemoteSessionUp( netconfSessionCapabilities.capture(), eq( communicator ) );
+ verify( mockDevice ).onRemoteSessionUp( NetconfSessionPreferences.capture(), eq( communicator ) );
- NetconfSessionCapabilities actualCapabilites = netconfSessionCapabilities.getValue();
+ NetconfSessionPreferences actualCapabilites = NetconfSessionPreferences.getValue();
assertEquals( "containsModuleCapability", true, actualCapabilites.containsNonModuleCapability(
NetconfMessageTransformUtil.NETCONF_ROLLBACK_ON_ERROR_URI.toString()) );
assertEquals( "containsModuleCapability", false, actualCapabilites.containsNonModuleCapability(testCapability) );
*/
@Test
public void testNetconfDeviceReconnectInCommunicator() throws Exception {
- final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> device = mock(RemoteDevice.class);
+ final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> device = mock(RemoteDevice.class);
final TimedReconnectStrategy timedReconnectStrategy = new TimedReconnectStrategy(GlobalEventExecutor.INSTANCE, 10000, 0, 1.0, null, 100L, null);
final ReconnectStrategy reconnectStrategy = spy(new ReconnectStrategy() {
}
});
- final NetconfDeviceCommunicator listener = new NetconfDeviceCommunicator(new RemoteDeviceId("test"), device);
final EventLoopGroup group = new NioEventLoopGroup();
final Timer time = new HashedWheelTimer();
try {
- final NetconfClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create()
+ final NetconfDeviceCommunicator listener = new NetconfDeviceCommunicator(new RemoteDeviceId("test"), device);
+ final NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create()
.withAddress(new InetSocketAddress("localhost", 65000))
.withReconnectStrategy(reconnectStrategy)
.withConnectStrategyFactory(new ReconnectStrategyFactory() {
.withSessionListener(listener)
.build();
-
listener.initializeRemoteConnection(new NetconfClientDispatcherImpl(group, group, time), cfg);
verify(reconnectStrategy, timeout((int) TimeUnit.MINUTES.toMillis(3)).times(101)).scheduleReconnect(any(Throwable.class));
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.yangtools.yang.common.QName;
-public class NetconfSessionCapabilitiesTest {
+public class NetconfSessionPreferencesTest {
@Test
public void testMerge() throws Exception {
"urn:ietf:params:netconf:base:1.0",
"urn:ietf:params:netconf:capability:rollback-on-error:1.0"
);
- final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+ final NetconfSessionPreferences sessionCaps1 = NetconfSessionPreferences.fromStrings(caps1);
assertCaps(sessionCaps1, 2, 3);
final List<String> caps2 = Lists.newArrayList(
"namespace:4?module=module4&revision=2012-12-12",
"randomNonModuleCap"
);
- final NetconfSessionCapabilities sessionCaps2 = NetconfSessionCapabilities.fromStrings(caps2);
+ final NetconfSessionPreferences sessionCaps2 = NetconfSessionPreferences.fromStrings(caps2);
assertCaps(sessionCaps2, 1, 2);
- final NetconfSessionCapabilities merged = sessionCaps1.replaceModuleCaps(sessionCaps2);
+ final NetconfSessionPreferences merged = sessionCaps1.replaceModuleCaps(sessionCaps2);
assertCaps(merged, 2, 2 + 1 /*Preserved monitoring*/);
for (final QName qName : sessionCaps2.getModuleBasedCaps()) {
assertThat(merged.getModuleBasedCaps(), hasItem(qName));
"namespace:2?module=module2&RANDOMSTRING;revision=2013-12-12" // This one should be ignored(same as first), since revision is in wrong format
);
- final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+ final NetconfSessionPreferences sessionCaps1 = NetconfSessionPreferences.fromStrings(caps1);
assertCaps(sessionCaps1, 0, 3);
}
- private void assertCaps(final NetconfSessionCapabilities sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
+ private void assertCaps(final NetconfSessionPreferences sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
assertEquals(nonModuleCaps, sessionCaps1.getNonModuleCaps().size());
assertEquals(moduleCaps, sessionCaps1.getModuleBasedCaps().size());
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.sal;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import com.google.common.util.concurrent.Futures;
+import java.net.InetSocketAddress;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * Unit test for {@code NetconfDeviceTopologyAdapter}. Verifies that device
+ * state changes (failed / connection-status update) are written to the
+ * datastore through write-only transactions obtained from a mocked
+ * {@link DataBroker}.
+ */
+public class NetconfDeviceTopologyAdapterTest {
+
+    // Identity of the device under test; the adapter writes topology data keyed by this id.
+    private RemoteDeviceId id = new RemoteDeviceId("test", new InetSocketAddress("localhost", 22));
+
+    // Broker stubbed to hand out the mocked write-only transaction below.
+    @Mock
+    private DataBroker broker;
+    // Transaction whose put/merge/submit calls are verified by the tests.
+    @Mock
+    private WriteTransaction writeTx;
+    // NOTE(review): this mock is never referenced by the tests below — confirm it is needed or remove it.
+    @Mock
+    private Node data;
+
+    // Identifier returned by the mocked transaction; presumably consumed by the
+    // adapter for logging — TODO confirm against NetconfDeviceTopologyAdapter.
+    private String txIdent = "test transaction";
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+        // Every write-only transaction request yields the single mocked transaction,
+        // so verify(...) counts below aggregate all of the adapter's writes.
+        doReturn(writeTx).when(broker).newWriteOnlyTransaction();
+        // Datastore writes are no-ops; only the interactions are asserted.
+        doNothing().when(writeTx).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class));
+        doNothing().when(writeTx).merge(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class));
+
+        doReturn(txIdent).when(writeTx).getIdentifier();
+    }
+
+    @Test
+    public void testFailedDevice() throws Exception {
+        // submit() succeeds immediately so the adapter's commit path completes synchronously.
+        doReturn(Futures.immediateCheckedFuture(null)).when(writeTx).submit();
+
+        NetconfDeviceTopologyAdapter adapter = new NetconfDeviceTopologyAdapter(id, broker);
+        adapter.setDeviceAsFailed(null);
+
+        // Two transactions total — presumably one from the adapter's constructor
+        // (initial node write) and one from setDeviceAsFailed — TODO confirm.
+        verify(broker, times(2)).newWriteOnlyTransaction();
+        // Three puts accumulated across both transactions on the shared writeTx mock.
+        verify(writeTx, times(3)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class));
+    }
+
+    @Test
+    public void testDeviceUpdate() throws Exception {
+        // submit() succeeds immediately so the adapter's commit path completes synchronously.
+        doReturn(Futures.immediateCheckedFuture(null)).when(writeTx).submit();
+
+        NetconfDeviceTopologyAdapter adapter = new NetconfDeviceTopologyAdapter(id, broker);
+        adapter.updateDeviceData(true, new NetconfDeviceCapabilities());
+
+        // Same interaction pattern as testFailedDevice: constructor write plus update write.
+        verify(broker, times(2)).newWriteOnlyTransaction();
+        verify(writeTx, times(3)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class));
+    }
+
+}
\ No newline at end of file
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
@Test
public void testDiscardChanges() {
final WriteCandidateTx tx = new WriteCandidateTx(id, new NetconfBaseOps(rpc), normalizer,
- NetconfSessionCapabilities.fromStrings(Collections.<String>emptySet()));
+ NetconfSessionPreferences.fromStrings(Collections.<String>emptySet()));
final CheckedFuture<Void, TransactionCommitFailedException> submitFuture = tx.submit();
try {
submitFuture.checkedGet();
.when(rpc).invokeRpc(any(QName.class), any(CompositeNode.class));
final WriteRunningTx tx = new WriteRunningTx(id, new NetconfBaseOps(rpc), normalizer,
- NetconfSessionCapabilities.fromStrings(Collections.<String>emptySet()));
+ NetconfSessionPreferences.fromStrings(Collections.<String>emptySet()));
try {
tx.delete(LogicalDatastoreType.CONFIGURATION, yangIId);
} catch (final Exception e) {
import akka.actor.Terminated;
import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.common.actor.Monitor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TerminationMonitor extends UntypedActor{
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ private static final Logger LOG = LoggerFactory.getLogger(TerminationMonitor.class);
public TerminationMonitor(){
- LOG.info("Created TerminationMonitor");
+ LOG.debug("Created TerminationMonitor");
}
@Override public void onReceive(Object message) throws Exception {
package org.opendaylight.controller.remote.rpc.registry;
import akka.actor.ActorRef;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Option;
import akka.japi.Pair;
import com.google.common.base.Preconditions;
*/
public class RpcRegistry extends BucketStore<RoutingTable> {
- final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
-
public RpcRegistry() {
getLocalBucket().setData(new RoutingTable());
}
import akka.actor.Address;
import akka.actor.Props;
import akka.cluster.ClusterActorRefProvider;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import com.google.common.annotations.VisibleForTesting;
import java.util.HashMap;
import java.util.Map;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
import org.opendaylight.controller.utils.ConditionalProbe;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A store that syncs its data across nodes in the cluster.
private static final Long NO_VERSION = -1L;
- final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+ protected final Logger log = LoggerFactory.getLogger(getClass());
/**
* Bucket owned by the node
import akka.cluster.ClusterEvent;
import akka.cluster.Member;
import akka.dispatch.Mapper;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.pattern.Patterns;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
-
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipTick;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipTick;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
/**
* Gossiper that syncs bucket store across nodes in the cluster.
public class Gossiper extends AbstractUntypedActorWithMetering {
- final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+ private final Logger log = LoggerFactory.getLogger(getClass());
private Cluster cluster;
@Override
public void postStop(){
- if (cluster != null)
+ if (cluster != null) {
cluster.unsubscribe(getSelf());
- if (gossipTask != null)
+ }
+ if (gossipTask != null) {
gossipTask.cancel();
+ }
}
@Override
protected void handleReceive(Object message) throws Exception {
//Usually sent by self via gossip task defined above. But its not enforced.
//These ticks can be sent by another actor as well which is esp. useful while testing
- if (message instanceof GossipTick)
+ if (message instanceof GossipTick) {
receiveGossipTick();
-
- //Message from remote gossiper with its bucket versions
- else if (message instanceof GossipStatus)
+ } else if (message instanceof GossipStatus) {
+ // Message from remote gossiper with its bucket versions
receiveGossipStatus((GossipStatus) message);
-
- //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
- //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
- //message with its local versions
- else if (message instanceof GossipEnvelope)
+ } else if (message instanceof GossipEnvelope) {
+ // Message from remote gossiper with buckets. This is usually in response to GossipStatus
+ // message. The contained buckets are newer as determined by the remote gossiper by
+ // comparing the GossipStatus message with its local versions.
receiveGossip((GossipEnvelope) message);
-
- else if (message instanceof ClusterEvent.MemberUp) {
+ } else if (message instanceof ClusterEvent.MemberUp) {
receiveMemberUp(((ClusterEvent.MemberUp) message).member());
} else if (message instanceof ClusterEvent.MemberRemoved) {
} else if ( message instanceof ClusterEvent.UnreachableMember){
receiveMemberRemoveOrUnreachable(((ClusterEvent.UnreachableMember) message).member());
- } else
+ } else {
unhandled(message);
+ }
}
/**
*/
void receiveMemberUp(Member member) {
- if (selfAddress.equals(member.address()))
+ if (selfAddress.equals(member.address())) {
return; //ignore up notification for self
+ }
- if (!clusterMembers.contains(member.address()))
+ if (!clusterMembers.contains(member.address())) {
clusterMembers.add(member.address());
+ }
if(log.isDebugEnabled()) {
log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
}
* 3. If there are more than one member, randomly pick one and send gossip status (bucket versions) to it.
*/
void receiveGossipTick(){
- if (clusterMembers.size() == 0) return; //no members to send gossip status to
+ if (clusterMembers.size() == 0) {
+ return; //no members to send gossip status to
+ }
Address remoteMemberToGossipTo;
- if (clusterMembers.size() == 1)
+ if (clusterMembers.size() == 1) {
remoteMemberToGossipTo = clusterMembers.get(0);
- else {
+ } else {
Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
remoteMemberToGossipTo = clusterMembers.get(randomIndex);
}
*/
void receiveGossipStatus(GossipStatus status){
//Don't accept messages from non-members
- if (!clusterMembers.contains(status.from()))
+ if (!clusterMembers.contains(status.from())) {
return;
+ }
final ActorRef sender = getSender();
Future<Object> futureReply =
for (Address address : remoteVersions.keySet()){
- if (localVersions.get(address) == null || remoteVersions.get(address) == null)
+ if (localVersions.get(address) == null || remoteVersions.get(address) == null) {
continue; //this condition is taken care of by above diffs
- if (localVersions.get(address) < remoteVersions.get(address))
+ }
+ if (localVersions.get(address) < remoteVersions.get(address)) {
localIsOlder.add(address);
- else if (localVersions.get(address) > remoteVersions.get(address))
+ } else if (localVersions.get(address) > remoteVersions.get(address)) {
localIsNewer.add(address);
+ }
}
- if (!localIsOlder.isEmpty())
+ if (!localIsOlder.isEmpty()) {
sendGossipStatusTo(sender, localVersions );
+ }
- if (!localIsNewer.isEmpty())
+ if (!localIsNewer.isEmpty()) {
sendGossipTo(sender, localIsNewer);//send newer buckets to remote
+ }
}
return null;
<groupId>${project.groupId}</groupId>
<artifactId>netconf-mapping-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-util</artifactId>
org.opendaylight.controller.netconf.confignetconfconnector.util,
org.opendaylight.controller.netconf.confignetconfconnector.osgi,
org.opendaylight.controller.netconf.confignetconfconnector.exception,</Private-Package>
- <Import-Package>*</Import-Package>
- <Export-Package></Export-Package>
</instructions>
</configuration>
</plugin>
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.Services;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfigXmlParser.EditConfigExecution;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
private static final Logger LOG = LoggerFactory.getLogger(EditConfig.class);
- private final YangStoreSnapshot yangStoreSnapshot;
+ private final YangStoreContext yangStoreSnapshot;
private final TransactionProvider transactionProvider;
private EditConfigXmlParser editConfigXmlParser;
- public EditConfig(YangStoreSnapshot yangStoreSnapshot, TransactionProvider transactionProvider,
+ public EditConfig(YangStoreContext yangStoreSnapshot, TransactionProvider transactionProvider,
ConfigRegistryClient configRegistryClient, String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
this.yangStoreSnapshot = yangStoreSnapshot;
}
}
- public static Config getConfigMapping(ConfigRegistryClient configRegistryClient, YangStoreSnapshot yangStoreSnapshot) {
+ public static Config getConfigMapping(ConfigRegistryClient configRegistryClient, YangStoreContext yangStoreSnapshot) {
Map<String, Map<String, ModuleConfig>> factories = transformMbeToModuleConfigs(configRegistryClient,
yangStoreSnapshot.getModuleMXBeanEntryMap());
Map<String, Map<Date, IdentityMapping>> identitiesMap = transformIdentities(yangStoreSnapshot.getModules());
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Datastore;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfig;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
public class Get extends AbstractConfigNetconfOperation {
- private final YangStoreSnapshot yangStoreSnapshot;
+ private final YangStoreContext yangStoreSnapshot;
private static final Logger LOG = LoggerFactory.getLogger(Get.class);
- public Get(YangStoreSnapshot yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
+ public Get(YangStoreContext yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
this.yangStoreSnapshot = yangStoreSnapshot;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Datastore;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfig;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
public static final String GET_CONFIG = "get-config";
- private final YangStoreSnapshot yangStoreSnapshot;
+ private final YangStoreContext yangStoreSnapshot;
private final Optional<String> maybeNamespace;
private final TransactionProvider transactionProvider;
private static final Logger LOG = LoggerFactory.getLogger(GetConfig.class);
- public GetConfig(YangStoreSnapshot yangStoreSnapshot, Optional<String> maybeNamespace,
+ public GetConfig(YangStoreContext yangStoreSnapshot, Optional<String> maybeNamespace,
TransactionProvider transactionProvider, ConfigRegistryClient configRegistryClient,
String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.rpc.ModuleRpcs;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.rpc.Rpcs;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
private static final Logger LOG = LoggerFactory.getLogger(RuntimeRpc.class);
public static final String CONTEXT_INSTANCE = "context-instance";
- private final YangStoreSnapshot yangStoreSnapshot;
+ private final YangStoreContext yangStoreSnapshot;
- public RuntimeRpc(final YangStoreSnapshot yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
+ public RuntimeRpc(final YangStoreContext yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
this.yangStoreSnapshot = yangStoreSnapshot;
SchemaContextProvider schemaContextProvider = reference.getBundle().getBundleContext().getService(reference);
- YangStoreServiceImpl yangStoreService = new YangStoreServiceImpl(schemaContextProvider);
+ YangStoreService yangStoreService = new YangStoreService(schemaContextProvider, context);
configRegistryLookup = new ConfigRegistryLookupThread(yangStoreService);
configRegistryLookup.start();
return configRegistryLookup;
}
private class ConfigRegistryLookupThread extends Thread {
- private final YangStoreServiceImpl yangStoreService;
+ private final YangStoreService yangStoreService;
- private ConfigRegistryLookupThread(YangStoreServiceImpl yangStoreService) {
+ private ConfigRegistryLookupThread(YangStoreService yangStoreService) {
super("config-registry-lookup");
this.yangStoreService = yangStoreService;
}
final class NetconfOperationProvider {
private final Set<NetconfOperation> operations;
- NetconfOperationProvider(YangStoreSnapshot yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
+ NetconfOperationProvider(YangStoreContext yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
TransactionProvider transactionProvider, String netconfSessionIdForReporting) {
operations = setUpOperations(yangStoreSnapshot, configRegistryClient, transactionProvider,
return operations;
}
- private static Set<NetconfOperation> setUpOperations(YangStoreSnapshot yangStoreSnapshot,
+ private static Set<NetconfOperation> setUpOperations(YangStoreContext yangStoreSnapshot,
ConfigRegistryClient configRegistryClient, TransactionProvider transactionProvider,
String netconfSessionIdForReporting) {
Set<NetconfOperation> ops = Sets.newHashSet();
@Override
public NetconfOperationServiceImpl createService(String netconfSessionIdForReporting) {
- try {
- return new NetconfOperationServiceImpl(yangStoreService, jmxClient, netconfSessionIdForReporting);
- } catch (YangStoreException e) {
- throw new IllegalStateException(e);
- }
+ return new NetconfOperationServiceImpl(yangStoreService, jmxClient, netconfSessionIdForReporting);
}
}
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
-import java.util.Map;
import java.util.Set;
-import org.opendaylight.controller.config.api.LookupRegistry;
import org.opendaylight.controller.config.util.ConfigRegistryJMXClient;
-import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.confignetconfconnector.util.Util;
import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.opendaylight.yangtools.yang.model.api.Module;
/**
- * Manages life cycle of {@link YangStoreSnapshot}.
+ * Manages life cycle of {@link YangStoreContext}.
*/
public class NetconfOperationServiceImpl implements NetconfOperationService {
- private final YangStoreSnapshot yangStoreSnapshot;
private final NetconfOperationProvider operationProvider;
- private final Set<Capability> capabilities;
private final TransactionProvider transactionProvider;
+ private final YangStoreService yangStoreService;
public NetconfOperationServiceImpl(final YangStoreService yangStoreService, final ConfigRegistryJMXClient jmxClient,
- final String netconfSessionIdForReporting) throws YangStoreException {
+ final String netconfSessionIdForReporting) {
- yangStoreSnapshot = yangStoreService.getYangStoreSnapshot();
- checkConsistencyBetweenYangStoreAndConfig(jmxClient, yangStoreSnapshot);
+ this.yangStoreService = yangStoreService;
transactionProvider = new TransactionProvider(jmxClient, netconfSessionIdForReporting);
- operationProvider = new NetconfOperationProvider(yangStoreSnapshot, jmxClient, transactionProvider,
+ operationProvider = new NetconfOperationProvider(yangStoreService, jmxClient, transactionProvider,
netconfSessionIdForReporting);
- capabilities = setupCapabilities(yangStoreSnapshot);
- }
-
-
- @VisibleForTesting
- static void checkConsistencyBetweenYangStoreAndConfig(final LookupRegistry jmxClient, final YangStoreSnapshot yangStoreSnapshot) {
- Set<String> missingModulesFromConfig = Sets.newHashSet();
-
- Set<String> modulesSeenByConfig = jmxClient.getAvailableModuleFactoryQNames();
- Map<String, Map<String, ModuleMXBeanEntry>> moduleMXBeanEntryMap = yangStoreSnapshot.getModuleMXBeanEntryMap();
-
- for (Map<String, ModuleMXBeanEntry> moduleNameToMBE : moduleMXBeanEntryMap.values()) {
- for (ModuleMXBeanEntry moduleMXBeanEntry : moduleNameToMBE.values()) {
- String moduleSeenByYangStore = moduleMXBeanEntry.getYangModuleQName().toString();
- if(!modulesSeenByConfig.contains(moduleSeenByYangStore)){
- missingModulesFromConfig.add(moduleSeenByYangStore);
- }
- }
- }
-
- Preconditions
- .checkState(
- missingModulesFromConfig.isEmpty(),
- "There are inconsistencies between configuration subsystem and yangstore in terms of discovered yang modules, yang modules missing from config subsystem but present in yangstore: %s, %sAll modules present in config: %s",
- missingModulesFromConfig, System.lineSeparator(), modulesSeenByConfig);
-
}
@Override
public void close() {
- yangStoreSnapshot.close();
transactionProvider.close();
}
@Override
public Set<Capability> getCapabilities() {
- return capabilities;
+ return setupCapabilities(yangStoreService);
}
@Override
return operationProvider.getOperations();
}
- private static Set<Capability> setupCapabilities(final YangStoreSnapshot yangStoreSnapshot) {
+ private static Set<Capability> setupCapabilities(final YangStoreContext yangStoreSnapshot) {
Set<Capability> capabilities = new HashSet<>();
// [RFC6241] 8.3. Candidate Configuration Capability
capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:candidate:1.0"));
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
+
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+
+public interface YangStoreContext {
+
+ /**
+ * @deprecated Use {@link #getQNamesToIdentitiesToModuleMXBeanEntries()} instead. This method returns only one
+ * module representation even if multiple revisions are available.
+ */
+ @Deprecated
+ Map<String/* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> getModuleMXBeanEntryMap();
+
+
+ Map<QName, Map<String /* identity local name */, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries();
+
+ /**
+ * Get all modules discovered when this snapshot was created.
+ * @return all modules discovered. If one module exists with two different revisions, return both.
+ */
+ Set<Module> getModules();
+
+ String getModuleSource(ModuleIdentifier moduleIdentifier);
+
+}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-
-public class YangStoreException extends Exception {
-
- private static final long serialVersionUID = 2841238836278528836L;
-
- public YangStoreException(String message, Throwable cause) {
- super(message, cause);
- }
-
-}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
+
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-/**
- * Yang store OSGi service
- */
-public interface YangStoreService {
+import com.google.common.base.Function;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import java.lang.ref.SoftReference;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicReference;
+import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
+import org.opendaylight.controller.netconf.notifications.BaseNetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.BaseNotificationPublisherRegistration;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationCollector;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChangeBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.changed.by.parms.ChangedByBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.changed.by.parms.changed.by.server.or.user.ServerBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.util.tracker.ServiceTracker;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class YangStoreService implements YangStoreContext {
+
+ private static final Logger LOG = LoggerFactory.getLogger(YangStoreService.class);
/**
- * Module entry objects mapped to module names and namespaces.
+ * This is a rather interesting locking model. We need to guard against both the
+ * cache expiring from GC and being invalidated by schema context change. The
+ * context can change while we are doing processing, so we do not want to block
+ * it, so no synchronization can happen on the methods.
+ *
+ * So what we are doing is the following:
*
- * @return actual view of what is available in OSGi service registry.
+ * We synchronize with GC as usual, using a SoftReference.
+ *
+ * The atomic reference is used to synchronize with {@link #refresh()}, e.g. when
+ * refresh happens, it will push a SoftReference(null), i.e. simulating the GC. Now
+ * that may happen while the getter is already busy acting on the old schema context,
+ * so it needs to understand that a refresh has happened and retry. To do that, it
+ * attempts a CAS operation -- if it fails, it knows that the SoftReference has
+ * been replaced and thus it needs to retry.
+ *
+ * Note that {@link #getYangStoreSnapshot()} will still use synchronize() internally
+ * to stop multiple threads doing the same work.
*/
- YangStoreSnapshot getYangStoreSnapshot() throws YangStoreException;
+ private final AtomicReference<SoftReference<YangStoreSnapshot>> ref =
+ new AtomicReference<>(new SoftReference<YangStoreSnapshot>(null));
+
+ private final SchemaContextProvider schemaContextProvider;
+ private final BaseNetconfNotificationListener notificationPublisher;
+
+ private final ExecutorService notificationExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() {
+ @Override
+ public Thread newThread(final Runnable r) {
+ return new Thread(r, "config-netconf-connector-capability-notifications");
+ }
+ });
+
+ public YangStoreService(final SchemaContextProvider schemaContextProvider, final BundleContext context) {
+ this(schemaContextProvider, new NotificationCollectorTracker(context));
+ }
+
+ public YangStoreService(final SchemaContextProvider schemaContextProvider, final BaseNetconfNotificationListener notificationHandler) {
+ this.schemaContextProvider = schemaContextProvider;
+ this.notificationPublisher = notificationHandler;
+ }
+
+ private synchronized YangStoreContext getYangStoreSnapshot() {
+ SoftReference<YangStoreSnapshot> r = ref.get();
+ YangStoreSnapshot ret = r.get();
+
+ while (ret == null) {
+ // We need to compute a new value
+ ret = new YangStoreSnapshot(schemaContextProvider.getSchemaContext());
+
+ if (!ref.compareAndSet(r, new SoftReference<>(ret))) {
+ LOG.debug("Concurrent refresh detected, recomputing snapshot");
+ r = ref.get();
+ ret = null;
+ }
+ }
+
+ return ret;
+ }
+
+ @Override
+ public Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap() {
+ return getYangStoreSnapshot().getModuleMXBeanEntryMap();
+ }
+
+ @Override
+ public Map<QName, Map<String, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries() {
+ return getYangStoreSnapshot().getQNamesToIdentitiesToModuleMXBeanEntries();
+ }
+
+ @Override
+ public Set<Module> getModules() {
+ return getYangStoreSnapshot().getModules();
+ }
+
+ @Override
+ public String getModuleSource(final ModuleIdentifier moduleIdentifier) {
+ return getYangStoreSnapshot().getModuleSource(moduleIdentifier);
+ }
+
+ public void refresh() {
+ final YangStoreSnapshot previous = ref.get().get();
+ ref.set(new SoftReference<YangStoreSnapshot>(null));
+ notificationExecutor.submit(new CapabilityChangeNotifier(previous));
+ }
+
+ private final class CapabilityChangeNotifier implements Runnable {
+ private final YangStoreSnapshot previous;
+
+ public CapabilityChangeNotifier(final YangStoreSnapshot previous) {
+ this.previous = previous;
+ }
+
+ @Override
+ public void run() {
+ final YangStoreContext current = getYangStoreSnapshot();
+
+ if(current.equals(previous) == false) {
+ notificationPublisher.onCapabilityChanged(computeDiff(previous, current));
+ }
+ }
+ }
+
+ private static final Function<Module, Uri> MODULE_TO_URI = new Function<Module, Uri>() {
+ @Override
+ public Uri apply(final Module input) {
+ final QName qName = QName.cachedReference(QName.create(input.getQNameModule(), input.getName()));
+ return new Uri(qName.toString());
+ }
+ };
+
+ static NetconfCapabilityChange computeDiff(final YangStoreContext previous, final YangStoreContext current) {
+ final Sets.SetView<Module> removed = Sets.difference(previous.getModules(), current.getModules());
+ final Sets.SetView<Module> added = Sets.difference(current.getModules(), previous.getModules());
+
+ final NetconfCapabilityChangeBuilder netconfCapabilityChangeBuilder = new NetconfCapabilityChangeBuilder();
+ netconfCapabilityChangeBuilder.setChangedBy(new ChangedByBuilder().setServerOrUser(new ServerBuilder().setServer(true).build()).build());
+ netconfCapabilityChangeBuilder.setDeletedCapability(Lists.newArrayList(Collections2.transform(removed, MODULE_TO_URI)));
+ netconfCapabilityChangeBuilder.setAddedCapability(Lists.newArrayList(Collections2.transform(added, MODULE_TO_URI)));
+ // TODO modified should be computed ... but why ?
+ netconfCapabilityChangeBuilder.setModifiedCapability(Collections.<Uri>emptyList());
+ return netconfCapabilityChangeBuilder.build();
+ }
+
+
+ /**
+ * Looks for NetconfNotificationCollector service and publishes base netconf notifications if possible
+ */
+ private static class NotificationCollectorTracker implements ServiceTrackerCustomizer<NetconfNotificationCollector, NetconfNotificationCollector>, BaseNetconfNotificationListener, AutoCloseable {
+
+ private final BundleContext context;
+ private final ServiceTracker<NetconfNotificationCollector, NetconfNotificationCollector> listenerTracker;
+ private BaseNotificationPublisherRegistration publisherReg;
+
+ public NotificationCollectorTracker(final BundleContext context) {
+ this.context = context;
+ listenerTracker = new ServiceTracker<>(context, NetconfNotificationCollector.class, this);
+ listenerTracker.open();
+ }
+
+ @Override
+ public synchronized NetconfNotificationCollector addingService(final ServiceReference<NetconfNotificationCollector> reference) {
+ closePublisherRegistration();
+ publisherReg = context.getService(reference).registerBaseNotificationPublisher();
+ return null;
+ }
+
+ @Override
+ public synchronized void modifiedService(final ServiceReference<NetconfNotificationCollector> reference, final NetconfNotificationCollector service) {
+ closePublisherRegistration();
+ publisherReg = context.getService(reference).registerBaseNotificationPublisher();
+ }
+
+ @Override
+ public synchronized void removedService(final ServiceReference<NetconfNotificationCollector> reference, final NetconfNotificationCollector service) {
+ closePublisherRegistration();
+ publisherReg = null;
+ }
+
+ private void closePublisherRegistration() {
+ if(publisherReg != null) {
+ publisherReg.close();
+ }
+ }
+
+ @Override
+ public synchronized void close() {
+ closePublisherRegistration();
+ listenerTracker.close();
+ }
+
+ @Override
+ public void onCapabilityChanged(final NetconfCapabilityChange capabilityChange) {
+ if(publisherReg == null) {
+ LOG.warn("Omitting notification due to missing notification service: {}", capabilityChange);
+ return;
+ }
+ publisherReg.onCapabilityChanged(capabilityChange);
+ }
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-
-import java.lang.ref.SoftReference;
-import java.util.concurrent.atomic.AtomicReference;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class YangStoreServiceImpl implements YangStoreService {
- private static final Logger LOG = LoggerFactory.getLogger(YangStoreServiceImpl.class);
-
- /**
- * This is a rather interesting locking model. We need to guard against both the
- * cache expiring from GC and being invalidated by schema context change. The
- * context can change while we are doing processing, so we do not want to block
- * it, so no synchronization can happen on the methods.
- *
- * So what we are doing is the following:
- *
- * We synchronize with GC as usual, using a SoftReference.
- *
- * The atomic reference is used to synchronize with {@link #refresh()}, e.g. when
- * refresh happens, it will push a SoftReference(null), e.g. simulate the GC. Now
- * that may happen while the getter is already busy acting on the old schema context,
- * so it needs to understand that a refresh has happened and retry. To do that, it
- * attempts a CAS operation -- if it fails, in knows that the SoftReference has
- * been replaced and thus it needs to retry.
- *
- * Note that {@link #getYangStoreSnapshot()} will still use synchronize() internally
- * to stop multiple threads doing the same work.
- */
- private final AtomicReference<SoftReference<YangStoreSnapshotImpl>> ref = new AtomicReference<>(new SoftReference<YangStoreSnapshotImpl>(null));
- private final SchemaContextProvider service;
-
- public YangStoreServiceImpl(final SchemaContextProvider service) {
- this.service = service;
- }
-
- @Override
- public synchronized YangStoreSnapshotImpl getYangStoreSnapshot() throws YangStoreException {
- SoftReference<YangStoreSnapshotImpl> r = ref.get();
- YangStoreSnapshotImpl ret = r.get();
-
- while (ret == null) {
- // We need to be compute a new value
- ret = new YangStoreSnapshotImpl(service.getSchemaContext());
-
- if (!ref.compareAndSet(r, new SoftReference<>(ret))) {
- LOG.debug("Concurrent refresh detected, recomputing snapshot");
- r = ref.get();
- ret = null;
- }
- }
-
- return ret;
- }
-
- /**
- * Called when schema context changes, invalidates cache.
- */
- public void refresh() {
- ref.set(new SoftReference<YangStoreSnapshotImpl>(null));
- }
-}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
+
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
+import com.google.common.collect.Maps;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
+import org.opendaylight.controller.config.yangjmxgenerator.PackageTranslator;
+import org.opendaylight.controller.config.yangjmxgenerator.ServiceInterfaceEntry;
+import org.opendaylight.controller.config.yangjmxgenerator.TypeProviderWrapper;
+import org.opendaylight.yangtools.sal.binding.yang.types.TypeProviderImpl;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class YangStoreSnapshot implements YangStoreContext {
+ private static final Logger LOG = LoggerFactory.getLogger(YangStoreSnapshot.class);
+
+
+ private final Map<String /* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> moduleMXBeanEntryMap;
+
+
+ private final Map<QName, Map<String, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries;
+
+ private final SchemaContext schemaContext;
+
+ public YangStoreSnapshot(final SchemaContext resolveSchemaContext) {
+ LOG.trace("Resolved modules:{}", resolveSchemaContext.getModules());
+ this.schemaContext = resolveSchemaContext;
+ // JMX generator
+
+ Map<String, String> namespaceToPackageMapping = Maps.newHashMap();
+ PackageTranslator packageTranslator = new PackageTranslator(namespaceToPackageMapping);
+ Map<QName, ServiceInterfaceEntry> qNamesToSIEs = new HashMap<>();
+ Map<IdentitySchemaNode, ServiceInterfaceEntry> knownSEITracker = new HashMap<>();
+ // create SIE structure qNamesToSIEs
+ for (Module module : resolveSchemaContext.getModules()) {
+ String packageName = packageTranslator.getPackageName(module);
+ Map<QName, ServiceInterfaceEntry> namesToSIEntries = ServiceInterfaceEntry
+ .create(module, packageName, knownSEITracker);
+ for (Entry<QName, ServiceInterfaceEntry> sieEntry : namesToSIEntries.entrySet()) {
+ // merge value into qNamesToSIEs
+ if (qNamesToSIEs.containsKey(sieEntry.getKey()) == false) {
+ qNamesToSIEs.put(sieEntry.getKey(), sieEntry.getValue());
+ } else {
+ throw new IllegalStateException("Cannot add two SIE with same qname "
+ + sieEntry.getValue());
+ }
+ }
+ }
+
+ Map<String, Map<String, ModuleMXBeanEntry>> moduleMXBeanEntryMap = Maps.newHashMap();
+
+ Map<QName, Map<String /* identity local name */, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries = new HashMap<>();
+
-public interface YangStoreSnapshot extends AutoCloseable {
+ for (Module module : schemaContext.getModules()) {
+ String packageName = packageTranslator.getPackageName(module);
+ TypeProviderWrapper typeProviderWrapper = new TypeProviderWrapper(
+ new TypeProviderImpl(resolveSchemaContext));
- /**
- * @deprecated Use {@link #getQNamesToIdentitiesToModuleMXBeanEntries()} instead. This method return only one
- * module representation even if multiple revisions are available.
- */
- @Deprecated
- Map<String/* Namespace from yang file */,
- Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> getModuleMXBeanEntryMap();
+ QName qName = QName.create(module.getNamespace(), module.getRevision(), module.getName());
+ Map<String /* MB identity local name */, ModuleMXBeanEntry> namesToMBEs =
+ Collections.unmodifiableMap(ModuleMXBeanEntry.create(module, qNamesToSIEs, resolveSchemaContext,
+ typeProviderWrapper, packageName));
+ moduleMXBeanEntryMap.put(module.getNamespace().toString(), namesToMBEs);
+
+ qNamesToIdentitiesToModuleMXBeanEntries.put(qName, namesToMBEs);
+ }
+ this.moduleMXBeanEntryMap = Collections.unmodifiableMap(moduleMXBeanEntryMap);
+ this.qNamesToIdentitiesToModuleMXBeanEntries = Collections.unmodifiableMap(qNamesToIdentitiesToModuleMXBeanEntries);
+
+ }
+
+ @Override
+ public Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap() {
+ return moduleMXBeanEntryMap;
+ }
+
+ @Override
+ public Map<QName, Map<String, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries() {
+ return qNamesToIdentitiesToModuleMXBeanEntries;
+ }
+
+ @Override
+ public Set<Module> getModules() {
+ return schemaContext.getModules();
+ }
+
+ @Override
+ public String getModuleSource(final org.opendaylight.yangtools.yang.model.api.ModuleIdentifier moduleIdentifier) {
+ return schemaContext.getModuleSource(moduleIdentifier).get();
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
- Map<QName, Map<String /* identity local name */, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries();
+ final YangStoreSnapshot that = (YangStoreSnapshot) o;
- /**
- * Get all modules discovered when this snapshot was created.
- * @return all modules discovered. If one module exists with two different revisions, return both.
- */
- Set<Module> getModules();
+ if (schemaContext != null ? !schemaContext.equals(that.schemaContext) : that.schemaContext != null)
+ return false;
- String getModuleSource(ModuleIdentifier moduleIdentifier);
+ return true;
+ }
@Override
- void close();
+ public int hashCode() {
+ return schemaContext != null ? schemaContext.hashCode() : 0;
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-
-import com.google.common.collect.Maps;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
-import org.opendaylight.controller.config.yangjmxgenerator.PackageTranslator;
-import org.opendaylight.controller.config.yangjmxgenerator.ServiceInterfaceEntry;
-import org.opendaylight.controller.config.yangjmxgenerator.TypeProviderWrapper;
-import org.opendaylight.yangtools.sal.binding.yang.types.TypeProviderImpl;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class YangStoreSnapshotImpl implements YangStoreSnapshot {
- private static final Logger LOG = LoggerFactory.getLogger(YangStoreSnapshotImpl.class);
-
-
- private final Map<String /* Namespace from yang file */,
- Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> moduleMXBeanEntryMap;
-
-
- private final Map<QName, Map<String, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries;
-
- private final SchemaContext schemaContext;
-
-
- public YangStoreSnapshotImpl(final SchemaContext resolveSchemaContext) {
- LOG.trace("Resolved modules:{}", resolveSchemaContext.getModules());
- this.schemaContext = resolveSchemaContext;
- // JMX generator
-
- Map<String, String> namespaceToPackageMapping = Maps.newHashMap();
- PackageTranslator packageTranslator = new PackageTranslator(namespaceToPackageMapping);
- Map<QName, ServiceInterfaceEntry> qNamesToSIEs = new HashMap<>();
- Map<IdentitySchemaNode, ServiceInterfaceEntry> knownSEITracker = new HashMap<>();
- // create SIE structure qNamesToSIEs
- for (Module module : resolveSchemaContext.getModules()) {
- String packageName = packageTranslator.getPackageName(module);
- Map<QName, ServiceInterfaceEntry> namesToSIEntries = ServiceInterfaceEntry
- .create(module, packageName, knownSEITracker);
- for (Entry<QName, ServiceInterfaceEntry> sieEntry : namesToSIEntries.entrySet()) {
- // merge value into qNamesToSIEs
- if (qNamesToSIEs.containsKey(sieEntry.getKey()) == false) {
- qNamesToSIEs.put(sieEntry.getKey(), sieEntry.getValue());
- } else {
- throw new IllegalStateException("Cannot add two SIE with same qname "
- + sieEntry.getValue());
- }
- }
- }
-
- Map<String, Map<String, ModuleMXBeanEntry>> moduleMXBeanEntryMap = Maps.newHashMap();
-
- Map<QName, Map<String /* identity local name */, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries = new HashMap<>();
-
-
- for (Module module : schemaContext.getModules()) {
- String packageName = packageTranslator.getPackageName(module);
- TypeProviderWrapper typeProviderWrapper = new TypeProviderWrapper(
- new TypeProviderImpl(resolveSchemaContext));
-
- QName qName = QName.create(module.getNamespace(), module.getRevision(), module.getName());
-
- Map<String /* MB identity local name */, ModuleMXBeanEntry> namesToMBEs =
- Collections.unmodifiableMap(ModuleMXBeanEntry.create(module, qNamesToSIEs, resolveSchemaContext,
- typeProviderWrapper, packageName));
- moduleMXBeanEntryMap.put(module.getNamespace().toString(), namesToMBEs);
-
- qNamesToIdentitiesToModuleMXBeanEntries.put(qName, namesToMBEs);
- }
- this.moduleMXBeanEntryMap = Collections.unmodifiableMap(moduleMXBeanEntryMap);
- this.qNamesToIdentitiesToModuleMXBeanEntries = Collections.unmodifiableMap(qNamesToIdentitiesToModuleMXBeanEntries);
-
- }
-
- @Override
- public Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap() {
- return moduleMXBeanEntryMap;
- }
-
- @Override
- public Map<QName, Map<String, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries() {
- return qNamesToIdentitiesToModuleMXBeanEntries;
- }
-
- @Override
- public Set<Module> getModules() {
- return schemaContext.getModules();
- }
-
- @Override
- public String getModuleSource(final org.opendaylight.yangtools.yang.model.api.ModuleIdentifier moduleIdentifier) {
- return schemaContext.getModuleSource(moduleIdentifier).get();
- }
-
- @Override
- public void close() {
-
- }
-}
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.get.Get;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.getconfig.GetConfig;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.runtimerpc.RuntimeRpc;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreServiceImpl;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreService;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCloseSession;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+import org.osgi.framework.Filter;
+import org.osgi.framework.ServiceListener;
+import org.osgi.framework.ServiceReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
private TestImplModuleFactory factory4;
@Mock
- YangStoreSnapshot yangStoreSnapshot;
+ YangStoreContext yangStoreSnapshot;
@Mock
NetconfOperationRouter netconfOperationRouter;
@Mock
@Before
public void setUp() throws Exception {
MockitoAnnotations.initMocks(this);
+
+
+ final Filter filter = mock(Filter.class);
+ doReturn(filter).when(mockedContext).createFilter(anyString());
+ doNothing().when(mockedContext).addServiceListener(any(ServiceListener.class), anyString());
+ doReturn(new ServiceReference<?>[]{}).when(mockedContext).getServiceReferences(anyString(), anyString());
+
doReturn(getMbes()).when(this.yangStoreSnapshot).getModuleMXBeanEntryMap();
doReturn(getModules()).when(this.yangStoreSnapshot).getModules();
doNothing().when(netconfOperationServiceSnapshot).close();
this.factory2 = new DepTestImplModuleFactory();
this.factory3 = new IdentityTestModuleFactory();
factory4 = new TestImplModuleFactory();
+
+
super.initConfigTransactionManagerImpl(new HardcodedModuleFactoriesResolver(mockedContext, this.factory, this.factory2,
this.factory3, factory4));
YangParserImpl yangParser = new YangParserImpl();
final SchemaContext schemaContext = yangParser.resolveSchemaContext(new HashSet<>(yangParser.parseYangModelsFromStreamsMapped(yangDependencies).values()));
- YangStoreServiceImpl yangStoreService = new YangStoreServiceImpl(new SchemaContextProvider() {
+ YangStoreService yangStoreService = new YangStoreService(new SchemaContextProvider() {
@Override
public SchemaContext getSchemaContext() {
return schemaContext ;
}
- });
- mBeanEntries.putAll(yangStoreService.getYangStoreSnapshot().getModuleMXBeanEntryMap());
+ }, mockedContext);
+ mBeanEntries.putAll(yangStoreService.getModuleMXBeanEntryMap());
return mBeanEntries;
}
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.Services;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.ValidateTest;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfigXmlParser.EditConfigExecution;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
public class EditConfigTest {
@Mock
- private YangStoreSnapshot yangStoreSnapshot;
+ private YangStoreContext yangStoreSnapshot;
@Mock
private TransactionProvider provider;
@Mock
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import java.net.URI;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Map;
-import java.util.Set;
-import org.hamcrest.CoreMatchers;
-import org.junit.Assert;
-import org.junit.Test;
-import org.opendaylight.controller.config.api.LookupRegistry;
-import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
-import org.opendaylight.yangtools.yang.common.QName;
-
-public class NetconfOperationServiceImplTest {
-
- private static final Date date1970_01_01;
-
- static {
- try {
- date1970_01_01 = new SimpleDateFormat("yyyy-MM-dd").parse("1970-01-01");
- } catch (ParseException e) {
- throw new IllegalStateException(e);
- }
- }
-
- @Test
- public void testCheckConsistencyBetweenYangStoreAndConfig_ok() throws Exception {
- NetconfOperationServiceImpl.checkConsistencyBetweenYangStoreAndConfig(
- mockJmxClient("qname1", "qname2"),
- mockYangStoreSnapshot("qname2", "qname1"));
- }
-
- @Test
- public void testCheckConsistencyBetweenYangStoreAndConfig_ok2() throws Exception {
- NetconfOperationServiceImpl.checkConsistencyBetweenYangStoreAndConfig(
- mockJmxClient("qname1", "qname2", "qname4", "qname5"),
- mockYangStoreSnapshot("qname2", "qname1"));
- }
-
- @Test
- public void testCheckConsistencyBetweenYangStoreAndConfig_ok3() throws Exception {
- NetconfOperationServiceImpl.checkConsistencyBetweenYangStoreAndConfig(
- mockJmxClient(),
- mockYangStoreSnapshot());
- }
-
- @Test
- public void testCheckConsistencyBetweenYangStoreAndConfig_yangStoreMore() throws Exception {
- try {
- NetconfOperationServiceImpl.checkConsistencyBetweenYangStoreAndConfig(mockJmxClient("qname1"),
- mockYangStoreSnapshot("qname2", "qname1"));
- fail("An exception of type " + IllegalStateException.class + " was expected");
- } catch (IllegalStateException e) {
- String message = e.getMessage();
- Assert.assertThat(
- message,
- CoreMatchers
- .containsString("missing from config subsystem but present in yangstore: [(namespace?revision=1970-01-01)qname2]"));
- Assert.assertThat(
- message,
- CoreMatchers
- .containsString("All modules present in config: [(namespace?revision=1970-01-01)qname1]"));
- }
- }
-
- private YangStoreSnapshot mockYangStoreSnapshot(final String... qnames) {
- YangStoreSnapshot mock = mock(YangStoreSnapshot.class);
-
- Map<String, Map<String, ModuleMXBeanEntry>> map = Maps.newHashMap();
-
- Map<String, ModuleMXBeanEntry> innerMap = Maps.newHashMap();
-
- int i = 1;
- for (String qname : qnames) {
- innerMap.put(Integer.toString(i++), mockMBeanEntry(qname));
- }
-
- map.put("1", innerMap);
-
- doReturn(map).when(mock).getModuleMXBeanEntryMap();
-
- return mock;
- }
-
- private ModuleMXBeanEntry mockMBeanEntry(final String qname) {
- ModuleMXBeanEntry mock = mock(ModuleMXBeanEntry.class);
- QName q = getQName(qname);
- doReturn(q).when(mock).getYangModuleQName();
- return mock;
- }
-
- private QName getQName(final String qname) {
- return QName.create(URI.create("namespace"), date1970_01_01, qname);
- }
-
- private LookupRegistry mockJmxClient(final String... visibleQNames) {
- LookupRegistry mock = mock(LookupRegistry.class);
- Set<String> qnames = Sets.newHashSet();
- for (String visibleQName : visibleQNames) {
- QName q = getQName(visibleQName);
- qnames.add(q.toString());
- }
- doReturn(qnames).when(mock).getAvailableModuleFactoryQNames();
- return mock;
- }
-}
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>ietf-netconf</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>ietf-netconf-monitoring</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-notifications-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-client</artifactId>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-parser-impl</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-netconf-connector</artifactId>
+ </dependency>
</dependencies>
<build>
}
case SSH: {
writeStatus(consoleIO, "Connecting to %s via SSH. Please wait.", cliArgs.getAddress());
- connectionManager.connectBlocking(cliArgs.getAddress(), getClientSshConfig(cliArgs));
+ connectionManager.connectBlocking(cliArgs.getAddress(), cliArgs.getServerAddress(), getClientSshConfig(cliArgs));
break;
}
case NONE: {/* Do not connect initially */
import org.opendaylight.controller.netconf.cli.io.ConsoleContext;
import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
* Implementation of RemoteDeviceHandler. Integrates cli with
* sal-netconf-connector.
*/
-public class NetconfDeviceConnectionHandler implements RemoteDeviceHandler<NetconfSessionCapabilities> {
+public class NetconfDeviceConnectionHandler implements RemoteDeviceHandler<NetconfSessionPreferences> {
private final CommandDispatcher commandDispatcher;
private final SchemaContextRegistry schemaContextRegistry;
@Override
public synchronized void onDeviceConnected(final SchemaContext context,
- final NetconfSessionCapabilities capabilities, final RpcImplementation rpcImplementation) {
+ final NetconfSessionPreferences preferences, final RpcImplementation rpcImplementation) {
console.enterRootContext(new ConsoleContext() {
@Override
up = false;
}
+ @Override
+ public void onDeviceFailed(Throwable throwable) {
+ // FIXME
+ }
+
@Override
public void onNotification(final CompositeNode compositeNode) {
// FIXME
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
+import java.net.InetSocketAddress;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
// TODO we receive configBuilder in order to add SessionListener, Session
// Listener should not be part of config
- public synchronized void connect(final String name, final NetconfClientConfigurationBuilder configBuilder) {
+ public synchronized void connect(final String name, final InetSocketAddress address, final NetconfClientConfigurationBuilder configBuilder) {
// TODO change IllegalState exceptions to custom ConnectionException
Preconditions.checkState(listener == null, "Already connected");
- final RemoteDeviceId deviceId = new RemoteDeviceId(name);
+ final RemoteDeviceId deviceId = new RemoteDeviceId(name, address);
handler = new NetconfDeviceConnectionHandler(commandDispatcher, schemaContextRegistry,
console, name);
/**
* Blocks thread until connection is fully established
*/
- public synchronized Set<String> connectBlocking(final String name, final NetconfClientConfigurationBuilder configBuilder) {
- this.connect(name, configBuilder);
+ public synchronized Set<String> connectBlocking(final String name, final InetSocketAddress address, final NetconfClientConfigurationBuilder configBuilder) {
+ this.connect(name, address, configBuilder);
synchronized (handler) {
while (handler.isUp() == false) {
try {
@Override
public Output invoke(final Input inputArgs) {
final NetconfClientConfigurationBuilder config = getConfig(inputArgs);
- return invoke(config, getArgument(inputArgs, "address-name", String.class));
+ return invoke(config, getArgument(inputArgs, "address-name", String.class), inputArgs);
}
- private Output invoke(final NetconfClientConfigurationBuilder config, final String addressName) {
- final Set<String> remoteCmds = connectManager.connectBlocking(addressName, config);
+ private Output invoke(final NetconfClientConfigurationBuilder config, final String addressName, final Input inputArgs) {
+ final Set<String> remoteCmds = connectManager.connectBlocking(addressName, getAddress(inputArgs), config);
final ArrayList<Node<?>> output = Lists.newArrayList();
output.add(new SimpleNodeTOImpl<>(QName.create(getCommandId(), "status"), null, "Connection initiated"));
.withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH);
}
+ private InetSocketAddress getAddress(final Input inputArgs) {
+ final String address = getArgument(inputArgs, "address-name", String.class);
+ final InetSocketAddress inetAddress;
+ try {
+ inetAddress = new InetSocketAddress(InetAddress.getByName(address), getArgument(inputArgs, "address-port", Integer.class));
+ } catch (final UnknownHostException e) {
+ throw new IllegalArgumentException("Unable to use address: " + address, e);
+ }
+ return inetAddress;
+ }
+
private <T> Optional<T> getArgumentOpt(final Input inputArgs, final String argName, final Class<T> type) {
final QName argQName = QName.create(getCommandId(), argName);
final Node<?> argumentNode = inputArgs.getArg(argName);
<username xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">admin</username>
<password xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">admin</password>
<tcp-only xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">false</tcp-only>
+ <reconnect-on-changed-schema xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">true</reconnect-on-changed-schema>
<event-executor xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-event-executor</type>
<name>global-event-executor</name>
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
+import org.opendaylight.controller.netconf.mapping.api.SessionAwareNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
if (netconfOperation instanceof DefaultNetconfOperation) {
((DefaultNetconfOperation) netconfOperation).setNetconfSession(session);
}
+ if(netconfOperation instanceof SessionAwareNetconfOperation) {
+ ((SessionAwareNetconfOperation) netconfOperation).setSession(session);
+ }
if (!handlingPriority.equals(HandlingPriority.CANNOT_HANDLE)) {
Preconditions.checkState(!sortedPriority.containsKey(handlingPriority),
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.NetconfOperationServiceFactoryImpl;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreException;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreService;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreServiceImpl;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
import org.opendaylight.controller.netconf.impl.NetconfServerDispatcher;
import org.opendaylight.controller.netconf.impl.NetconfServerSessionNegotiatorFactory;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.notifications.BaseNetconfNotificationListener;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
return clientDispatcher;
}
- private HardcodedYangStoreService getYangStore() throws YangStoreException, IOException {
+ private HardcodedYangStoreService getYangStore() throws IOException {
final Collection<InputStream> yangDependencies = getBasicYangs();
return new HardcodedYangStoreService(yangDependencies);
}
return b.build();
}
- public static final class HardcodedYangStoreService implements YangStoreService {
-
- private final List<InputStream> byteArrayInputStreams;
+ public static final class HardcodedYangStoreService extends YangStoreService {
+ public HardcodedYangStoreService(final Collection<? extends InputStream> inputStreams) throws IOException {
+ super(new SchemaContextProvider() {
+ @Override
+ public SchemaContext getSchemaContext() {
+ return getSchema(inputStreams);
+ }
+ }, new BaseNetconfNotificationListener() {
+ @Override
+ public void onCapabilityChanged(final NetconfCapabilityChange capabilityChange) {
+ // NOOP
+ }
+ });
+ }
- public HardcodedYangStoreService(final Collection<? extends InputStream> inputStreams) throws YangStoreException, IOException {
- byteArrayInputStreams = new ArrayList<>();
+ private static SchemaContext getSchema(final Collection<? extends InputStream> inputStreams) {
+ final ArrayList<InputStream> byteArrayInputStreams = new ArrayList<>();
for (final InputStream inputStream : inputStreams) {
assertNotNull(inputStream);
- final byte[] content = IOUtils.toByteArray(inputStream);
+ final byte[] content;
+ try {
+ content = IOUtils.toByteArray(inputStream);
+ } catch (IOException e) {
+ throw new IllegalStateException("Cannot read " + inputStream, e);
+ }
final ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(content);
byteArrayInputStreams.add(byteArrayInputStream);
}
- }
- @Override
- public YangStoreSnapshot getYangStoreSnapshot() throws YangStoreException {
for (final InputStream inputStream : byteArrayInputStreams) {
try {
inputStream.reset();
}
final YangParserImpl yangParser = new YangParserImpl();
- final SchemaContext schemaContext = yangParser.resolveSchemaContext(new HashSet<>(yangParser.parseYangModelsFromStreamsMapped(byteArrayInputStreams).values()));
- final YangStoreServiceImpl yangStoreService = new YangStoreServiceImpl(new SchemaContextProvider() {
- @Override
- public SchemaContext getSchemaContext() {
- return schemaContext ;
- }
- });
- return yangStoreService.getYangStoreSnapshot();
+ return yangParser.resolveSchemaContext(new HashSet<>(yangParser.parseYangModelsFromStreamsMapped(byteArrayInputStreams).values()));
}
}
}
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
-import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
import org.opendaylight.yangtools.yang.common.QName;
}
static NetconfDeviceCommunicator getSessionListener() {
- RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockedRemoteDevice = mock(RemoteDevice.class);
- doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionCapabilities.class), any(RemoteDeviceCommunicator.class));
+ RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> mockedRemoteDevice = mock(RemoteDevice.class);
+ doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionPreferences.class), any(NetconfDeviceCommunicator.class));
doNothing().when(mockedRemoteDevice).onRemoteSessionDown();
return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test"), mockedRemoteDevice);
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mapping.api;
+
+import org.opendaylight.controller.netconf.api.NetconfSession;
+
+public interface SessionAwareNetconfOperation extends NetconfOperation {
+
+ void setSession(NetconfSession session);
+}
private static final Logger LOG = LoggerFactory.getLogger(NetconfMessageToEXIEncoder.class);
/**
* This class is not marked as shared, so it can be attached to only a single channel,
- * which means that {@link #encode(ChannelHandlerContext, NetconfMessage, ByteBuf)}
+ * which means that {@link #encode(io.netty.channel.ChannelHandlerContext, org.opendaylight.controller.netconf.api.NetconfMessage, io.netty.buffer.ByteBuf)}
* cannot be invoked concurrently. Hence we can reuse the transmogrifier.
*/
- private final Transmogrifier transmogrifier;
+ private final NetconfEXICodec codec;
- private NetconfMessageToEXIEncoder(final Transmogrifier transmogrifier) {
- this.transmogrifier = Preconditions.checkNotNull(transmogrifier);
+ private NetconfMessageToEXIEncoder(final NetconfEXICodec codec) {
+ this.codec = Preconditions.checkNotNull(codec);
}
public static NetconfMessageToEXIEncoder create(final NetconfEXICodec codec) throws EXIOptionsException, TransmogrifierException {
- return new NetconfMessageToEXIEncoder(codec.getTransmogrifier());
+ return new NetconfMessageToEXIEncoder(codec);
}
@Override
protected void encode(final ChannelHandlerContext ctx, final NetconfMessage msg, final ByteBuf out) throws EXIOptionsException, IOException, TransformerException, TransmogrifierException {
LOG.trace("Sent to encode : {}", msg);
+ // TODO Workaround for bug 2679, recreate transmogrifier every time
+ // If the transmogrifier is reused, encoded xml can become non valid according to EXI decoder
+ // Seems like a bug in the nagasena library (try newer version of the library or fix the bug inside of it)
+ // Related bugs 2459: reuse nagasena resources, 2458: upgrade nagasena to newest version
+ final Transmogrifier transmogrifier = codec.getTransmogrifier();
+
try (final OutputStream os = new ByteBufOutputStream(out)) {
transmogrifier.setOutputStream(os);
final ContentHandler handler = transmogrifier.getSAXTransmogrifier();
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>netconf-subsystem</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <packaging>bundle</packaging>
+ <artifactId>netconf-notifications-api</artifactId>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Export-Package>org.opendaylight.controller.netconf.notifications.*</Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+
+
+/**
+ * Listener for base netconf notifications defined in https://tools.ietf.org/html/rfc6470.
+ * This listener uses generated classes from yang model defined in RFC6470.
+ * It alleviates the provisioning of base netconf notifications from the code.
+ */
+public interface BaseNetconfNotificationListener {
+
+ /**
+ * Callback used to notify about a change in used capabilities
+ */
+ void onCapabilityChanged(NetconfCapabilityChange capabilityChange);
+
+ // TODO add other base notifications
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+/**
+ * Registration for base notification publisher. This registration allows for publishing of base netconf notifications using generated classes
+ */
+public interface BaseNotificationPublisherRegistration extends NotificationRegistration, BaseNetconfNotificationListener {
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import com.google.common.base.Preconditions;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Special kind of netconf message that contains a timestamp.
+ */
+public final class NetconfNotification extends NetconfMessage {
+
+ public static final String NOTIFICATION = "notification";
+ public static final String NOTIFICATION_NAMESPACE = "urn:ietf:params:xml:ns:netconf:notification:1.0";
+ public static final String RFC3339_DATE_FORMAT_BLUEPRINT = "yyyy-MM-dd'T'HH:mm:ssXXX";
+ public static final String EVENT_TIME = "eventTime";
+
+ /**
+ * Create new notification and capture the timestamp in the constructor
+ */
+ public NetconfNotification(final Document notificationContent) {
+ this(notificationContent, new Date());
+ }
+
+ /**
+ * Create new notification with provided timestamp
+ */
+ public NetconfNotification(final Document notificationContent, final Date eventTime) {
+ super(wrapNotification(notificationContent, eventTime));
+ }
+
+ private static Document wrapNotification(final Document notificationContent, final Date eventTime) {
+ Preconditions.checkNotNull(notificationContent);
+ Preconditions.checkNotNull(eventTime);
+
+ final Element baseNotification = notificationContent.getDocumentElement();
+ final Element entireNotification = notificationContent.createElementNS(NOTIFICATION_NAMESPACE, NOTIFICATION);
+
+ final Element eventTimeElement = notificationContent.createElementNS(NOTIFICATION_NAMESPACE, EVENT_TIME);
+ eventTimeElement.setTextContent(getSerializedEventTime(eventTime));
+ entireNotification.appendChild(eventTimeElement);
+
+ entireNotification.appendChild(baseNotification);
+ notificationContent.appendChild(entireNotification);
+ return notificationContent;
+ }
+
+ private static String getSerializedEventTime(final Date eventTime) {
+ // SimpleDateFormat is not threadsafe, cannot be in a constant
+ return new SimpleDateFormat(RFC3339_DATE_FORMAT_BLUEPRINT).format(eventTime);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.Stream;
+
+/**
+ * Collector of all notifications. Base or generic
+ */
+public interface NetconfNotificationCollector {
+
+ /**
+ * Add notification publisher for a particular stream
+ *
+ * Implementations should allow for multiple publishers of a single stream
+ * and it is up to implementations to decide how to merge metadata (e.g. description)
+ * for the same stream when providing information about available stream
+ *
+ */
+ NotificationPublisherRegistration registerNotificationPublisher(Stream stream);
+
+ /**
+ * Register base notification publisher
+ */
+ BaseNotificationPublisherRegistration registerBaseNotificationPublisher();
+
+ /**
+ * Users of the registry have an option to get notification each time new notification stream gets registered
+ * This allows for a push model in addition to pull model for retrieving information about available streams.
+ *
+ * When registered, the listener should receive callbacks for each stream that was already available prior to the registration
+ */
+ NotificationRegistration registerStreamListener(NetconfNotificationStreamListener listener);
+
+ /**
+ * Simple listener that receives notifications about changes in stream availability
+ */
+ public interface NetconfNotificationStreamListener {
+
+ /**
+ * Stream becomes available in the collector (first publisher is registered)
+ */
+ void onStreamRegistered(Stream stream);
+
+ /**
+ * Stream is not available anymore in the collector (last publisher is unregistered)
+ */
+ void onStreamUnregistered(StreamNameType stream);
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+
+/**
+ * Generic listener for netconf notifications received on a particular stream.
+ */
+public interface NetconfNotificationListener {
+
+ /**
+ * Callback invoked for every new notification published on the stream this
+ * listener was registered for.
+ */
+ void onNotification(StreamNameType stream, NetconfNotification notification);
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.Streams;
+
+/**
+ * Read-only view of the notification collector: allows registration of
+ * notification listeners and provides information about available streams.
+ */
+public interface NetconfNotificationRegistry {
+
+ /**
+ * Add a listener for a certain notification stream.
+ */
+ NotificationListenerRegistration registerNotificationListener(StreamNameType stream, NetconfNotificationListener listener);
+
+ /**
+ * Check stream availability (i.e. whether at least one publisher is currently registered for it).
+ */
+ boolean isStreamAvailable(StreamNameType streamNameType);
+
+ /**
+ * Get all the streams currently available.
+ */
+ Streams getNotificationPublishers();
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+/**
+ * Manages the registration of a single notification listener.
+ * Closing this registration unregisters the associated listener.
+ */
+public interface NotificationListenerRegistration extends NotificationRegistration {
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+/**
+ * Registration for a notification publisher. This registration allows for publishing
+ * any netconf notifications (pushed via the inherited onNotification callback).
+ * Closing this registration unregisters the publisher.
+ */
+public interface NotificationPublisherRegistration extends NetconfNotificationListener, NotificationRegistration {
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+/**
+ * Generic registration, used as a base for other registration types.
+ */
+public interface NotificationRegistration extends AutoCloseable {
+
+ // Overridden close does not throw any kind of checked exception,
+ // so callers are not forced to wrap close() in try/catch
+
+ /**
+ * Close the registration, releasing any associated resources.
+ */
+ @Override
+ void close();
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>netconf-subsystem</artifactId>
+    <groupId>org.opendaylight.controller</groupId>
+    <version>0.3.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <packaging>bundle</packaging>
+  <artifactId>netconf-notifications-impl</artifactId>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-notifications-api</artifactId>
+    </dependency>
+    <!-- literal groupId used for consistency with the sibling dependency above -->
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>binding-generator-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>binding-data-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+
+    <!-- test-only dependencies: scoped explicitly so they do not leak onto the compile classpath -->
+    <dependency>
+      <groupId>xmlunit</groupId>
+      <artifactId>xmlunit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>mockito-configuration</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <configuration>
+          <instructions>
+            <Bundle-Activator>org.opendaylight.controller.netconf.notifications.impl.osgi.Activator</Bundle-Activator>
+          </instructions>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.opendaylight.yangtools</groupId>
+        <artifactId>yang-maven-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.HashMultiset;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Multiset;
+import com.google.common.collect.Sets;
+import java.util.Map;
+import java.util.Set;
+import javax.annotation.concurrent.GuardedBy;
+import javax.annotation.concurrent.ThreadSafe;
+import org.opendaylight.controller.netconf.notifications.BaseNotificationPublisherRegistration;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationCollector;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.notifications.NotificationListenerRegistration;
+import org.opendaylight.controller.netconf.notifications.NotificationPublisherRegistration;
+import org.opendaylight.controller.netconf.notifications.NotificationRegistration;
+import org.opendaylight.controller.netconf.notifications.impl.ops.NotificationsTransformUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.Streams;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.StreamsBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.Stream;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.StreamBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.StreamKey;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Thread-safe implementation of the notification collector/registry.
+ * All mutable state is guarded by the instance monitor.
+ */
+@ThreadSafe
+public class NetconfNotificationManager implements NetconfNotificationCollector, NetconfNotificationRegistry, NetconfNotificationListener, AutoCloseable {
+
+    public static final StreamNameType BASE_STREAM_NAME = new StreamNameType("NETCONF");
+    public static final Stream BASE_NETCONF_STREAM;
+
+    static {
+        BASE_NETCONF_STREAM = new StreamBuilder()
+                .setName(BASE_STREAM_NAME)
+                .setKey(new StreamKey(BASE_STREAM_NAME))
+                .setReplaySupport(false)
+                .setDescription("Default Event Stream")
+                .build();
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(NetconfNotificationManager.class);
+
+    // TODO excessive synchronization provides thread safety but is most likely not optimal
+    // (a combination of concurrent collections might improve performance).
+    // Also, calling callbacks from a synchronized block is dangerous since the
+    // listeners/publishers can block the whole notification processing.
+
+    // Listener registrations keyed by stream name (currently only the base stream key is used)
+    @GuardedBy("this")
+    private final Multimap<StreamNameType, GenericNotificationListenerReg> notificationListeners = HashMultimap.create();
+
+    @GuardedBy("this")
+    private final Set<NetconfNotificationStreamListener> streamListeners = Sets.newHashSet();
+
+    // Metadata (description, replay support, ...) of the first publisher per stream
+    @GuardedBy("this")
+    private final Map<StreamNameType, Stream> streamMetadata = Maps.newHashMap();
+
+    // Multiset: a stream stays available while at least one of its publishers is registered
+    @GuardedBy("this")
+    private final Multiset<StreamNameType> availableStreams = HashMultiset.create();
+
+    @GuardedBy("this")
+    private final Set<GenericNotificationPublisherReg> notificationPublishers = Sets.newHashSet();
+
+    @Override
+    public synchronized void onNotification(final StreamNameType stream, final NetconfNotification notification) {
+        LOG.debug("Notification of type {} detected", stream);
+        if (LOG.isTraceEnabled()) {
+            // Fixed: log at trace level to match the isTraceEnabled() guard
+            LOG.trace("Notification of type {} detected: {}", stream, notification);
+        }
+
+        // NOTE: listeners are currently tracked under the base stream only, so every
+        // notification is dispatched to base-stream listeners regardless of the stream argument
+        for (final GenericNotificationListenerReg listenerReg : notificationListeners.get(BASE_STREAM_NAME)) {
+            listenerReg.getListener().onNotification(BASE_STREAM_NAME, notification);
+        }
+    }
+
+    @Override
+    public synchronized NotificationListenerRegistration registerNotificationListener(final StreamNameType stream, final NetconfNotificationListener listener) {
+        Preconditions.checkNotNull(stream);
+        Preconditions.checkNotNull(listener);
+
+        LOG.trace("Notification listener registered for stream: {}", stream);
+
+        final GenericNotificationListenerReg genericNotificationListenerReg = new GenericNotificationListenerReg(listener) {
+            @Override
+            public void close() {
+                synchronized (NetconfNotificationManager.this) {
+                    LOG.trace("Notification listener unregistered for stream: {}", stream);
+                    super.close();
+                }
+            }
+        };
+
+        // The requested stream name is only logged for now; registrations are stored under the base stream
+        notificationListeners.put(BASE_STREAM_NAME, genericNotificationListenerReg);
+        return genericNotificationListenerReg;
+    }
+
+    @Override
+    public synchronized Streams getNotificationPublishers() {
+        // Defensive copy so the returned Streams is decoupled from internal state
+        return new StreamsBuilder().setStream(Lists.newArrayList(streamMetadata.values())).build();
+    }
+
+    @Override
+    public synchronized boolean isStreamAvailable(final StreamNameType streamNameType) {
+        return availableStreams.contains(streamNameType);
+    }
+
+    @Override
+    public synchronized NotificationRegistration registerStreamListener(final NetconfNotificationStreamListener listener) {
+        streamListeners.add(listener);
+
+        // Notify the new listener about all streams that are already available
+        for (final Stream availableStream : streamMetadata.values()) {
+            listener.onStreamRegistered(availableStream);
+        }
+
+        return new NotificationRegistration() {
+            @Override
+            public void close() {
+                synchronized (NetconfNotificationManager.this) {
+                    streamListeners.remove(listener);
+                }
+            }
+        };
+    }
+
+    @Override
+    public synchronized void close() {
+        // Unregister all listeners. Iterate over a copy: each close() removes the
+        // registration from the backing multimap, which would otherwise trigger a
+        // ConcurrentModificationException on the live values() view
+        for (final GenericNotificationListenerReg genericNotificationListenerReg : Lists.newArrayList(notificationListeners.values())) {
+            genericNotificationListenerReg.close();
+        }
+        notificationListeners.clear();
+
+        // Unregister all publishers, again iterating over a defensive copy
+        for (final GenericNotificationPublisherReg notificationPublisher : Lists.newArrayList(notificationPublishers)) {
+            notificationPublisher.close();
+        }
+        notificationPublishers.clear();
+
+        // Clear stream listeners
+        streamListeners.clear();
+    }
+
+    @Override
+    public synchronized NotificationPublisherRegistration registerNotificationPublisher(final Stream stream) {
+        Preconditions.checkNotNull(stream);
+        final StreamNameType streamName = stream.getName();
+
+        LOG.debug("Notification publisher registered for stream: {}", streamName);
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Notification publisher registered for stream: {}", stream);
+        }
+
+        // First publisher of a stream provides the stream metadata; later publishers reuse it
+        if (streamMetadata.containsKey(streamName)) {
+            LOG.warn("Notification stream {} already registered as: {}. Will be reused", streamName, streamMetadata.get(streamName));
+        } else {
+            streamMetadata.put(streamName, stream);
+        }
+
+        availableStreams.add(streamName);
+
+        final GenericNotificationPublisherReg genericNotificationPublisherReg = new GenericNotificationPublisherReg(this, streamName) {
+            @Override
+            public void close() {
+                synchronized (NetconfNotificationManager.this) {
+                    super.close();
+                }
+            }
+        };
+
+        notificationPublishers.add(genericNotificationPublisherReg);
+
+        notifyStreamAdded(stream);
+        return genericNotificationPublisherReg;
+    }
+
+    private void unregisterNotificationPublisher(final StreamNameType streamName, final GenericNotificationPublisherReg genericNotificationPublisherReg) {
+        // Removes a single occurrence; other publishers of the same stream keep it available
+        availableStreams.remove(streamName);
+        notificationPublishers.remove(genericNotificationPublisherReg);
+
+        LOG.debug("Notification publisher unregistered for stream: {}", streamName);
+
+        // Notify stream listeners if all publishers are gone and also clear metadata for the stream
+        if (!isStreamAvailable(streamName)) {
+            LOG.debug("Notification stream: {} became unavailable", streamName);
+            streamMetadata.remove(streamName);
+            notifyStreamRemoved(streamName);
+        }
+    }
+
+    private synchronized void notifyStreamAdded(final Stream stream) {
+        for (final NetconfNotificationStreamListener streamListener : streamListeners) {
+            streamListener.onStreamRegistered(stream);
+        }
+    }
+
+    private synchronized void notifyStreamRemoved(final StreamNameType stream) {
+        for (final NetconfNotificationStreamListener streamListener : streamListeners) {
+            streamListener.onStreamUnregistered(stream);
+        }
+    }
+
+    @Override
+    public BaseNotificationPublisherRegistration registerBaseNotificationPublisher() {
+        final NotificationPublisherRegistration notificationPublisherRegistration = registerNotificationPublisher(BASE_NETCONF_STREAM);
+        return new BaseNotificationPublisherReg(notificationPublisherRegistration);
+    }
+
+    /**
+     * Registration of a generic (per-stream) notification publisher.
+     */
+    private static class GenericNotificationPublisherReg implements NotificationPublisherRegistration {
+        // Nulled on close to mark the registration as closed
+        private NetconfNotificationManager baseListener;
+        private final StreamNameType registeredStream;
+
+        public GenericNotificationPublisherReg(final NetconfNotificationManager baseListener, final StreamNameType registeredStream) {
+            this.baseListener = baseListener;
+            this.registeredStream = registeredStream;
+        }
+
+        @Override
+        public void close() {
+            // Idempotent: closing an already closed registration is a no-op
+            // (previously a double close caused a NullPointerException)
+            if (baseListener != null) {
+                baseListener.unregisterNotificationPublisher(registeredStream, this);
+                baseListener = null;
+            }
+        }
+
+        @Override
+        public void onNotification(final StreamNameType stream, final NetconfNotification notification) {
+            Preconditions.checkState(baseListener != null, "Already closed");
+            Preconditions.checkArgument(stream.equals(registeredStream));
+            baseListener.onNotification(stream, notification);
+        }
+    }
+
+    /**
+     * Registration of the base stream publisher; transforms base notifications
+     * before handing them off to the underlying generic registration.
+     */
+    private static class BaseNotificationPublisherReg implements BaseNotificationPublisherRegistration {
+
+        private final NotificationPublisherRegistration baseRegistration;
+
+        public BaseNotificationPublisherReg(final NotificationPublisherRegistration baseRegistration) {
+            this.baseRegistration = baseRegistration;
+        }
+
+        @Override
+        public void close() {
+            baseRegistration.close();
+        }
+
+        @Override
+        public void onCapabilityChanged(final NetconfCapabilityChange capabilityChange) {
+            baseRegistration.onNotification(BASE_STREAM_NAME, serializeNotification(capabilityChange));
+        }
+
+        private static NetconfNotification serializeNotification(final NetconfCapabilityChange capabilityChange) {
+            return NotificationsTransformUtil.transform(capabilityChange);
+        }
+    }
+
+    /**
+     * Registration of a single notification listener; closing it removes the
+     * listener from the manager.
+     */
+    private class GenericNotificationListenerReg implements NotificationListenerRegistration {
+        private final NetconfNotificationListener listener;
+
+        public GenericNotificationListenerReg(final NetconfNotificationListener listener) {
+            this.listener = listener;
+        }
+
+        public NetconfNotificationListener getListener() {
+            return listener;
+        }
+
+        @Override
+        public void close() {
+            notificationListeners.remove(BASE_STREAM_NAME, this);
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import java.util.List;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mapping.api.SessionAwareNetconfOperation;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.notifications.NotificationListenerRegistration;
+import org.opendaylight.controller.netconf.notifications.impl.NetconfNotificationManager;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.CreateSubscriptionInput;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Create subscription listens for create subscription requests and registers notification listeners into notification registry.
+ * Received notifications are sent to the client right away
+ */
+public class CreateSubscription extends AbstractLastNetconfOperation implements SessionAwareNetconfOperation, AutoCloseable {
+
+ private static final Logger LOG = LoggerFactory.getLogger(CreateSubscription.class);
+
+ static final String CREATE_SUBSCRIPTION = "create-subscription";
+
+ private final NetconfNotificationRegistry notifications;
+ private final List<NotificationListenerRegistration> subscriptions = Lists.newArrayList();
+ private NetconfSession netconfSession;
+
+ public CreateSubscription(final String netconfSessionIdForReporting, final NetconfNotificationRegistry notifications) {
+ super(netconfSessionIdForReporting);
+ this.notifications = notifications;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ operationElement.checkName(CREATE_SUBSCRIPTION);
+ operationElement.checkNamespace(CreateSubscriptionInput.QNAME.getNamespace().toString());
+ // FIXME reimplement using CODEC_REGISTRY and parse everything into generated class instance
+ // Waiting ofr https://git.opendaylight.org/gerrit/#/c/13763/
+
+ // FIXME filter could be supported same way as netconf server filters get and get-config results
+ final Optional<XmlElement> filter = operationElement.getOnlyChildElementWithSameNamespaceOptionally("filter");
+ Preconditions.checkArgument(filter.isPresent() == false, "Filter element not yet supported");
+
+ // Replay not supported
+ final Optional<XmlElement> startTime = operationElement.getOnlyChildElementWithSameNamespaceOptionally("startTime");
+ Preconditions.checkArgument(startTime.isPresent() == false, "StartTime element not yet supported");
+
+ // Stop time not supported
+ final Optional<XmlElement> stopTime = operationElement.getOnlyChildElementWithSameNamespaceOptionally("stopTime");
+ Preconditions.checkArgument(stopTime.isPresent() == false, "StopTime element not yet supported");
+
+ final StreamNameType streamNameType = parseStreamIfPresent(operationElement);
+
+ Preconditions.checkNotNull(netconfSession);
+ // Premature streams are allowed (meaning listener can register even if no provider is available yet)
+ if(notifications.isStreamAvailable(streamNameType) == false) {
+ LOG.warn("Registering premature stream {}. No publisher available yet for session {}", streamNameType, getNetconfSessionIdForReporting());
+ }
+
+ final NotificationListenerRegistration notificationListenerRegistration =
+ notifications.registerNotificationListener(streamNameType, new NotificationSubscription(netconfSession));
+ subscriptions.add(notificationListenerRegistration);
+
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ private StreamNameType parseStreamIfPresent(final XmlElement operationElement) throws NetconfDocumentedException {
+ final Optional<XmlElement> stream = operationElement.getOnlyChildElementWithSameNamespaceOptionally("stream");
+ return stream.isPresent() ? new StreamNameType(stream.get().getTextContent()) : NetconfNotificationManager.BASE_STREAM_NAME;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return CREATE_SUBSCRIPTION;
+ }
+
+ @Override
+ protected String getOperationNamespace() {
+ return CreateSubscriptionInput.QNAME.getNamespace().toString();
+ }
+
+ @Override
+ public void setSession(final NetconfSession session) {
+ this.netconfSession = session;
+ }
+
+ @Override
+ public void close() {
+ netconfSession = null;
+ // Unregister from notification streams
+ for (final NotificationListenerRegistration subscription : subscriptions) {
+ subscription.close();
+ }
+ }
+
+ private static class NotificationSubscription implements NetconfNotificationListener {
+ private final NetconfSession currentSession;
+
+ public NotificationSubscription(final NetconfSession currentSession) {
+ this.currentSession = currentSession;
+ }
+
+ @Override
+ public void onNotification(final StreamNameType stream, final NetconfNotification notification) {
+ currentSession.sendMessage(notification);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.transform.dom.DOMResult;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.util.mapping.AbstractNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.Netconf;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.NetconfBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.Streams;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Serializes the subtree for netconf notification streams into the response of the get rpc.
+ * This operation only adds its subtree into the common response of the get rpc.
+ */
+public class Get extends AbstractNetconfOperation implements AutoCloseable {
+
+    private static final String GET = "get";
+    private static final InstanceIdentifier<Netconf> NETCONF_SUBTREE_INSTANCE_IDENTIFIER = InstanceIdentifier.builder(Netconf.class).build();
+
+    private final NetconfNotificationRegistry notificationRegistry;
+
+    public Get(final String netconfSessionIdForReporting, final NetconfNotificationRegistry notificationRegistry) {
+        super(netconfSessionIdForReporting);
+        Preconditions.checkNotNull(notificationRegistry);
+        this.notificationRegistry = notificationRegistry;
+    }
+
+    @Override
+    protected String getOperationName() {
+        return GET;
+    }
+
+    /**
+     * Let the subsequent operation build the response first, then merge the
+     * streams subtree into it (only when at least one stream is available).
+     */
+    @Override
+    public Document handle(final Document requestMessage, final NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+        final Document partialResponse = subsequentOperation.execute(requestMessage);
+        final Streams availableStreams = notificationRegistry.getNotificationPublishers();
+        if (!availableStreams.getStream().isEmpty()) {
+            serializeStreamsSubtree(partialResponse, availableStreams);
+        }
+        return partialResponse;
+    }
+
+    static void serializeStreamsSubtree(final Document partialResponse, final Streams availableStreams) throws NetconfDocumentedException {
+        final Netconf netconfSubtree = new NetconfBuilder().setStreams(availableStreams).build();
+        final NormalizedNode<?, ?> normalized = toNormalized(netconfSubtree);
+
+        final DOMResult result = new DOMResult(getPlaceholder(partialResponse));
+
+        try {
+            NotificationsTransformUtil.writeNormalizedNode(normalized, result, SchemaPath.ROOT);
+        } catch (final XMLStreamException | IOException e) {
+            throw new IllegalStateException("Unable to serialize " + netconfSubtree, e);
+        }
+    }
+
+    /**
+     * Locate the data element of the rpc-reply into which the streams subtree is appended.
+     */
+    private static Element getPlaceholder(final Document innerResult)
+            throws NetconfDocumentedException {
+        final XmlElement rootElement = XmlElement.fromDomElementWithExpected(
+                innerResult.getDocumentElement(), XmlNetconfConstants.RPC_REPLY_KEY, XmlNetconfConstants.RFC4741_TARGET_NAMESPACE);
+        return rootElement.getOnlyChildElement(XmlNetconfConstants.DATA_KEY).getDomElement();
+    }
+
+    private static NormalizedNode<?, ?> toNormalized(final Netconf netconfSubtree) {
+        return NotificationsTransformUtil.CODEC_REGISTRY.toNormalizedNode(NETCONF_SUBTREE_INSTANCE_IDENTIFIER, netconfSubtree).getValue();
+    }
+
+    @Override
+    protected Element handle(final Document document, final XmlElement message, final NetconfOperationChainedExecution subsequentOperation)
+            throws NetconfDocumentedException {
+        // This operation overrides handle(Document, ...) directly, so the template method is unreachable
+        throw new UnsupportedOperationException("Never gets called");
+    }
+
+    @Override
+    protected HandlingPriority getHandlingPriority() {
+        // Run after the default get implementation so its data element already exists
+        return HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY.increasePriority(2);
+    }
+
+    @Override
+    public void close() throws Exception {
+        // No resources to release
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Iterables;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Date;
+import javassist.ClassPool;
+import javax.xml.stream.XMLOutputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamWriter;
+import javax.xml.transform.dom.DOMResult;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.$YangModuleInfoImpl;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
+import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
+import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
+import org.opendaylight.yangtools.sal.binding.generator.util.BindingRuntimeContext;
+import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+
+public final class NotificationsTransformUtil {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NotificationsTransformUtil.class);
+
+ private NotificationsTransformUtil() {}
+
+    // Schema context containing only the notifications models
+    static final SchemaContext NOTIFICATIONS_SCHEMA_CTX;
+    static final BindingNormalizedNodeCodecRegistry CODEC_REGISTRY;
+    static final XMLOutputFactory XML_FACTORY;
+    static final RpcDefinition CREATE_SUBSCRIPTION_RPC;
+
+    // Absolute schema path of the base capability-change notification
+    static final SchemaPath CAPABILITY_CHANGE_SCHEMA_PATH = SchemaPath.create(true, NetconfCapabilityChange.QNAME);
+
+    static {
+        XML_FACTORY = XMLOutputFactory.newFactory();
+        XML_FACTORY.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
+
+        // Build a schema context from the notifications models only
+        final ModuleInfoBackedContext moduleInfoBackedContext = ModuleInfoBackedContext.create();
+        moduleInfoBackedContext.addModuleInfos(Collections.singletonList($YangModuleInfoImpl.getInstance()));
+        moduleInfoBackedContext.addModuleInfos(Collections.singletonList(org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.$YangModuleInfoImpl.getInstance()));
+        final Optional<SchemaContext> schemaContextOptional = moduleInfoBackedContext.tryToCreateSchemaContext();
+        Preconditions.checkState(schemaContextOptional.isPresent());
+        NOTIFICATIONS_SCHEMA_CTX = schemaContextOptional.get();
+
+        // checkNotNull wraps the assignment directly; the previous second check was redundant
+        CREATE_SUBSCRIPTION_RPC = Preconditions.checkNotNull(findCreateSubscriptionRpc());
+
+        final JavassistUtils javassist = JavassistUtils.forClassPool(ClassPool.getDefault());
+        CODEC_REGISTRY = new BindingNormalizedNodeCodecRegistry(StreamWriterGenerator.create(javassist));
+        CODEC_REGISTRY.onBindingRuntimeContextUpdated(BindingRuntimeContext.create(moduleInfoBackedContext, NOTIFICATIONS_SCHEMA_CTX));
+    }
+
+ private static RpcDefinition findCreateSubscriptionRpc() {
+ return Iterables.getFirst(Collections2.filter(NOTIFICATIONS_SCHEMA_CTX.getOperations(), new Predicate<RpcDefinition>() {
+ @Override
+ public boolean apply(final RpcDefinition input) {
+ return input.getQName().getLocalName().equals(CreateSubscription.CREATE_SUBSCRIPTION);
+ }
+ }), null);
+ }
+
+ /**
+ * Transform base notification for capabilities into NetconfNotification
+ */
+ public static NetconfNotification transform(final NetconfCapabilityChange capabilityChange) {
+ return transform(capabilityChange, Optional.<Date>absent());
+ }
+
+ public static NetconfNotification transform(final NetconfCapabilityChange capabilityChange, final Date eventTime) {
+ return transform(capabilityChange, Optional.fromNullable(eventTime));
+ }
+
+ private static NetconfNotification transform(final NetconfCapabilityChange capabilityChange, final Optional<Date> eventTime) {
+ final ContainerNode containerNode = CODEC_REGISTRY.toNormalizedNodeNotification(capabilityChange);
+ final DOMResult result = new DOMResult(XmlUtil.newDocument());
+ try {
+ writeNormalizedNode(containerNode, result, CAPABILITY_CHANGE_SCHEMA_PATH);
+ } catch (final XMLStreamException | IOException e) {
+ throw new IllegalStateException("Unable to serialize " + capabilityChange, e);
+ }
+ final Document node = (Document) result.getNode();
+ return eventTime.isPresent() ?
+ new NetconfNotification(node, eventTime.get()):
+ new NetconfNotification(node);
+ }
+
+ static void writeNormalizedNode(final NormalizedNode<?, ?> normalized, final DOMResult result, final SchemaPath schemaPath) throws IOException, XMLStreamException {
+ NormalizedNodeWriter normalizedNodeWriter = null;
+ NormalizedNodeStreamWriter normalizedNodeStreamWriter = null;
+ XMLStreamWriter writer = null;
+ try {
+ writer = XML_FACTORY.createXMLStreamWriter(result);
+ normalizedNodeStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(writer, NOTIFICATIONS_SCHEMA_CTX, schemaPath);
+ normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(normalizedNodeStreamWriter);
+
+ normalizedNodeWriter.write(normalized);
+
+ normalizedNodeWriter.flush();
+ } finally {
+ try {
+ if(normalizedNodeWriter != null) {
+ normalizedNodeWriter.close();
+ }
+ if(normalizedNodeStreamWriter != null) {
+ normalizedNodeStreamWriter.close();
+ }
+ if(writer != null) {
+ writer.close();
+ }
+ } catch (final Exception e) {
+ LOG.warn("Unable to close resource properly", e);
+ }
+ }
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.osgi;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.Sets;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Hashtable;
+import java.util.Set;
+import org.opendaylight.controller.netconf.mapping.api.Capability;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationCollector;
+import org.opendaylight.controller.netconf.notifications.impl.NetconfNotificationManager;
+import org.opendaylight.controller.netconf.notifications.impl.ops.CreateSubscription;
+import org.opendaylight.controller.netconf.notifications.impl.ops.Get;
+import org.osgi.framework.BundleActivator;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceRegistration;
+
+public class Activator implements BundleActivator {
+
+ private ServiceRegistration<NetconfNotificationCollector> netconfNotificationCollectorServiceRegistration;
+ private ServiceRegistration<NetconfOperationServiceFactory> operationaServiceRegistration;
+ private NetconfNotificationManager netconfNotificationManager;
+
+ @Override
+ public void start(final BundleContext context) throws Exception {
+ netconfNotificationManager = new NetconfNotificationManager();
+ netconfNotificationCollectorServiceRegistration = context.registerService(NetconfNotificationCollector.class, netconfNotificationManager, new Hashtable<String, Object>());
+
+ final NetconfOperationServiceFactory netconfOperationServiceFactory = new NetconfOperationServiceFactory() {
+
+ @Override
+ public NetconfOperationService createService(final String netconfSessionIdForReporting) {
+ return new NetconfOperationService() {
+
+ private final CreateSubscription createSubscription = new CreateSubscription(netconfSessionIdForReporting, netconfNotificationManager);
+
+ @Override
+ public Set<Capability> getCapabilities() {
+ return Collections.<Capability>singleton(new NotificationsCapability());
+ }
+
+ @Override
+ public Set<NetconfOperation> getNetconfOperations() {
+ return Sets.<NetconfOperation>newHashSet(
+ new Get(netconfSessionIdForReporting, netconfNotificationManager),
+ createSubscription);
+ }
+
+ @Override
+ public void close() {
+ createSubscription.close();
+ }
+ };
+ }
+ };
+
+ operationaServiceRegistration = context.registerService(NetconfOperationServiceFactory.class, netconfOperationServiceFactory, new Hashtable<String, Object>());
+
+ }
+
+ @Override
+ public void stop(final BundleContext context) throws Exception {
+ if(netconfNotificationCollectorServiceRegistration != null) {
+ netconfNotificationCollectorServiceRegistration.unregister();
+ netconfNotificationCollectorServiceRegistration = null;
+ }
+ if (netconfNotificationManager != null) {
+ netconfNotificationManager.close();
+ }
+ if (operationaServiceRegistration != null) {
+ operationaServiceRegistration.unregister();
+ operationaServiceRegistration = null;
+ }
+ }
+
+ private class NotificationsCapability implements Capability {
+ @Override
+ public String getCapabilityUri() {
+ return NetconfNotification.NOTIFICATION_NAMESPACE;
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Collection<String> getLocation() {
+ return Collections.emptyList();
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.notifications.BaseNotificationPublisherRegistration;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationCollector;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.notifications.NotificationListenerRegistration;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.Stream;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChangeBuilder;
+
+public class NetconfNotificationManagerTest {
+
+ @Mock
+ private NetconfNotificationRegistry notificationRegistry;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ }
+
+ @Test
+ public void testNotificationListeners() throws Exception {
+ final NetconfNotificationManager netconfNotificationManager = new NetconfNotificationManager();
+ final BaseNotificationPublisherRegistration baseNotificationPublisherRegistration =
+ netconfNotificationManager.registerBaseNotificationPublisher();
+
+ final NetconfCapabilityChangeBuilder capabilityChangedBuilder = new NetconfCapabilityChangeBuilder();
+
+ final NetconfNotificationListener listener = mock(NetconfNotificationListener.class);
+ doNothing().when(listener).onNotification(any(StreamNameType.class), any(NetconfNotification.class));
+ final NotificationListenerRegistration notificationListenerRegistration = netconfNotificationManager.registerNotificationListener(NetconfNotificationManager.BASE_NETCONF_STREAM.getName(), listener);
+ final NetconfCapabilityChange notification = capabilityChangedBuilder.build();
+ baseNotificationPublisherRegistration.onCapabilityChanged(notification);
+
+ verify(listener).onNotification(any(StreamNameType.class), any(NetconfNotification.class));
+
+ notificationListenerRegistration.close();
+
+ baseNotificationPublisherRegistration.onCapabilityChanged(notification);
+ verifyNoMoreInteractions(listener);
+ }
+
+ @Test
+ public void testClose() throws Exception {
+ final NetconfNotificationManager netconfNotificationManager = new NetconfNotificationManager();
+
+ final BaseNotificationPublisherRegistration baseNotificationPublisherRegistration = netconfNotificationManager.registerBaseNotificationPublisher();
+
+ final NetconfNotificationListener listener = mock(NetconfNotificationListener.class);
+ doNothing().when(listener).onNotification(any(StreamNameType.class), any(NetconfNotification.class));
+
+ netconfNotificationManager.registerNotificationListener(NetconfNotificationManager.BASE_NETCONF_STREAM.getName(), listener);
+
+ final NetconfNotificationCollector.NetconfNotificationStreamListener streamListener =
+ mock(NetconfNotificationCollector.NetconfNotificationStreamListener.class);
+ doNothing().when(streamListener).onStreamUnregistered(any(StreamNameType.class));
+ doNothing().when(streamListener).onStreamRegistered(any(Stream.class));
+ netconfNotificationManager.registerStreamListener(streamListener);
+
+ verify(streamListener).onStreamRegistered(NetconfNotificationManager.BASE_NETCONF_STREAM);
+
+ netconfNotificationManager.close();
+
+ verify(streamListener).onStreamUnregistered(NetconfNotificationManager.BASE_NETCONF_STREAM.getName());
+
+ try {
+ baseNotificationPublisherRegistration.onCapabilityChanged(new NetconfCapabilityChangeBuilder().build());
+ } catch (final IllegalStateException e) {
+ // Exception should be thrown after manager is closed
+ return;
+ }
+
+ fail("Publishing into a closed manager should fail");
+ }
+
+ @Test
+ public void testStreamListeners() throws Exception {
+ final NetconfNotificationManager netconfNotificationManager = new NetconfNotificationManager();
+
+ final NetconfNotificationCollector.NetconfNotificationStreamListener streamListener = mock(NetconfNotificationCollector.NetconfNotificationStreamListener.class);
+ doNothing().when(streamListener).onStreamRegistered(any(Stream.class));
+ doNothing().when(streamListener).onStreamUnregistered(any(StreamNameType.class));
+
+ netconfNotificationManager.registerStreamListener(streamListener);
+
+ final BaseNotificationPublisherRegistration baseNotificationPublisherRegistration =
+ netconfNotificationManager.registerBaseNotificationPublisher();
+
+ verify(streamListener).onStreamRegistered(NetconfNotificationManager.BASE_NETCONF_STREAM);
+
+
+ baseNotificationPublisherRegistration.close();
+
+ verify(streamListener).onStreamUnregistered(NetconfNotificationManager.BASE_STREAM_NAME);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.notifications.NotificationListenerRegistration;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.w3c.dom.Element;
+
+public class CreateSubscriptionTest {
+
+ private static final String CREATE_SUBSCRIPTION_XML = "<create-subscription\n" +
+ "xmlns=\"urn:ietf:params:xml:ns:netconf:notification:1.0\" xmlns:netconf=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<stream>TESTSTREAM</stream>" +
+ "</create-subscription>";
+
+ @Mock
+ private NetconfNotificationRegistry notificationRegistry;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ doReturn(true).when(notificationRegistry).isStreamAvailable(any(StreamNameType.class));
+ doReturn(mock(NotificationListenerRegistration.class)).when(notificationRegistry).registerNotificationListener(any(StreamNameType.class), any(NetconfNotificationListener.class));
+ }
+
+ @Test
+ public void testHandleWithNoSubsequentOperations() throws Exception {
+ final CreateSubscription createSubscription = new CreateSubscription("id", notificationRegistry);
+ createSubscription.setSession(mock(NetconfSession.class));
+
+ final Element e = XmlUtil.readXmlToElement(CREATE_SUBSCRIPTION_XML);
+
+ final XmlElement operationElement = XmlElement.fromDomElement(e);
+ final Element element = createSubscription.handleWithNoSubsequentOperations(XmlUtil.newDocument(), operationElement);
+
+ Assert.assertThat(XmlUtil.toString(element), CoreMatchers.containsString("ok"));
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Lists;
+import java.io.IOException;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.Streams;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.StreamsBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.StreamBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.StreamKey;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
+
+public class GetTest {
+
+ @Test
+ public void testSerializeStreamsSubtree() throws Exception {
+ final StreamsBuilder streamsBuilder = new StreamsBuilder();
+ final StreamBuilder streamBuilder = new StreamBuilder();
+ final StreamNameType base = new StreamNameType("base");
+ streamBuilder.setName(base);
+ streamBuilder.setKey(new StreamKey(base));
+ streamBuilder.setDescription("description");
+ streamBuilder.setReplaySupport(false);
+ streamsBuilder.setStream(Lists.newArrayList(streamBuilder.build()));
+ final Streams streams = streamsBuilder.build();
+
+ final Document response = getBlankResponse();
+ Get.serializeStreamsSubtree(response, streams);
+ final Diff diff = XMLUnit.compareXML(XmlUtil.toString(response),
+ "<rpc-reply message-id=\"101\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<data>\n" +
+ "<netconf xmlns=\"urn:ietf:params:xml:ns:netmod:notification\">\n" +
+ "<streams>\n" +
+ "<stream>\n" +
+ "<name>base</name>\n" +
+ "<description>description</description>\n" +
+ "<replaySupport>false</replaySupport>\n" +
+ "</stream>\n" +
+ "</streams>\n" +
+ "</netconf>\n" +
+ "</data>\n" +
+ "</rpc-reply>\n");
+
+ assertTrue(diff.toString(), diff.identical());
+ }
+
+ private Document getBlankResponse() throws IOException, SAXException {
+
+ return XmlUtil.readXmlToDocument("<rpc-reply message-id=\"101\"\n" +
+ "xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<data>\n" +
+ "</data>\n" +
+ "</rpc-reply>");
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Lists;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChangeBuilder;
+
+public class NotificationsTransformUtilTest {
+
+ private static final Date DATE = new Date();
+ private static final String innerNotification = "<netconf-capability-change xmlns=\"urn:ietf:params:xml:ns:yang:ietf-netconf-notifications\">" +
+ "<deleted-capability>uri3</deleted-capability>" +
+ "<deleted-capability>uri4</deleted-capability>" +
+ "<added-capability>uri1</added-capability>" +
+ "</netconf-capability-change>";
+
+ private static final String expectedNotification = "<notification xmlns=\"urn:ietf:params:netconf:capability:notification:1.0\">" +
+ innerNotification +
+ "<eventTime>" + new SimpleDateFormat(NetconfNotification.RFC3339_DATE_FORMAT_BLUEPRINT).format(DATE) + "</eventTime>" +
+ "</notification>";
+
+ @Test
+ public void testTransform() throws Exception {
+ final NetconfCapabilityChangeBuilder netconfCapabilityChangeBuilder = new NetconfCapabilityChangeBuilder();
+
+ netconfCapabilityChangeBuilder.setAddedCapability(Lists.newArrayList(new Uri("uri1"), new Uri("uri1")));
+ netconfCapabilityChangeBuilder.setDeletedCapability(Lists.newArrayList(new Uri("uri3"), new Uri("uri4")));
+
+ final NetconfCapabilityChange capabilityChange = netconfCapabilityChangeBuilder.build();
+ final NetconfNotification transform = NotificationsTransformUtil.transform(capabilityChange, DATE);
+
+ final String serialized = XmlUtil.toString(transform.getDocument());
+
+ XMLUnit.setIgnoreWhitespace(true);
+ final Diff diff = XMLUnit.compareXML(expectedNotification, serialized);
+ assertTrue(diff.toString(), diff.similar());
+ }
+
+ @Test
+ public void testTransformFromDOM() throws Exception {
+ final NetconfNotification netconfNotification = new NetconfNotification(XmlUtil.readXmlToDocument(innerNotification), DATE);
+
+ XMLUnit.setIgnoreWhitespace(true);
+ final Diff diff = XMLUnit.compareXML(expectedNotification, netconfNotification.toString());
+ assertTrue(diff.toString(), diff.similar());
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool;
+
+import com.google.common.base.Optional;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import org.opendaylight.controller.netconf.confignetconfconnector.util.Util;
+import org.opendaylight.controller.netconf.mapping.api.Capability;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.parser.builder.impl.ModuleBuilder;
+
+/**
+ * Can be passed instead of ModuleBuilderCapability when building capabilities
+ * in NetconfDeviceSimulator when testing various schema resolution related exceptions.
+ */
+public class FakeModuleBuilderCapability implements Capability {
+ private static final Date NO_REVISION = new Date(0);
+ private final ModuleBuilder input;
+ private final Optional<String> content;
+
+ public FakeModuleBuilderCapability(final ModuleBuilder input, final String inputStream) {
+ this.input = input;
+ this.content = Optional.of(inputStream);
+ }
+
+ @Override
+ public String getCapabilityUri() {
+ // FIXME capabilities in Netconf-impl need to check for NO REVISION
+ final String withoutRevision = getModuleNamespace().get() + "?module=" + getModuleName().get();
+ return hasRevision() ? withoutRevision + "&revision=" + Util.writeDate(input.getRevision()) : withoutRevision;
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.of(input.getNamespace().toString());
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.of(input.getName());
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.of(hasRevision() ? QName.formattedRevision(input.getRevision()) : "");
+ }
+
+ private boolean hasRevision() {
+ return !input.getRevision().equals(NO_REVISION);
+ }
+
+ /**
+ *
+ * @return empty schema source to trigger schema resolution exception.
+ */
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.absent();
+ }
+
+ @Override
+ public List<String> getLocation() {
+ return Collections.emptyList();
+ }
+}
private final ScheduledExecutorService minaTimerExecutor;
private final ExecutorService nioExecutor;
+ private boolean sendFakeSchema = false;
+
public NetconfDeviceSimulator() {
// TODO make pool size configurable
this(new NioEventLoopGroup(), new HashedWheelTimer(),
final Set<Capability> capabilities = Sets.newHashSet(Collections2.transform(moduleBuilders.keySet(), new Function<ModuleBuilder, Capability>() {
@Override
public Capability apply(final ModuleBuilder input) {
- return new ModuleBuilderCapability(input, moduleBuilders.get(input));
+ if (sendFakeSchema) {
+ sendFakeSchema = false;
+ return new FakeModuleBuilderCapability(input, moduleBuilders.get(input));
+ } else {
+ return new ModuleBuilderCapability(input, moduleBuilders.get(input));
+ }
}
}));
<module>netconf-ssh</module>
<module>netconf-tcp</module>
<module>netconf-monitoring</module>
+ <module>ietf-netconf</module>
<module>ietf-netconf-monitoring</module>
+ <module>ietf-netconf-notifications</module>
<module>ietf-netconf-monitoring-extension</module>
<module>netconf-connector-config</module>
<module>netconf-auth</module>
<module>netconf-usermanager</module>
<module>netconf-testtool</module>
+ <module>netconf-notifications-impl</module>
+ <module>netconf-notifications-api</module>
<module>netconf-artifacts</module>
</modules>
classes.add(NeutronLoadBalancerPoolNorthbound.class);
classes.add(NeutronLoadBalancerHealthMonitorNorthbound.class);
classes.add(NeutronLoadBalancerPoolMembersNorthbound.class);
+ classes.add(MOXyJsonProvider.class);
return classes;
}
moxyJsonProvider.setMarshalEmptyCollections(true);
moxyJsonProvider.setValueWrapper("$");
- Map<String, String> namespacePrefixMapper = new HashMap<String, String>(1);
+ Map<String, String> namespacePrefixMapper = new HashMap<String, String>(3);
namespacePrefixMapper.put("router", "router"); // FIXME: fill in with XSD
namespacePrefixMapper.put("provider", "provider"); // FIXME: fill in with XSD
+ namespacePrefixMapper.put("binding", "binding");
moxyJsonProvider.setNamespacePrefixMapper(namespacePrefixMapper);
moxyJsonProvider.setNamespaceSeparator(':');
package org.opendaylight.controller.networkconfig.neutron;
+
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
@XmlElement (name="security_groups")
List<NeutronSecurityGroup> securityGroups;
+ @XmlElement (namespace= "binding", name="host_id")
+ String bindinghostID;
+
+ @XmlElement (namespace= "binding", name="vnic_type")
+ String bindingvnicType;
+
+ @XmlElement (namespace= "binding", name="vif_type")
+ String bindingvifType;
+
+
/* this attribute stores the floating IP address assigned to
* each fixed IP address
*/
this.securityGroups = securityGroups;
}
+ public String getBindinghostID() {
+ return bindinghostID;
+ }
+
+ public void setBindinghostID(String bindinghostID) {
+ this.bindinghostID = bindinghostID;
+ }
+
+ public String getBindingvnicType() {
+ return bindingvnicType;
+ }
+
+ public void setBindingvnicType(String bindingvnicType) {
+ this.bindingvnicType = bindingvnicType;
+ }
+
+ public String getBindingvifType() {
+ return bindingvifType;
+ }
+
+ public void setBindingvifType(String bindingvifType) {
+ this.bindingvifType = bindingvifType;
+ }
+
public NeutronFloatingIP getFloatingIP(String key) {
if (!floatingIPMap.containsKey(key)) {
return null;
return "NeutronPort [portUUID=" + portUUID + ", networkUUID=" + networkUUID + ", name=" + name
+ ", adminStateUp=" + adminStateUp + ", status=" + status + ", macAddress=" + macAddress
+ ", fixedIPs=" + fixedIPs + ", deviceID=" + deviceID + ", deviceOwner=" + deviceOwner + ", tenantID="
- + tenantID + ", floatingIPMap=" + floatingIPMap + ", securityGroups=" + securityGroups + "]";
+ + tenantID + ", floatingIPMap=" + floatingIPMap + ", securityGroups=" + securityGroups
+ + ", bindinghostID=" + bindinghostID + ", bindingvnicType=" + bindingvnicType
+ + ", bindingvifType=" + bindingvifType + "]";
}
}
<artifactId>releasepom</artifactId>
<version>0.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
- <name>controller</name> <!-- Used by Sonar to set project name -->
+ <name>controller</name>
+ <!-- Used by Sonar to set project name -->
<modules>