<repository>mvn:org.opendaylight.controller/features-akka/${commons.opendaylight.version}/xml/features</repository>
<feature name='odl-mdsal-all' version='${project.version}' description="OpenDaylight :: MDSAL :: All">
<feature version='${project.version}'>odl-mdsal-broker</feature>
- <feature version='${project.version}'>odl-mdsal-clustering</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<feature version='${project.version}'>odl-mdsal-xsql</feature>
<feature version='${project.version}'>odl-toaster</feature>
</feature>
<configfile finalname='${config.configfile.directory}/${config.netconf.mdsal.configfile}'>mvn:org.opendaylight.controller/netconf-mdsal-config/${netconf.version}/xml/config</configfile>
</feature>
- <feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
+ <feature name='odl-mdsal-broker-local' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
<feature version='${yangtools.version}'>odl-yangtools-common</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding</feature>
<feature version='${yangtools.version}'>odl-yangtools-models</feature>
<configfile finalname="${config.configfile.directory}/${config.xsql.configfile}">mvn:org.opendaylight.controller/sal-dom-xsql-config/${project.version}/xml/config</configfile>
</feature>
<feature name ='odl-mdsal-clustering-commons' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<feature version='${akka.version}'>odl-akka-system</feature>
<feature version='${akka.version}'>odl-akka-persistence</feature>
<bundle>mvn:org.opendaylight.controller/sal-clustering-commons/${project.version}</bundle>
<bundle>mvn:com.codahale.metrics/metrics-core/3.0.1</bundle>
</feature>
<feature name ='odl-mdsal-distributed-datastore' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<feature version='${project.version}'>odl-mdsal-clustering-commons</feature>
<feature version='${akka.version}'>odl-akka-clustering</feature>
<bundle>mvn:org.opendaylight.controller/sal-distributed-datastore/${project.version}</bundle>
</feature>
<feature name ='odl-mdsal-remoterpc-connector' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<feature version='${project.version}'>odl-mdsal-clustering-commons</feature>
<feature version='${akka.version}'>odl-akka-clustering</feature>
<feature version='0.7'>odl-akka-leveldb</feature>
<bundle>mvn:org.opendaylight.controller/sal-remoterpc-connector/${project.version}</bundle>
</feature>
- <feature name ='odl-mdsal-clustering' version='${project.version}'>
+ <feature name ='odl-mdsal-broker' version='${project.version}'>
<feature version='${project.version}'>odl-mdsal-remoterpc-connector</feature>
<feature version='${project.version}'>odl-mdsal-distributed-datastore</feature>
<configfile finalname="${config.configfile.directory}/${config.clustering.configfile}">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/config</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
<configfile finalname="etc/org.opendaylight.controller.cluster.datastore.cfg">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/cfg/datastore</configfile>
</feature>
-
+ <feature name ='odl-mdsal-clustering' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-broker</feature>
+ </feature>
<feature name='odl-clustering-test-app' version='${project.version}'>
<feature version='${yangtools.version}'>odl-yangtools-models</feature>
- <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
<artifactId>${artifactId}-impl</artifactId>
<version>${symbol_dollar}{project.version}</version>
</dependency>
+ <dependency>
+ <groupId>${symbol_dollar}{project.groupId}</groupId>
+ <artifactId>${artifactId}-impl</artifactId>
+ <version>${symbol_dollar}{project.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
<dependency>
<groupId>${symbol_dollar}{project.groupId}</groupId>
<artifactId>${artifactId}-api</artifactId>
<provider>/modules/module[type='runtime-generated-mapping'][name='runtime-mapping-singleton']</provider>
</instance>
</service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-normalized-node-serializer</type>
+ <instance>
+ <name>runtime-mapping-singleton</name>
+ <provider>/modules/module[type='runtime-generated-mapping'][name='runtime-mapping-singleton']</provider>
+ </instance>
+ </service>
<service>
<type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding-impl:binding-new-notification-service</type>
<instance>
import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
import org.opendaylight.controller.cluster.example.messages.PrintRole;
public class ExampleActor extends RaftActor {
private final Map<String, String> state = new HashMap();
- private final DataPersistenceProvider dataPersistenceProvider;
private long persistIdentifier = 1;
private final Optional<ActorRef> roleChangeNotifier;
public ExampleActor(String id, Map<String, String> peerAddresses,
Optional<ConfigParams> configParams) {
super(id, peerAddresses, configParams);
- this.dataPersistenceProvider = new PersistentDataProvider();
+ setPersistence(true);
roleChangeNotifier = createRoleChangeNotifier(id);
}
}
- @Override
- protected DataPersistenceProvider persistence() {
- return dataPersistenceProvider;
- }
-
@Override public void onReceiveRecover(Object message)throws Exception {
super.onReceiveRecover(message);
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.slf4j.Logger;
+
+/**
+ * Implementation of ElectionTerm for the RaftActor. Tracks the current election
+ * term and the candidate voted for, and can durably record both via the
+ * supplied DataPersistenceProvider.
+ */
+class ElectionTermImpl implements ElectionTerm {
+ /**
+ * The current election term. Starts at 0 and is changed only through
+ * update/updateAndPersist.
+ */
+ private long currentTerm = 0;
+ // Id of the candidate voted for in the current term, or null if no vote was cast.
+ private String votedFor = null;
+
+ // Used by updateAndPersist to persist term changes as UpdateElectionTerm messages.
+ private final DataPersistenceProvider persistence;
+
+ private final Logger log;
+ // Identifier prefixed to log output to distinguish this actor's messages.
+ private final String logId;
+
+ ElectionTermImpl(DataPersistenceProvider persistence, String logId, Logger log) {
+ this.persistence = persistence;
+ this.logId = logId;
+ this.log = log;
+ }
+
+ @Override
+ public long getCurrentTerm() {
+ return currentTerm;
+ }
+
+ @Override
+ public String getVotedFor() {
+ return votedFor;
+ }
+
+ // Updates the in-memory term state only; performs no persistence.
+ @Override public void update(long currentTerm, String votedFor) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Set currentTerm={}, votedFor={}", logId, currentTerm, votedFor);
+ }
+ this.currentTerm = currentTerm;
+ this.votedFor = votedFor;
+ }
+
+ // Updates in-memory state first, then persists an UpdateElectionTerm message
+ // with a no-op completion callback.
+ @Override
+ public void updateAndPersist(long currentTerm, String votedFor){
+ update(currentTerm, votedFor);
+ // FIXME : Maybe first persist then update the state
+ persistence.persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), NoopProcedure.instance());
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+
+/**
+ * An akka Procedure that does nothing.
+ *
+ * @author Thomas Pantelis
+ */
+public class NoopProcedure<T> implements Procedure<T> {
+
+ // Shared stateless singleton; reusable for any T because apply() ignores its argument.
+ private static final NoopProcedure<Object> INSTANCE = new NoopProcedure<>();
+
+ private NoopProcedure() {
+ }
+
+ /**
+ * Returns the shared no-op instance, cast to the caller's element type.
+ * The unchecked cast is safe because the instance holds no state of type T.
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> NoopProcedure<T> instance() {
+ return (NoopProcedure<T>) INSTANCE;
+ }
+
+ @Override
+ public void apply(Object notUsed) {
+ // Intentionally empty - this procedure performs no action.
+ }
+}
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.time.DurationFormatUtils;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
+import org.opendaylight.controller.cluster.PersistentDataProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader;
-import org.opendaylight.controller.cluster.raft.behaviors.AbstractRaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.behaviors.DelegatingRaftActorBehavior;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
- private static final Procedure<ApplyJournalEntries> APPLY_JOURNAL_ENTRIES_PERSIST_CALLBACK =
- new Procedure<ApplyJournalEntries>() {
- @Override
- public void apply(ApplyJournalEntries param) throws Exception {
- }
- };
private static final String COMMIT_SNAPSHOT = "commit_snapshot";
protected final Logger LOG = LoggerFactory.getLogger(getClass());
* The current state determines the current behavior of a RaftActor
* A Raft Actor always starts off in the Follower State
*/
- private RaftActorBehavior currentBehavior;
+ private final DelegatingRaftActorBehavior currentBehavior = new DelegatingRaftActorBehavior();
/**
* This context should NOT be passed directly to any other actor it is
*/
private final RaftActorContextImpl context;
- private final Procedure<Void> createSnapshotProcedure = new CreateSnapshotProcedure();
+ private final DelegatingPersistentDataProvider delegatingPersistenceProvider = new DelegatingPersistentDataProvider(null);
- /**
- * The in-memory journal
- */
- private ReplicatedLogImpl replicatedLog = new ReplicatedLogImpl();
+ private final Procedure<Void> createSnapshotProcedure = new CreateSnapshotProcedure();
private Stopwatch recoveryTimer;
Optional<ConfigParams> configParams) {
context = new RaftActorContextImpl(this.getSelf(),
- this.getContext(), id, new ElectionTermImpl(),
- -1, -1, replicatedLog, peerAddresses,
- (configParams.isPresent() ? configParams.get(): new DefaultConfigParamsImpl()),
- LOG);
+ this.getContext(), id, new ElectionTermImpl(delegatingPersistenceProvider, id, LOG),
+ -1, -1, peerAddresses,
+ (configParams.isPresent() ? configParams.get(): new DefaultConfigParamsImpl()), LOG);
+
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, delegatingPersistenceProvider, currentBehavior));
}
private void initRecoveryTimer() {
} else if (message instanceof ApplyJournalEntries) {
onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
} else if (message instanceof DeleteEntries) {
- replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+ replicatedLog().removeFrom(((DeleteEntries) message).getFromIndex());
} else if (message instanceof UpdateElectionTerm) {
context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
((UpdateElectionTerm) message).getVotedFor());
// Create a replicated log with the snapshot information
// The replicated log can be used later on to retrieve this snapshot
// when we need to install it on a peer
- replicatedLog = new ReplicatedLogImpl(snapshot);
- context.setReplicatedLog(replicatedLog);
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, delegatingPersistenceProvider,
+ currentBehavior));
context.setLastApplied(snapshot.getLastAppliedIndex());
context.setCommitIndex(snapshot.getLastAppliedIndex());
timer.stop();
LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
- replicatedLog.size(), persistenceId(), timer.toString(),
- replicatedLog.getSnapshotIndex(), replicatedLog.getSnapshotTerm());
+ replicatedLog().size(), persistenceId(), timer.toString(),
+ replicatedLog().getSnapshotIndex(), replicatedLog().getSnapshotTerm());
}
private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
LOG.debug("{}: Received ReplicatedLogEntry for recovery: {}", persistenceId(), logEntry.getIndex());
}
- replicatedLog.append(logEntry);
+ replicatedLog().append(logEntry);
}
private void onRecoveredApplyLogEntries(long toIndex) {
}
for (long i = context.getLastApplied() + 1; i <= toIndex; i++) {
- batchRecoveredLogEntry(replicatedLog.get(i));
+ batchRecoveredLogEntry(replicatedLog().get(i));
}
context.setLastApplied(toIndex);
"Persistence Id = " + persistenceId() +
" Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
"journal-size={}",
- replicatedLog.lastIndex(), replicatedLog.getSnapshotIndex(),
- replicatedLog.getSnapshotTerm(), replicatedLog.size());
+ replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
+ replicatedLog().getSnapshotTerm(), replicatedLog().size());
initializeBehavior();
}
}
protected void changeCurrentBehavior(RaftActorBehavior newBehavior){
- reusableBehaviorStateHolder.init(currentBehavior);
- currentBehavior = newBehavior;
- handleBehaviorChange(reusableBehaviorStateHolder, currentBehavior);
+ reusableBehaviorStateHolder.init(getCurrentBehavior());
+ setCurrentBehavior(newBehavior);
+ handleBehaviorChange(reusableBehaviorStateHolder, getCurrentBehavior());
}
@Override public void handleCommand(Object message) {
LOG.debug("{}: Persisting ApplyLogEntries with index={}", persistenceId(), applyEntries.getToIndex());
}
- persistence().persist(applyEntries, APPLY_JOURNAL_ENTRIES_PERSIST_CALLBACK);
+ persistence().persist(applyEntries, NoopProcedure.instance());
} else if(message instanceof ApplySnapshot ) {
Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
applySnapshot(snapshot.getState());
//clears the followers log, sets the snapshot index to ensure adjusted-index works
- replicatedLog = new ReplicatedLogImpl(snapshot);
- context.setReplicatedLog(replicatedLog);
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, delegatingPersistenceProvider,
+ currentBehavior));
context.setLastApplied(snapshot.getLastAppliedIndex());
} else if (message instanceof FindLeader) {
} else if (message.equals(COMMIT_SNAPSHOT)) {
commitSnapshot(-1);
} else {
- reusableBehaviorStateHolder.init(currentBehavior);
+ reusableBehaviorStateHolder.init(getCurrentBehavior());
- currentBehavior = currentBehavior.handleMessage(getSender(), message);
+ setCurrentBehavior(currentBehavior.handleMessage(getSender(), message));
- handleBehaviorChange(reusableBehaviorStateHolder, currentBehavior);
+ handleBehaviorChange(reusableBehaviorStateHolder, getCurrentBehavior());
}
}
OnDemandRaftState.Builder builder = OnDemandRaftState.builder()
.commitIndex(context.getCommitIndex())
.currentTerm(context.getTermInformation().getCurrentTerm())
- .inMemoryJournalDataSize(replicatedLog.dataSize())
- .inMemoryJournalLogSize(replicatedLog.size())
+ .inMemoryJournalDataSize(replicatedLog().dataSize())
+ .inMemoryJournalLogSize(replicatedLog().size())
.isSnapshotCaptureInitiated(context.getSnapshotManager().isCapturing())
.lastApplied(context.getLastApplied())
- .lastIndex(replicatedLog.lastIndex())
- .lastTerm(replicatedLog.lastTerm())
+ .lastIndex(replicatedLog().lastIndex())
+ .lastTerm(replicatedLog().lastTerm())
.leader(getLeaderId())
.raftState(currentBehavior.state().toString())
.replicatedToAllIndex(currentBehavior.getReplicatedToAllIndex())
- .snapshotIndex(replicatedLog.getSnapshotIndex())
- .snapshotTerm(replicatedLog.getSnapshotTerm())
+ .snapshotIndex(replicatedLog().getSnapshotIndex())
+ .snapshotTerm(replicatedLog().getSnapshotTerm())
.votedFor(context.getTermInformation().getVotedFor())
.peerAddresses(ImmutableMap.copyOf(context.getPeerAddresses()));
builder.lastLogTerm(lastLogEntry.getTerm());
}
- if(currentBehavior instanceof AbstractLeader) {
- AbstractLeader leader = (AbstractLeader)currentBehavior;
+ if(getCurrentBehavior() instanceof AbstractLeader) {
+ AbstractLeader leader = (AbstractLeader)getCurrentBehavior();
Collection<String> followerIds = leader.getFollowerIds();
List<FollowerInfo> followerInfoList = Lists.newArrayListWithCapacity(followerIds.size());
for(String id: followerIds) {
final RaftActorContext raftContext = getRaftActorContext();
- replicatedLog
- .appendAndPersist(replicatedLogEntry, new Procedure<ReplicatedLogEntry>() {
- @Override
- public void apply(ReplicatedLogEntry replicatedLogEntry) throws Exception {
- if(!hasFollowers()){
- // Increment the Commit Index and the Last Applied values
- raftContext.setCommitIndex(replicatedLogEntry.getIndex());
- raftContext.setLastApplied(replicatedLogEntry.getIndex());
+ replicatedLog().appendAndPersist(replicatedLogEntry, new Procedure<ReplicatedLogEntry>() {
+ @Override
+ public void apply(ReplicatedLogEntry replicatedLogEntry) throws Exception {
+ if(!hasFollowers()){
+ // Increment the Commit Index and the Last Applied values
+ raftContext.setCommitIndex(replicatedLogEntry.getIndex());
+ raftContext.setLastApplied(replicatedLogEntry.getIndex());
- // Apply the state immediately
- applyState(clientActor, identifier, data);
+ // Apply the state immediately
+ applyState(clientActor, identifier, data);
- // Send a ApplyJournalEntries message so that we write the fact that we applied
- // the state to durable storage
- self().tell(new ApplyJournalEntries(replicatedLogEntry.getIndex()), self());
+ // Send a ApplyJournalEntries message so that we write the fact that we applied
+ // the state to durable storage
+ self().tell(new ApplyJournalEntries(replicatedLogEntry.getIndex()), self());
- context.getSnapshotManager().trimLog(context.getLastApplied(), currentBehavior);
+ context.getSnapshotManager().trimLog(context.getLastApplied(), currentBehavior);
- } else if (clientActor != null) {
- // Send message for replication
- currentBehavior.handleMessage(getSelf(),
- new Replicate(clientActor, identifier,
- replicatedLogEntry)
- );
- }
+ } else if (clientActor != null) {
+ // Send message for replication
+ currentBehavior.handleMessage(getSelf(),
+ new Replicate(clientActor, identifier, replicatedLogEntry));
+ }
+ }
+ });
+ }
- }
- }); }
+ private ReplicatedLog replicatedLog() {
+ return context.getReplicatedLog();
+ }
protected String getId() {
return context.getId();
}
+ @VisibleForTesting
+ void setCurrentBehavior(RaftActorBehavior behavior) {
+ currentBehavior.setDelegate(behavior);
+ }
+
+ protected RaftActorBehavior getCurrentBehavior() {
+ return currentBehavior.getDelegate();
+ }
+
/**
* Derived actors can call the isLeader method to check if the current
* RaftActor is the Leader or not
}
protected ReplicatedLogEntry getLastLogEntry() {
- return replicatedLog.last();
+ return replicatedLog().last();
}
protected Long getCurrentTerm(){
context.setConfigParams(configParams);
}
+ public final DataPersistenceProvider persistence() {
+ return delegatingPersistenceProvider.getDelegate();
+ }
+
+ public void setPersistence(DataPersistenceProvider provider) {
+ delegatingPersistenceProvider.setDelegate(provider);
+ }
+
+ protected void setPersistence(boolean persistent) {
+ if(persistent) {
+ setPersistence(new PersistentDataProvider(this));
+ } else {
+ setPersistence(new NonPersistentDataProvider() {
+ /**
+ * The way snapshotting works is,
+ * <ol>
+ * <li> RaftActor calls createSnapshot on the Shard
+ * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
+ * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save
+ * the snapshot. The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the
+ * RaftActor gets SaveSnapshot success it commits the snapshot to the in-memory journal. This
+ * commitSnapshot is mimicking what is done in SaveSnapshotSuccess.
+ * </ol>
+ */
+ @Override
+ public void saveSnapshot(Object o) {
+ // Make saving Snapshot successful
+ // Committing the snapshot here would end up calling commit in the creating state which would
+ // be a state violation. That's why now we send a message to commit the snapshot.
+ self().tell(COMMIT_SNAPSHOT, self());
+ }
+ });
+ }
+ }
+
/**
* setPeerAddress sets the address of a known peer at a later time.
* <p>
*/
protected abstract void onStateChanged();
- protected abstract DataPersistenceProvider persistence();
-
/**
* Notifier Actor for this RaftActor to notify when a role change happens
* @return ActorRef - ActorRef of the notifier or Optional.absent if none.
private void handleCaptureSnapshotReply(byte[] snapshotBytes) {
LOG.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", persistenceId(), snapshotBytes.length);
- context.getSnapshotManager().persist(persistence(), snapshotBytes, currentBehavior, getTotalMemory());
- }
-
- protected long getTotalMemory() {
- return Runtime.getRuntime().totalMemory();
+ context.getSnapshotManager().persist(persistence(), snapshotBytes, currentBehavior, context.getTotalMemory());
}
protected boolean hasFollowers(){
- return getRaftActorContext().getPeerAddresses().keySet().size() > 0;
- }
-
- private class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
- private static final int DATA_SIZE_DIVIDER = 5;
- private long dataSizeSinceLastSnapshot = 0L;
-
-
- public ReplicatedLogImpl(Snapshot snapshot) {
- super(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
- snapshot.getUnAppliedEntries());
- }
-
- public ReplicatedLogImpl() {
- super();
- }
-
- @Override public void removeFromAndPersist(long logEntryIndex) {
- int adjustedIndex = adjustedIndex(logEntryIndex);
-
- if (adjustedIndex < 0) {
- return;
- }
-
- // FIXME: Maybe this should be done after the command is saved
- journal.subList(adjustedIndex , journal.size()).clear();
-
- persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>() {
-
- @Override
- public void apply(DeleteEntries param)
- throws Exception {
- //FIXME : Doing nothing for now
- dataSize = 0;
- for (ReplicatedLogEntry entry : journal) {
- dataSize += entry.size();
- }
- }
- });
- }
-
- @Override public void appendAndPersist(
- final ReplicatedLogEntry replicatedLogEntry) {
- appendAndPersist(replicatedLogEntry, null);
- }
-
- public void appendAndPersist(
- final ReplicatedLogEntry replicatedLogEntry,
- final Procedure<ReplicatedLogEntry> callback) {
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Append log entry and persist {} ", persistenceId(), replicatedLogEntry);
- }
-
- // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
- journal.add(replicatedLogEntry);
-
- // When persisting events with persist it is guaranteed that the
- // persistent actor will not receive further commands between the
- // persist call and the execution(s) of the associated event
- // handler. This also holds for multiple persist calls in context
- // of a single command.
- persistence().persist(replicatedLogEntry,
- new Procedure<ReplicatedLogEntry>() {
- @Override
- public void apply(ReplicatedLogEntry evt) throws Exception {
- int logEntrySize = replicatedLogEntry.size();
-
- dataSize += logEntrySize;
- long dataSizeForCheck = dataSize;
-
- dataSizeSinceLastSnapshot += logEntrySize;
-
- if (!hasFollowers()) {
- // When we do not have followers we do not maintain an in-memory log
- // due to this the journalSize will never become anything close to the
- // snapshot batch count. In fact will mostly be 1.
- // Similarly since the journal's dataSize depends on the entries in the
- // journal the journal's dataSize will never reach a value close to the
- // memory threshold.
- // By maintaining the dataSize outside the journal we are tracking essentially
- // what we have written to the disk however since we no longer are in
- // need of doing a snapshot just for the sake of freeing up memory we adjust
- // the real size of data by the DATA_SIZE_DIVIDER so that we do not snapshot as often
- // as if we were maintaining a real snapshot
- dataSizeForCheck = dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
- }
- long journalSize = replicatedLogEntry.getIndex() + 1;
- long dataThreshold = getTotalMemory() *
- context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
-
- if ((journalSize % context.getConfigParams().getSnapshotBatchCount() == 0
- || dataSizeForCheck > dataThreshold)) {
-
- boolean started = context.getSnapshotManager().capture(replicatedLogEntry,
- currentBehavior.getReplicatedToAllIndex());
-
- if(started){
- dataSizeSinceLastSnapshot = 0;
- }
-
- }
-
- if (callback != null){
- callback.apply(replicatedLogEntry);
- }
- }
- }
- );
- }
-
+ return getRaftActorContext().hasFollowers();
}
static class DeleteEntries implements Serializable {
}
}
-
- private class ElectionTermImpl implements ElectionTerm {
- /**
- * Identifier of the actor whose election term information this is
- */
- private long currentTerm = 0;
- private String votedFor = null;
-
- @Override
- public long getCurrentTerm() {
- return currentTerm;
- }
-
- @Override
- public String getVotedFor() {
- return votedFor;
- }
-
- @Override public void update(long currentTerm, String votedFor) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Set currentTerm={}, votedFor={}", persistenceId(), currentTerm, votedFor);
- }
- this.currentTerm = currentTerm;
- this.votedFor = votedFor;
- }
-
- @Override
- public void updateAndPersist(long currentTerm, String votedFor){
- update(currentTerm, votedFor);
- // FIXME : Maybe first persist then update the state
- persistence().persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
-
- @Override public void apply(UpdateElectionTerm param)
- throws Exception {
-
- }
- });
- }
- }
-
static class UpdateElectionTerm implements Serializable {
private static final long serialVersionUID = 1L;
private final long currentTerm;
}
}
- protected class NonPersistentRaftDataProvider extends NonPersistentDataProvider {
-
- public NonPersistentRaftDataProvider(){
-
- }
-
- /**
- * The way snapshotting works is,
- * <ol>
- * <li> RaftActor calls createSnapshot on the Shard
- * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
- * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save the snapshot.
- * The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the RaftActor gets SaveSnapshot
- * success it commits the snapshot to the in-memory journal. This commitSnapshot is mimicking what is done
- * in SaveSnapshotSuccess.
- * </ol>
- * @param o
- */
- @Override
- public void saveSnapshot(Object o) {
- // Make saving Snapshot successful
- // Committing the snapshot here would end up calling commit in the creating state which would
- // be a state violation. That's why now we send a message to commit the snapshot.
- self().tell(COMMIT_SNAPSHOT, self());
- }
- }
-
-
private class CreateSnapshotProcedure implements Procedure<Void> {
@Override
}
}
- @VisibleForTesting
- void setCurrentBehavior(AbstractRaftActorBehavior behavior) {
- currentBehavior = behavior;
- }
-
- protected RaftActorBehavior getCurrentBehavior() {
- return currentBehavior;
- }
-
private static class BehaviorStateHolder {
private RaftActorBehavior behavior;
private String leaderId;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Supplier;
import java.util.Map;
import org.slf4j.Logger;
SnapshotManager getSnapshotManager();
+ boolean hasFollowers();
+
+ long getTotalMemory();
+
+ @VisibleForTesting
+ void setTotalMemoryRetriever(Supplier<Long> retriever);
+
}
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.UntypedActorContext;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Supplier;
import java.util.Map;
import org.slf4j.Logger;
private ConfigParams configParams;
- private boolean snapshotCaptureInitiated;
+ @VisibleForTesting
+ private Supplier<Long> totalMemoryRetriever;
// Snapshot manager will need to be created on demand as it needs raft actor context which cannot
// be passed to it in the constructor
private SnapshotManager snapshotManager;
- public RaftActorContextImpl(ActorRef actor, UntypedActorContext context,
- String id,
- ElectionTerm termInformation, long commitIndex,
- long lastApplied, ReplicatedLog replicatedLog,
- Map<String, String> peerAddresses, ConfigParams configParams,
- Logger logger) {
+ public RaftActorContextImpl(ActorRef actor, UntypedActorContext context, String id,
+ ElectionTerm termInformation, long commitIndex, long lastApplied, Map<String, String> peerAddresses,
+ ConfigParams configParams, Logger logger) {
this.actor = actor;
this.context = context;
this.id = id;
this.termInformation = termInformation;
this.commitIndex = commitIndex;
this.lastApplied = lastApplied;
- this.replicatedLog = replicatedLog;
this.peerAddresses = peerAddresses;
this.configParams = configParams;
this.LOG = logger;
peerAddresses.put(peerId, peerAddress);
}
+ @Override
public SnapshotManager getSnapshotManager() {
if(snapshotManager == null){
snapshotManager = new SnapshotManager(this, LOG);
}
return snapshotManager;
}
+
+ @Override
+ public long getTotalMemory() {
+ return totalMemoryRetriever != null ? totalMemoryRetriever.get() : Runtime.getRuntime().totalMemory();
+ }
+
+ @Override
+ public void setTotalMemoryRetriever(Supplier<Long> retriever) {
+ totalMemoryRetriever = retriever;
+ }
+
+ @Override
+ public boolean hasFollowers() {
+ return getPeerAddresses().keySet().size() > 0;
+ }
}
package org.opendaylight.controller.cluster.raft;
+import akka.japi.Procedure;
import java.util.List;
/**
*/
void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry);
+    /**
+     * Appends an entry to the in-memory journal and persists it, invoking the given
+     * callback after the persist completes.
+     *
+     * @param replicatedLogEntry the log entry to append and persist
+     * @param callback invoked after the entry has been persisted; may be null
+     */
+    void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback);
+
/**
*
* @param index the index of the log entry
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+import java.util.Collections;
+import java.util.List;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+
+/**
+ * Implementation of ReplicatedLog used by the RaftActor.
+ */
+class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
+    // When there are no followers the in-memory journal stays small, so the raw
+    // persisted-data size is divided by this factor before being compared against
+    // the snapshot threshold (see appendAndPersist below).
+    private static final int DATA_SIZE_DIVIDER = 5;
+
+    // Bytes persisted since the last snapshot capture was started.
+    private long dataSizeSinceLastSnapshot = 0L;
+    private final RaftActorContext context;
+    private final DataPersistenceProvider persistence;
+    private final RaftActorBehavior currentBehavior;
+
+    // Invoked after a DeleteEntries message is persisted: recomputes dataSize
+    // from the entries remaining in the journal.
+    private final Procedure<DeleteEntries> deleteProcedure = new Procedure<DeleteEntries>() {
+        @Override
+        public void apply(DeleteEntries param) {
+            dataSize = 0;
+            for (ReplicatedLogEntry entry : journal) {
+                dataSize += entry.size();
+            }
+        }
+    };
+
+    /**
+     * Creates a ReplicatedLog seeded from a recovered snapshot.
+     */
+    static ReplicatedLog newInstance(Snapshot snapshot, RaftActorContext context,
+            DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+        return new ReplicatedLogImpl(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
+                snapshot.getUnAppliedEntries(), context, persistence, currentBehavior);
+    }
+
+    /**
+     * Creates an empty ReplicatedLog (no prior snapshot).
+     */
+    static ReplicatedLog newInstance(RaftActorContext context,
+            DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+        return new ReplicatedLogImpl(-1L, -1L, Collections.<ReplicatedLogEntry>emptyList(), context,
+                persistence, currentBehavior);
+    }
+
+    private ReplicatedLogImpl(long snapshotIndex, long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries,
+            RaftActorContext context, DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+        super(snapshotIndex, snapshotTerm, unAppliedEntries);
+        this.context = context;
+        this.persistence = persistence;
+        this.currentBehavior = currentBehavior;
+    }
+
+    @Override
+    public void removeFromAndPersist(long logEntryIndex) {
+        int adjustedIndex = adjustedIndex(logEntryIndex);
+
+        // Index precedes the snapshot (or is invalid) - nothing to remove.
+        if (adjustedIndex < 0) {
+            return;
+        }
+
+        // FIXME: Maybe this should be done after the command is saved
+        journal.subList(adjustedIndex , journal.size()).clear();
+
+        // deleteProcedure recomputes dataSize once the DeleteEntries is persisted.
+        persistence.persist(new DeleteEntries(adjustedIndex), deleteProcedure);
+    }
+
+    @Override
+    public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry) {
+        appendAndPersist(replicatedLogEntry, null);
+    }
+
+    @Override
+    public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
+            final Procedure<ReplicatedLogEntry> callback)  {
+
+        if(context.getLogger().isDebugEnabled()) {
+            context.getLogger().debug("{}: Append log entry and persist {} ", context.getId(), replicatedLogEntry);
+        }
+
+        // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
+        journal.add(replicatedLogEntry);
+
+        // When persisting events with persist it is guaranteed that the
+        // persistent actor will not receive further commands between the
+        // persist call and the execution(s) of the associated event
+        // handler. This also holds for multiple persist calls in context
+        // of a single command.
+        persistence.persist(replicatedLogEntry,
+            new Procedure<ReplicatedLogEntry>() {
+                @Override
+                public void apply(ReplicatedLogEntry evt) throws Exception {
+                    int logEntrySize = replicatedLogEntry.size();
+
+                    dataSize += logEntrySize;
+                    long dataSizeForCheck = dataSize;
+
+                    dataSizeSinceLastSnapshot += logEntrySize;
+
+                    if (!context.hasFollowers()) {
+                        // When we do not have followers we do not maintain an in-memory log
+                        // due to this the journalSize will never become anything close to the
+                        // snapshot batch count. In fact will mostly be 1.
+                        // Similarly since the journal's dataSize depends on the entries in the
+                        // journal the journal's dataSize will never reach a value close to the
+                        // memory threshold.
+                        // By maintaining the dataSize outside the journal we are tracking essentially
+                        // what we have written to the disk however since we no longer are in
+                        // need of doing a snapshot just for the sake of freeing up memory we adjust
+                        // the real size of data by the DATA_SIZE_DIVIDER so that we do not snapshot as often
+                        // as if we were maintaining a real snapshot
+                        dataSizeForCheck = dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
+                    }
+                    long journalSize = replicatedLogEntry.getIndex() + 1;
+                    long dataThreshold = context.getTotalMemory() *
+                            context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
+
+                    // Trigger a snapshot when a full batch of entries has accumulated or when
+                    // the (possibly scaled) data size exceeds the configured memory threshold.
+                    if ((journalSize % context.getConfigParams().getSnapshotBatchCount() == 0
+                            || dataSizeForCheck > dataThreshold)) {
+
+                        boolean started = context.getSnapshotManager().capture(replicatedLogEntry,
+                                currentBehavior.getReplicatedToAllIndex());
+
+                        // Reset the running counter only if a capture actually started.
+                        if(started){
+                            dataSizeSinceLastSnapshot = 0;
+                        }
+                    }
+
+                    if (callback != null){
+                        callback.apply(replicatedLogEntry);
+                    }
+                }
+            }
+        );
+    }
+}
\ No newline at end of file
long getTerm();
}
- private static class LastAppliedTermInformationReader implements TermInformationReader{
+ static class LastAppliedTermInformationReader implements TermInformationReader{
private long index;
private long term;
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.raft.RaftState;
+
+/**
+ * A RaftActorBehavior implementation that delegates to another implementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class DelegatingRaftActorBehavior implements RaftActorBehavior {
+    private RaftActorBehavior delegate;
+
+    /**
+     * Returns the current delegate behavior.
+     */
+    public RaftActorBehavior getDelegate() {
+        return delegate;
+    }
+
+    /**
+     * Sets the behavior to which all RaftActorBehavior calls are forwarded.
+     * A delegate must be set before any of the delegating methods below is
+     * invoked - they do not null-check.
+     */
+    public void setDelegate(RaftActorBehavior delegate) {
+        this.delegate = delegate;
+    }
+
+    @Override
+    public void close() throws Exception {
+        delegate.close();
+    }
+
+    @Override
+    public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
+        return delegate.handleMessage(sender, message);
+    }
+
+    @Override
+    public RaftState state() {
+        return delegate.state();
+    }
+
+    @Override
+    public String getLeaderId() {
+        return delegate.getLeaderId();
+    }
+
+    @Override
+    public void setReplicatedToAllIndex(long replicatedToAllIndex) {
+        delegate.setReplicatedToAllIndex(replicatedToAllIndex);
+    }
+
+    @Override
+    public long getReplicatedToAllIndex() {
+        return delegate.getReplicatedToAllIndex();
+    }
+}
import akka.testkit.TestActorRef;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
+import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableMap;
import java.util.Collections;
import java.util.List;
private final TestActorRef<MessageCollectorActor> collectorActor;
private final Map<Class<?>, Boolean> dropMessages = new ConcurrentHashMap<>();
private volatile byte[] snapshot;
- private volatile long mockTotalMemory;
private TestRaftActor(String id, Map<String, String> peerAddresses, ConfigParams config,
TestActorRef<MessageCollectorActor> collectorActor) {
super(id, peerAddresses, Optional.of(config), null);
- dataPersistenceProvider = new PersistentDataProvider();
this.collectorActor = collectorActor;
}
dropMessages.remove(msgClass);
}
- void setMockTotalMemory(long mockTotalMemory) {
- this.mockTotalMemory = mockTotalMemory;
- }
+ void setMockTotalMemory(final long mockTotalMemory) {
+ if(mockTotalMemory > 0) {
+ getRaftActorContext().setTotalMemoryRetriever(new Supplier<Long>() {
+ @Override
+ public Long get() {
+ return mockTotalMemory;
+ }
- @Override
- protected long getTotalMemory() {
- return mockTotalMemory > 0 ? mockTotalMemory : super.getTotalMemory();
+ });
+ } else {
+ getRaftActorContext().setTotalMemoryRetriever(null);
+ }
}
@Override
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import akka.japi.Procedure;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public List<ReplicatedLogEntry> getEntriesTill(final int index) {
return journal.subList(0, index);
}
+
+        @Override
+        public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback) {
+            // Intentional no-op: this test log neither stores the entry nor invokes the callback.
+        }
}
}
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
+import akka.japi.Procedure;
import com.google.common.base.Preconditions;
+import com.google.common.base.Supplier;
import com.google.protobuf.GeneratedMessage;
import java.io.Serializable;
import java.util.HashMap;
this.configParams = configParams;
}
+    @Override
+    public long getTotalMemory() {
+        // The mock context always reports the real JVM total memory.
+        return Runtime.getRuntime().totalMemory();
+    }
+
+    @Override
+    public void setTotalMemoryRetriever(Supplier<Long> retriever) {
+        // Intentional no-op: the mock context does not support overriding total memory.
+    }
+
+    @Override
+    public boolean hasFollowers() {
+        // This node has followers iff at least one peer address is known.
+        return !getPeerAddresses().isEmpty();
+    }
+
public static class SimpleReplicatedLog extends AbstractReplicatedLogImpl {
@Override public void appendAndPersist(
ReplicatedLogEntry replicatedLogEntry) {
@Override public void removeFromAndPersist(long index) {
removeFrom(index);
}
+
+        @Override
+        public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback) {
+            // Test implementation: append synchronously (no real persistence) and
+            // invoke the callback immediately.
+            append(replicatedLogEntry);
+
+            if(callback != null) {
+                try {
+                    callback.apply(replicatedLogEntry);
+                } catch (Exception e) {
+                    // NOTE(review): the exception is only printed, not propagated - confirm
+                    // no test relies on persist-callback failures being surfaced.
+                    e.printStackTrace();
+                }
+            }
+        }
}
public static class MockPayload extends Payload implements Serializable {
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
import org.opendaylight.controller.cluster.datastore.DataPersistenceProviderMonitor;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
public static class MockRaftActor extends RaftActor {
- protected DataPersistenceProvider dataPersistenceProvider;
private final RaftActor delegate;
private final CountDownLatch recoveryComplete = new CountDownLatch(1);
private final List<Object> state;
state = new ArrayList<>();
this.delegate = mock(RaftActor.class);
if(dataPersistenceProvider == null){
- this.dataPersistenceProvider = new PersistentDataProvider();
+ setPersistence(true);
} else {
- this.dataPersistenceProvider = dataPersistenceProvider;
+ setPersistence(dataPersistenceProvider);
}
}
delegate.onStateChanged();
}
- @Override
- protected DataPersistenceProvider persistence() {
- return this.dataPersistenceProvider;
- }
-
@Override
protected Optional<ActorRef> getRoleChangeNotifier() {
return Optional.fromNullable(roleChangeNotifier);
assertEquals("remove log entries", 1, replicatedLog.size());
- mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
+ mockRaftActor.onReceiveRecover(new UpdateElectionTerm(10, "foobar"));
assertEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
assertEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
assertEquals("remove log entries", 0, replicatedLog.size());
- mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
+ mockRaftActor.onReceiveRecover(new UpdateElectionTerm(10, "foobar"));
assertNotEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
assertNotEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), notifierActor,
- new NonPersistentProvider()), persistenceId);
+ new NonPersistentDataProvider()), persistenceId);
List<RoleChanged> matches = MessageCollectorActor.expectMatching(notifierActor, RoleChanged.class, 3);
new MockRaftActorContext.MockPayload("foo-3"),
new MockRaftActorContext.MockPayload("foo-4")));
- leaderActor.getRaftActorContext().getSnapshotManager().persist(new NonPersistentProvider()
+ leaderActor.getRaftActorContext().getSnapshotManager().persist(new NonPersistentDataProvider()
, snapshotBytes.toByteArray(), leader, Runtime.getRuntime().totalMemory());
assertFalse(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
// The commit is needed to complete the snapshot creation process
- leaderActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentProvider(), -1);
+ leaderActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentDataProvider(), -1);
// capture snapshot reply should remove the snapshotted entries only
assertEquals(3, leaderActor.getReplicatedLog().size());
assertFalse(followerActor.getRaftActorContext().getSnapshotManager().isCapturing());
// The commit is needed to complete the snapshot creation process
- followerActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentProvider(), -1);
+ followerActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentDataProvider(), -1);
// capture snapshot reply should remove the snapshotted entries only till replicatedToAllIndex
assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
};
}
-
- private static class NonPersistentProvider implements DataPersistenceProvider {
- @Override
- public boolean isRecoveryApplicable() {
- return false;
- }
-
- @Override
- public <T> void persist(T o, Procedure<T> procedure) {
- try {
- procedure.apply(o);
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
- @Override
- public void saveSnapshot(Object o) {
-
- }
-
- @Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
-
- }
-
- @Override
- public void deleteMessages(long sequenceNumber) {
-
- }
- }
-
@Test
public void testRealSnapshotWhenReplicatedToAllIndexMinusOne() throws Exception {
new JavaTestKit(getSystem()) {{
config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
config.setSnapshotBatchCount(5);
- DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+ DataPersistenceProvider dataPersistenceProvider = new NonPersistentDataProvider();
Map<String, String> peerAddresses = new HashMap<>();
config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
config.setSnapshotBatchCount(5);
- DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+ DataPersistenceProvider dataPersistenceProvider = new NonPersistentDataProvider();
Map<String, String> peerAddresses = new HashMap<>();
package org.opendaylight.controller.cluster.raft;
-import static junit.framework.TestCase.assertFalse;
-import static junit.framework.TestCase.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Mockito.doReturn;
import akka.japi.Procedure;
import akka.persistence.SnapshotSelectionCriteria;
import akka.testkit.TestActorRef;
+import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.SnapshotManager.LastAppliedTermInformationReader;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
doReturn("123").when(mockRaftActorContext).getId();
doReturn("123").when(mockRaftActorBehavior).getLeaderId();
+ ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
+ doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
+ doReturn(5L).when(mockElectionTerm).getCurrentTerm();
+
snapshotManager = new SnapshotManager(mockRaftActorContext, LoggerFactory.getLogger(this.getClass()));
factory = new TestActorFactory(getSystem());
@Test
public void testPersistWhenReplicatedToAllIndexMinusOne(){
- doReturn("123").when(mockRaftActorContext).getId();
- doReturn(45L).when(mockReplicatedLog).getSnapshotIndex();
- doReturn(6L).when(mockReplicatedLog).getSnapshotTerm();
+ doReturn(7L).when(mockReplicatedLog).getSnapshotIndex();
+ doReturn(1L).when(mockReplicatedLog).getSnapshotTerm();
+
+ doReturn(ImmutableMap.builder().put("follower-1", "").build()).when(mockRaftActorContext).getPeerAddresses();
+
+ doReturn(8L).when(mockRaftActorContext).getLastApplied();
+
+ MockRaftActorContext.MockReplicatedLogEntry lastLogEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+ 3L, 9L, new MockRaftActorContext.MockPayload());
+
+ MockRaftActorContext.MockReplicatedLogEntry lastAppliedEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+ 2L, 8L, new MockRaftActorContext.MockPayload());
+
+ doReturn(lastAppliedEntry).when(mockReplicatedLog).get(8L);
+ doReturn(Arrays.asList(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
// when replicatedToAllIndex = -1
- snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
- new MockRaftActorContext.MockPayload()), -1);
+ snapshotManager.capture(lastLogEntry, -1);
snapshotManager.create(mockProcedure);
Snapshot snapshot = snapshotArgumentCaptor.getValue();
- assertEquals(6, snapshot.getLastAppliedTerm());
- assertEquals(9, snapshot.getLastAppliedIndex());
- assertEquals(9, snapshot.getLastIndex());
- assertEquals(6, snapshot.getLastTerm());
- assertEquals(10, snapshot.getState().length);
- assertTrue(Arrays.equals(bytes, snapshot.getState()));
- assertEquals(0, snapshot.getUnAppliedEntries().size());
+ assertEquals("getLastTerm", 3L, snapshot.getLastTerm());
+ assertEquals("getLastIndex", 9L, snapshot.getLastIndex());
+ assertEquals("getLastAppliedTerm", 2L, snapshot.getLastAppliedTerm());
+ assertEquals("getLastAppliedIndex", 8L, snapshot.getLastAppliedIndex());
+ assertArrayEquals("getState", bytes, snapshot.getState());
+ assertEquals("getUnAppliedEntries", Arrays.asList(lastLogEntry), snapshot.getUnAppliedEntries());
- verify(mockReplicatedLog).snapshotPreCommit(45L, 6L);
+ verify(mockReplicatedLog).snapshotPreCommit(7L, 1L);
}
snapshotManager.create(mockProcedure);
verify(mockProcedure).apply(null);
+
+ assertEquals("isCapturing", true, snapshotManager.isCapturing());
}
@Test
snapshotManager.create(mockProcedure);
- snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
+ byte[] bytes = new byte[] {1,2,3,4,5,6,7,8,9,10};
+ snapshotManager.persist(mockDataPersistenceProvider, bytes, mockRaftActorBehavior
, Runtime.getRuntime().totalMemory());
- verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
+ ArgumentCaptor<Snapshot> snapshotArgumentCaptor = ArgumentCaptor.forClass(Snapshot.class);
+ verify(mockDataPersistenceProvider).saveSnapshot(snapshotArgumentCaptor.capture());
+
+ Snapshot snapshot = snapshotArgumentCaptor.getValue();
+
+ assertEquals("getLastTerm", 6L, snapshot.getLastTerm());
+ assertEquals("getLastIndex", 9L, snapshot.getLastIndex());
+ assertEquals("getLastAppliedTerm", 6L, snapshot.getLastAppliedTerm());
+ assertEquals("getLastAppliedIndex", 9L, snapshot.getLastAppliedIndex());
+ assertArrayEquals("getState", bytes, snapshot.getState());
+ assertEquals("getUnAppliedEntries size", 0, snapshot.getUnAppliedEntries().size());
verify(mockReplicatedLog).snapshotPreCommit(9L, 6L);
}
@Test
- public void testTrimLog(){
- ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
- ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ public void testTrimLogWhenTrimIndexLessThanLastApplied() {
doReturn(20L).when(mockRaftActorContext).getLastApplied();
+
+ ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
doReturn(true).when(mockReplicatedLog).isPresent(10);
- doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
- doReturn(5L).when(mockElectionTerm).getCurrentTerm();
doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
doReturn(5L).when(replicatedLogEntry).getTerm();
- snapshotManager.trimLog(10, mockRaftActorBehavior);
+ long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+ assertEquals("return index", 10L, retIndex);
verify(mockReplicatedLog).snapshotPreCommit(10, 5);
verify(mockReplicatedLog).snapshotCommit();
+
+ verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+ }
+
+    @Test
+    public void testTrimLogWhenLastAppliedNotSet() {
+        // lastApplied == -1 means no entries have been applied yet, so trimLog
+        // must refuse to trim and return -1.
+        doReturn(-1L).when(mockRaftActorContext).getLastApplied();
+
+        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+        doReturn(true).when(mockReplicatedLog).isPresent(10);
+        doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+        doReturn(5L).when(replicatedLogEntry).getTerm();
+
+        long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+        assertEquals("return index", -1L, retIndex);
+
+        verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+        verify(mockReplicatedLog, never()).snapshotCommit();
+
+        verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+    }
+
+    @Test
+    public void testTrimLogWhenLastAppliedZero() {
+        // A trim index (10) above lastApplied (0) must not trim anything.
+        doReturn(0L).when(mockRaftActorContext).getLastApplied();
+
+        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+        doReturn(true).when(mockReplicatedLog).isPresent(10);
+        doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+        doReturn(5L).when(replicatedLogEntry).getTerm();
+
+        long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+        assertEquals("return index", -1L, retIndex);
+
+        verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+        verify(mockReplicatedLog, never()).snapshotCommit();
+
+        verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+    }
+
+    @Test
+    public void testTrimLogWhenTrimIndexNotPresent() {
+        // If the trim index is not in the log it cannot be trimmed, but the
+        // behavior's replicatedToAllIndex should still be advanced to it.
+        doReturn(20L).when(mockRaftActorContext).getLastApplied();
+
+        doReturn(false).when(mockReplicatedLog).isPresent(10);
+
+        long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+        assertEquals("return index", -1L, retIndex);
+
+        verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+        verify(mockReplicatedLog, never()).snapshotCommit();
+
+        // Trim index is greater than replicatedToAllIndex so should update it.
+        verify(mockRaftActorBehavior).setReplicatedToAllIndex(10L);
+    }
@Test
assertEquals(true, snapshotManager.isCapturing());
- ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
doReturn(20L).when(mockRaftActorContext).getLastApplied();
doReturn(true).when(mockReplicatedLog).isPresent(10);
- doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
- doReturn(5L).when(mockElectionTerm).getCurrentTerm();
doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
doReturn(5L).when(replicatedLogEntry).getTerm();
assertEquals(true, snapshotManager.isCapturing());
- ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
doReturn(20L).when(mockRaftActorContext).getLastApplied();
doReturn(true).when(mockReplicatedLog).isPresent(10);
- doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
- doReturn(5L).when(mockElectionTerm).getCurrentTerm();
doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
doReturn(5L).when(replicatedLogEntry).getTerm();
}
+    @Test
+    public void testLastAppliedTermInformationReader() {
+        // Exercises LastAppliedTermInformationReader.init() for each combination of
+        // followers/no-followers and present/absent log entries.
+
+        LastAppliedTermInformationReader reader = new LastAppliedTermInformationReader();
+
+        doReturn(4L).when(mockReplicatedLog).getSnapshotTerm();
+        doReturn(7L).when(mockReplicatedLog).getSnapshotIndex();
+
+        ReplicatedLogEntry lastLogEntry = new MockRaftActorContext.MockReplicatedLogEntry(6L, 9L,
+                new MockRaftActorContext.MockPayload());
+
+        // No followers and valid lastLogEntry
+        reader.init(mockReplicatedLog, 1L, lastLogEntry, false);
+
+        assertEquals("getTerm", 6L, reader.getTerm());
+        assertEquals("getIndex", 9L, reader.getIndex());
+
+        // No followers and null lastLogEntry
+        reader.init(mockReplicatedLog, 1L, null, false);
+
+        assertEquals("getTerm", -1L, reader.getTerm());
+        assertEquals("getIndex", -1L, reader.getIndex());
+
+        // Followers and valid originalIndex entry
+        doReturn(new MockRaftActorContext.MockReplicatedLogEntry(5L, 8L,
+                new MockRaftActorContext.MockPayload())).when(mockReplicatedLog).get(8L);
+        reader.init(mockReplicatedLog, 8L, lastLogEntry, true);
+
+        assertEquals("getTerm", 5L, reader.getTerm());
+        assertEquals("getIndex", 8L, reader.getIndex());
+
+        // Followers and null originalIndex entry and valid snapshot index
+        reader.init(mockReplicatedLog, 7L, lastLogEntry, true);
+
+        assertEquals("getTerm", 4L, reader.getTerm());
+        assertEquals("getIndex", 7L, reader.getIndex());
+
+        // Followers and null originalIndex entry and invalid snapshot index
+        doReturn(-1L).when(mockReplicatedLog).getSnapshotIndex();
+        reader.init(mockReplicatedLog, 7L, lastLogEntry, true);
+
+        assertEquals("getTerm", -1L, reader.getTerm());
+        assertEquals("getIndex", -1L, reader.getIndex());
+    }
}
\ No newline at end of file
import java.util.Collection;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Augmentation;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Identifier;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
/**
* Represents modification of Data Object.
*
*/
-public interface DataObjectModification<T extends DataObject> extends Identifiable<PathArgument> {
+public interface DataObjectModification<T extends DataObject> extends org.opendaylight.yangtools.concepts.Identifiable<PathArgument> {
enum ModificationType {
/**
*/
@Nonnull Collection<DataObjectModification<? extends DataObject>> getModifiedChildren();
+ /**
+ * Returns container child modification if {@code child} was modified by this
+ * modification.
+ *
+ * For accessing all modified list items consider iterating over {@link #getModifiedChildren()}.
+ *
+ * @param child Type of child - must be only container
+ * @return Modification of {@code child} if {@code child} was modified, null otherwise.
+ * @throws IllegalArgumentException If supplied {@code child} class is not valid child according
+ * to generated model.
+ */
+ @Nullable <C extends ChildOf<? super T>> DataObjectModification<C> getModifiedChildContainer(@Nonnull Class<C> child);
+
+ /**
+ * Returns augmentation child modification if {@code augmentation} was modified by this
+ * modification.
+ *
+ * For accessing all modified list items consider iterating over {@link #getModifiedChildren()}.
+ *
+ * @param augmentation Type of augmentation - must be only container
+ * @return Modification of {@code augmentation} if {@code augmentation} was modified, null otherwise.
+ * @throws IllegalArgumentException If supplied {@code augmentation} class is not valid augmentation
+ * according to generated model.
+ */
+ @Nullable <C extends Augmentation<T> & DataObject> DataObjectModification<C> getModifiedAugmentation(@Nonnull Class<C> augmentation);
+
+
+ /**
+ * Returns child list item modification if {@code child} was modified by this modification.
+ *
+ * @param listItem Type of list item - must be list item with key
+ * @param listKey List item key
+ * @return Modification of {@code child} if {@code child} was modified, null otherwise.
+ * @throws IllegalArgumentException If supplied {@code listItem} class is not valid child according
+ * to generated model.
+ */
+ <C extends Identifiable<K> & ChildOf<? super T>, K extends Identifier<C>> DataObjectModification<C> getModifiedChildListItem(
+ @Nonnull Class<C> listItem,@Nonnull K listKey);
+
+ /**
+ * Returns a child modification if a node identified by {@code childArgument} was modified by
+ * this modification.
+ *
+ * @param childArgument Path Argument of child node
+ * @return Modification of child identified by {@code childArgument} if {@code childArgument}
+ * was modified, null otherwise.
+ * @throws IllegalArgumentException If supplied path argument is not valid child according to
+ * generated model.
+ *
+ */
+ @Nullable DataObjectModification<? extends DataObject> getModifiedChild(PathArgument childArgument);
}
import java.util.Collection;
import java.util.EventListener;
import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.binding.DataObject;
/**
* Interface implemented by classes interested in receiving notifications about
* in that it provides a cursor-based view of the change, which has potentially
* lower overhead and allow more flexible consumption of change event.
*/
-public interface DataTreeChangeListener extends EventListener {
+public interface DataTreeChangeListener<T extends DataObject> extends EventListener {
/**
* Invoked when there was data change for the supplied path, which was used
* to register this listener.
*
* @param changes Collection of change events, may not be null or empty.
*/
- void onDataTreeChanged(@Nonnull Collection<DataTreeModification> changes);
+ void onDataTreeChanged(@Nonnull Collection<DataTreeModification<T>> changes);
}
import javax.annotation.Nonnull;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
/**
* A {@link DOMService} which allows users to register for changes to a
* your listener using {@link ListenerRegistration#close()} to stop
* delivery of change events.
*/
- @Nonnull <L extends DataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DataTreeIdentifier treeId, @Nonnull L listener);
+ @Nonnull <T extends DataObject,L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DataTreeIdentifier<T> treeId, @Nonnull L listener);
}
\ No newline at end of file
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yangtools.concepts.Immutable;
import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
* A unique identifier for a particular subtree. It is composed of the logical
* data store type and the instance identifier of the root node.
*/
-public final class DataTreeIdentifier implements Immutable, Path<DataTreeIdentifier>, Serializable {
+public final class DataTreeIdentifier<T extends DataObject> implements Immutable, Path<DataTreeIdentifier<?>>, Serializable {
private static final long serialVersionUID = 1L;
- private final InstanceIdentifier<?> rootIdentifier;
+ private final InstanceIdentifier<T> rootIdentifier;
private final LogicalDatastoreType datastoreType;
- public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<?> rootIdentifier) {
+ public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<T> rootIdentifier) {
this.datastoreType = Preconditions.checkNotNull(datastoreType);
this.rootIdentifier = Preconditions.checkNotNull(rootIdentifier);
}
}
@Override
- public boolean contains(final DataTreeIdentifier other) {
+ public boolean contains(final DataTreeIdentifier<?> other) {
return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
}
if (!(obj instanceof DataTreeIdentifier)) {
return false;
}
- DataTreeIdentifier other = (DataTreeIdentifier) obj;
+ final DataTreeIdentifier<?> other = (DataTreeIdentifier<?>) obj;
if (datastoreType != other.datastoreType) {
return false;
}
* @author Tony Tkacik <ttkacik@cisco.com>
*
*/
-public interface DataTreeModification {
+public interface DataTreeModification<T extends DataObject> {
/**
* Get the modification root path. This is the path of the root node
*
* @return absolute path of the root node
*/
- @Nonnull DataTreeIdentifier getRootPath();
+ @Nonnull DataTreeIdentifier<T> getRootPath();
/**
* Get the modification root node.
*
* @return modification root node
*/
- @Nonnull DataObjectModification<? extends DataObject> getRootNode();
+ @Nonnull DataObjectModification<T> getRootNode();
}
import java.util.Set;
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
/**
* The DataBrokerImpl simply defers to the DOMDataBroker for all its operations.
*
*/
-public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker implements DataBroker {
+public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker implements DataBroker, DataTreeChangeService {
static final Factory<DataBroker> BUILDER_FACTORY = new BindingDOMAdapterBuilder.Factory<DataBroker>() {
}
};
+ private final DataTreeChangeService treeChangeService;
public BindingDOMDataBrokerAdapter(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec) {
super(domDataBroker, codec);
- }
-
- @Deprecated
- public BindingDOMDataBrokerAdapter(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec, final SchemaService schemaService) {
- super(domDataBroker, codec,schemaService);
+ final DOMDataTreeChangeService domTreeChange = (DOMDataTreeChangeService) domDataBroker.getSupportedExtensions().get(DOMDataTreeChangeService.class);
+ if(domTreeChange != null) {
+ treeChangeService = BindingDOMDataTreeChangeServiceAdapter.create(codec, domTreeChange);
+ } else {
+ treeChangeService = null;
+ }
}
@Override
}
@Override
- protected DataBroker createInstance(BindingToNormalizedNodeCodec codec,
- ClassToInstanceMap<DOMService> delegates) {
- DOMDataBroker domDataBroker = delegates.getInstance(DOMDataBroker.class);
+ protected DataBroker createInstance(final BindingToNormalizedNodeCodec codec,
+ final ClassToInstanceMap<DOMService> delegates) {
+ final DOMDataBroker domDataBroker = delegates.getInstance(DOMDataBroker.class);
return new BindingDOMDataBrokerAdapter(domDataBroker, codec);
}
+ }
-
+ @Override
+ public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(
+ final DataTreeIdentifier<T> treeId, final L listener) {
+ if(treeChangeService == null) {
+ throw new UnsupportedOperationException("Underlying data broker does not expose DOMDataTreeChangeService.");
+ }
+ return treeChangeService.registerDataTreeChangeListener(treeId, listener);
}
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Adapter wrapping a Binding {@link DataTreeChangeListener}, exposing it as a
+ * {@link DOMDataTreeChangeListener} and translating incoming DOM change events
+ * into their Binding equivalent before delivering them to the wrapped listener.
+ *
+ */
+final class BindingDOMDataTreeChangeListenerAdapter<T extends DataObject> implements DOMDataTreeChangeListener {
+
+ // Codec used to translate DOM DataTreeCandidates into Binding DataTreeModifications.
+ private final BindingToNormalizedNodeCodec codec;
+ // User-supplied Binding listener that receives the translated events.
+ private final DataTreeChangeListener<T> listener;
+ // Logical datastore the registration targets; needed to rebuild DataTreeIdentifiers
+ // for the translated events.
+ private final LogicalDatastoreType store;
+
+ // All three collaborators are mandatory; fail fast on null.
+ BindingDOMDataTreeChangeListenerAdapter(final BindingToNormalizedNodeCodec codec, final DataTreeChangeListener<T> listener,
+ final LogicalDatastoreType store) {
+ this.codec = Preconditions.checkNotNull(codec);
+ this.listener = Preconditions.checkNotNull(listener);
+ this.store = Preconditions.checkNotNull(store);
+ }
+
+ @Override
+ public void onDataTreeChanged(final Collection<DataTreeCandidate> domChanges) {
+ // Translation is lazy: LazyDataTreeModification defers per-node decoding until
+ // the listener actually accesses the data.
+ final Collection<DataTreeModification<T>> bindingChanges = LazyDataTreeModification.from(codec, domChanges, store);
+ listener.onDataTreeChanged(bindingChanges);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+
+/**
+ * Adapter exposing the Binding {@link DataTreeChangeService} API on top of a
+ * {@link DOMDataTreeChangeService}. It is responsible for translating tree
+ * identifiers and for instantiating {@link BindingDOMDataTreeChangeListenerAdapter}
+ * wrappers.
+ *
+ * Each registered {@link DataTreeChangeListener} is wrapped in such an adapter
+ * and registered directly with the underlying DOM service.
+ */
+final class BindingDOMDataTreeChangeServiceAdapter implements DataTreeChangeService {
+
+ // Codec for Binding <-> DOM path and data translation.
+ private final BindingToNormalizedNodeCodec codec;
+ // Backing DOM service that performs the actual change tracking.
+ private final DOMDataTreeChangeService dataTreeChangeService;
+
+ // Private; instances are obtained via create().
+ private BindingDOMDataTreeChangeServiceAdapter(final BindingToNormalizedNodeCodec codec,
+ final DOMDataTreeChangeService dataTreeChangeService) {
+ this.codec = Preconditions.checkNotNull(codec);
+ this.dataTreeChangeService = Preconditions.checkNotNull(dataTreeChangeService);
+ }
+
+ // Static factory; returns the adapter typed as the Binding-facing interface.
+ static DataTreeChangeService create(final BindingToNormalizedNodeCodec codec,
+ final DOMDataTreeChangeService dataTreeChangeService) {
+ return new BindingDOMDataTreeChangeServiceAdapter(codec, dataTreeChangeService);
+ }
+
+ @Override
+ public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(
+ final DataTreeIdentifier<T> treeId, final L listener) {
+ // Translate the Binding tree identifier to its DOM form, wrap the listener,
+ // register the wrapper with the DOM service, then re-wrap the DOM registration
+ // so close() on the returned registration unregisters the DOM-level listener.
+ final DOMDataTreeIdentifier domIdentifier = toDomTreeIdentifier(treeId);
+ final BindingDOMDataTreeChangeListenerAdapter<T> domListener = new BindingDOMDataTreeChangeListenerAdapter<>(codec,listener, treeId.getDatastoreType());
+ final ListenerRegistration<BindingDOMDataTreeChangeListenerAdapter<T>> domReg = dataTreeChangeService.registerDataTreeChangeListener(domIdentifier, domListener);
+ return new BindingDataTreeChangeListenerRegistration<>(listener,domReg);
+ }
+
+ // Converts a Binding DataTreeIdentifier (datastore + InstanceIdentifier) to the
+ // DOM equivalent (datastore + YangInstanceIdentifier).
+ private DOMDataTreeIdentifier toDomTreeIdentifier(final DataTreeIdentifier<?> treeId) {
+ final YangInstanceIdentifier domPath = codec.toYangInstanceIdentifier(treeId.getRootIdentifier());
+ return new DOMDataTreeIdentifier(treeId.getDatastoreType(), domPath);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * Binding-level registration for a {@link DataTreeChangeListener} which simply
+ * forwards {@code close()} to the underlying DOM-level registration.
+ */
+class BindingDataTreeChangeListenerRegistration<L extends DataTreeChangeListener<?>> extends AbstractListenerRegistration<L> {
+
+ // DOM-level registration being proxied; closed exactly once via removeRegistration()
+ // (AbstractListenerRegistration guards against repeated close()).
+ private final ListenerRegistration<?> domReg;
+
+ BindingDataTreeChangeListenerRegistration(final L listener, final ListenerRegistration<?> domReg) {
+ super(listener);
+ this.domReg = Preconditions.checkNotNull(domReg);
+ }
+
+ @Override
+ protected void removeRegistration() {
+ // Unregisters the wrapped DOM listener, stopping event delivery.
+ domReg.close();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Optional;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+
+/**
+ * Defines the structural mapping of a NormalizedNode (DOM) item to Binding data
+ * addressable by a Binding Instance Identifier.
+ *
+ * Not all Binding data is addressable by an instance identifier, and the DOM and
+ * Binding addressing schemes differ in places.
+ *
+ * See {@link #NOT_ADDRESSABLE}, {@link #INVISIBLE_CONTAINER}, {@link #INVISIBLE_LIST}
+ * and {@link #VISIBLE_CONTAINER} for details of each case.
+ *
+ */
+enum BindingStructuralType {
+
+ /**
+ * DOM item is not addressable by a Binding Instance Identifier. The data is
+ * not lost, but is available only through its parent object.
+ *
+ * Such data are leaves, leaf-lists, lists without keys and anyxml nodes.
+ *
+ */
+ NOT_ADDRESSABLE,
+ /**
+ * Data container is addressable in NormalizedNode format, but has no
+ * representation in a Binding Instance Identifier.
+ *
+ * These are choice / case nodes.
+ *
+ * The data is still accessible through the parent object, and the node's
+ * children are themselves addressable.
+ *
+ */
+ INVISIBLE_CONTAINER,
+ /**
+ * The list node itself (MapNode) is addressable in NormalizedNode format but
+ * has no representation in a Binding Instance Identifier; its entries, by
+ * contrast, are addressable.
+ *
+ * The data is still accessible through the parent object, and the list's
+ * entries are themselves addressable.
+ *
+ */
+ INVISIBLE_LIST,
+ /**
+ * Data container is addressable both in Binding Instance Identifier format
+ * and in YangInstanceIdentifier format.
+ *
+ */
+ VISIBLE_CONTAINER,
+ /**
+ * Mapping algorithm was unable to detect the type, or was not updated after
+ * introduction of a new NormalizedNode type.
+ */
+ UNKNOWN;
+
+ // Classifies a DOM candidate node: prefer classification by its data (after-state,
+ // falling back to before-state for deletes); if neither is present, classify by
+ // the node's path argument alone.
+ static BindingStructuralType from(final DataTreeCandidateNode domChildNode) {
+ final Optional<NormalizedNode<?, ?>> dataBased = domChildNode.getDataAfter().or(domChildNode.getDataBefore());
+ if(dataBased.isPresent()) {
+ return from(dataBased.get());
+ }
+ return from(domChildNode.getIdentifier());
+ }
+
+ // Classification from a path argument only (no data available).
+ private static BindingStructuralType from(final PathArgument identifier) {
+ // List entries (keyed) and augmentations are visible in Binding identifiers.
+ if(identifier instanceof NodeIdentifierWithPredicates || identifier instanceof AugmentationIdentifier) {
+ return VISIBLE_CONTAINER;
+ }
+ // Leaf-list entries are identified by value and are not addressable.
+ if(identifier instanceof NodeWithValue) {
+ return NOT_ADDRESSABLE;
+ }
+ return UNKNOWN;
+ }
+
+ // Classification from the actual normalized data node. Order matters: the
+ // NOT_ADDRESSABLE check runs first so leaf-like nodes are never misclassified.
+ static BindingStructuralType from(final NormalizedNode<?, ?> data) {
+ if(isNotAddressable(data)) {
+ return NOT_ADDRESSABLE;
+ }
+ if(data instanceof MapNode) {
+ return INVISIBLE_LIST;
+ }
+ if(data instanceof ChoiceNode) {
+ return INVISIBLE_CONTAINER;
+ }
+ if(isVisibleContainer(data)) {
+ return VISIBLE_CONTAINER;
+ }
+ return UNKNOWN;
+ }
+
+ // Map entries, containers and augmentations all have Binding Instance Identifier forms.
+ private static boolean isVisibleContainer(final NormalizedNode<?, ?> data) {
+ return data instanceof MapEntryNode || data instanceof ContainerNode || data instanceof AugmentationNode;
+ }
+
+ // Leaf-like nodes carry values, not children, and have no Binding identifier form.
+ private static boolean isNotAddressable(final NormalizedNode<?, ?> d) {
+ return d instanceof LeafNode
+ || d instanceof AnyXmlNode
+ || d instanceof LeafSetNode
+ || d instanceof LeafSetEntryNode;
+ }
+
+}
import com.google.common.base.Function;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableBiMap;
import java.lang.reflect.Method;
+import java.util.AbstractMap.SimpleEntry;
import java.util.Iterator;
+import java.util.Map;
import java.util.Map.Entry;
import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTree;
import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeFactory;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
import org.opendaylight.yangtools.sal.binding.generator.util.BindingRuntimeContext;
import org.opendaylight.yangtools.yang.binding.BindingMapping;
+import org.opendaylight.yangtools.yang.binding.DataContainer;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.Notification;
import org.opendaylight.yangtools.yang.binding.RpcService;
import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
import org.opendaylight.yangtools.yang.common.QNameModule;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-public class BindingToNormalizedNodeCodec implements BindingCodecTreeFactory, SchemaContextListener, AutoCloseable {
+public class BindingToNormalizedNodeCodec implements BindingCodecTreeFactory, BindingNormalizedNodeSerializer, SchemaContextListener, AutoCloseable {
private final BindingNormalizedNodeCodecRegistry codecRegistry;
private DataNormalizer legacyToNormalized;
return codecRegistry.toYangInstanceIdentifier(binding);
}
- @SuppressWarnings({ "unchecked", "rawtypes" })
- public Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
- final InstanceIdentifier<? extends DataObject> bindingPath, final DataObject bindingObject) {
- return codecRegistry.toNormalizedNode((InstanceIdentifier) bindingPath, bindingObject);
+ @Override
+ public YangInstanceIdentifier toYangInstanceIdentifier(final InstanceIdentifier<?> binding) {
+ return codecRegistry.toYangInstanceIdentifier(binding);
+ }
+ @Override
+ public <T extends DataObject> Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
+ final InstanceIdentifier<T> path, final T data) {
+ return codecRegistry.toNormalizedNode(path, data);
}
+ @SuppressWarnings({"unchecked", "rawtypes"})
public Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
final Entry<InstanceIdentifier<? extends DataObject>, DataObject> binding) {
- return toNormalizedNode(binding.getKey(),binding.getValue());
+ return toNormalizedNode((InstanceIdentifier) binding.getKey(),binding.getValue());
+ }
+
+ // BindingNormalizedNodeSerializer implementation: each method is a straight
+ // delegation to the underlying codecRegistry, which performs the actual
+ // Binding <-> NormalizedNode translation.
+ @Override
+ public Entry<InstanceIdentifier<?>, DataObject> fromNormalizedNode(final YangInstanceIdentifier path,
+ final NormalizedNode<?, ?> data) {
+ return codecRegistry.fromNormalizedNode(path, data);
+ }
+
+ @Override
+ public Notification fromNormalizedNodeNotification(final SchemaPath path, final ContainerNode data) {
+ return codecRegistry.fromNormalizedNodeNotification(path, data);
+ }
+
+ @Override
+ public DataObject fromNormalizedNodeRpcData(final SchemaPath path, final ContainerNode data) {
+ return codecRegistry.fromNormalizedNodeRpcData(path, data);
+ }
+
+ @Override
+ public InstanceIdentifier<?> fromYangInstanceIdentifier(final YangInstanceIdentifier dom) {
+ return codecRegistry.fromYangInstanceIdentifier(dom);
+ }
+
+ @Override
+ public ContainerNode toNormalizedNodeNotification(final Notification data) {
+ return codecRegistry.toNormalizedNodeNotification(data);
+ }
+
+ @Override
+ public ContainerNode toNormalizedNodeRpcData(final DataContainer data) {
+ return codecRegistry.toNormalizedNodeRpcData(data);
 }
/**
}
@Override
- public BindingCodecTree create(BindingRuntimeContext context) {
+ public BindingCodecTree create(final BindingRuntimeContext context) {
return codecRegistry.create(context);
}
@Override
- public BindingCodecTree create(SchemaContext context, Class<?>... bindingClasses) {
+ public BindingCodecTree create(final SchemaContext context, final Class<?>... bindingClasses) {
return codecRegistry.create(context, bindingClasses);
}
+ /**
+ * Resolves the Binding path and subtree codec for a DOM instance identifier.
+ *
+ * @param domIdentifier DOM path to resolve
+ * @return pair of the deserialized Binding path and the codec node for that subtree
+ * @throws IllegalArgumentException if the DOM path has no Binding representation
+ */
+ @Nonnull protected Map.Entry<InstanceIdentifier<?>, BindingCodecTreeNode<?>> getSubtreeCodec(
+ final YangInstanceIdentifier domIdentifier) {
+
+ final BindingCodecTree currentCodecTree = codecRegistry.getCodecContext();
+ final InstanceIdentifier<?> bindingPath = codecRegistry.fromYangInstanceIdentifier(domIdentifier);
+ Preconditions.checkArgument(bindingPath != null);
+ /*
+ * If we are able to deserialize the YANG instance identifier, getSubtreeCodec
+ * must return a non-null value.
+ */
+ final BindingCodecTreeNode<?> codecContext = currentCodecTree.getSubtreeCodec(bindingPath);
+ return new SimpleEntry<InstanceIdentifier<?>, BindingCodecTreeNode<?>>(bindingPath, codecContext);
+ }
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.yang.binding.Augmentation;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Identifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Lazily translated {@link DataObjectModification} based on {@link DataTreeCandidateNode}.
+ *
+ * {@link LazyDataObjectModification} represents a data tree change event, but the
+ * tree is not translated or resolved eagerly — only those child nodes which are
+ * directly accessed by the user of the data object modification are decoded.
+ *
+ * @param <T> Type of Binding Data Object
+ */
+class LazyDataObjectModification<T extends DataObject> implements DataObjectModification<T> {
+
+ private final static Logger LOG = LoggerFactory.getLogger(LazyDataObjectModification.class);
+
+ // Codec for this node's Binding type; used for data and child deserialization.
+ private final BindingCodecTreeNode<T> codec;
+ // Underlying untranslated DOM change.
+ private final DataTreeCandidateNode domData;
+ // Binding path argument for this node, derived eagerly from the DOM identifier.
+ private final PathArgument identifier;
+ // Lazily built child list; populated on first getModifiedChildren() call.
+ // NOTE(review): plain field, no synchronization — assumes single-threaded
+ // consumption of the event; confirm against the listener delivery contract.
+ private Collection<DataObjectModification<? extends DataObject>> childNodesCache;
+
+ private LazyDataObjectModification(final BindingCodecTreeNode<T> codec, final DataTreeCandidateNode domData) {
+ this.codec = Preconditions.checkNotNull(codec);
+ this.domData = Preconditions.checkNotNull(domData);
+ this.identifier = codec.deserializePathArgument(domData.getIdentifier());
+ }
+
+ // Static factory used throughout this class to wrap a DOM node with its codec.
+ static <T extends DataObject> DataObjectModification<T> create(final BindingCodecTreeNode<T> codec,
+ final DataTreeCandidateNode domData) {
+ return new LazyDataObjectModification<>(codec,domData);
+ }
+
+ // Translates a collection of DOM child changes into Binding modifications,
+ // flattening away nodes that have no Binding representation.
+ static Collection<DataObjectModification<? extends DataObject>> from(final BindingCodecTreeNode<?> parentCodec,
+ final Collection<DataTreeCandidateNode> domChildNodes) {
+ final ArrayList<DataObjectModification<? extends DataObject>> result = new ArrayList<>(domChildNodes.size());
+ populateList(result, parentCodec, domChildNodes);
+ return result;
+ }
+
+ // Recursively classifies each DOM child and appends its Binding representation
+ // (if any) to result. NOT_ADDRESSABLE children are skipped entirely.
+ private static void populateList(final List<DataObjectModification<? extends DataObject>> result,
+ final BindingCodecTreeNode<?> parentCodec, final Collection<DataTreeCandidateNode> domChildNodes) {
+ for (final DataTreeCandidateNode domChildNode : domChildNodes) {
+ final BindingStructuralType type = BindingStructuralType.from(domChildNode);
+ if (type != BindingStructuralType.NOT_ADDRESSABLE) {
+ /*
+ * Even if type is UNKNOWN, from perspective of BindingStructuralType
+ * we try to load codec for it. We will use that type to further specify
+ * debug log.
+ */
+ try {
+ final BindingCodecTreeNode<?> childCodec =
+ parentCodec.yangPathArgumentChild(domChildNode.getIdentifier());
+ populateList(result,type, childCodec, domChildNode);
+ } catch (final IllegalArgumentException e) {
+ // Codec lookup failure is non-fatal: the child is simply omitted
+ // from the translated event, with a debug-level trace.
+ if(type == BindingStructuralType.UNKNOWN) {
+ LOG.debug("Unable to deserialize unknown DOM node {}",domChildNode,e);
+ } else {
+ LOG.debug("Binding representation for DOM node {} was not found",domChildNode,e);
+ }
+ }
+ }
+ }
+ }
+
+
+ // Dispatches on the structural type: invisible nodes are flattened (their
+ // children are lifted into the parent's result list), visible ones are wrapped.
+ private static void populateList(final List<DataObjectModification<? extends DataObject>> result,
+ final BindingStructuralType type, final BindingCodecTreeNode<?> childCodec,
+ final DataTreeCandidateNode domChildNode) {
+ switch (type) {
+ case INVISIBLE_LIST:
+ // Every entry of the invisible list is wrapped with this same childCodec.
+ // NOTE(review): the original comment said "parent codec" — confirm that
+ // childCodec here indeed resolves map entries correctly.
+ populateListWithSingleCodec(result, childCodec, domChildNode.getChildNodes());
+ break;
+ case INVISIBLE_CONTAINER:
+ populateList(result, childCodec, domChildNode.getChildNodes());
+ break;
+ case UNKNOWN:
+ case VISIBLE_CONTAINER:
+ result.add(create(childCodec, domChildNode));
+ // NOTE(review): no break here — execution falls through into default,
+ // which only breaks, so behavior is unaffected; an explicit break
+ // would be clearer.
+ default:
+ break;
+ }
+ }
+
+ // Wraps each DOM child with the single supplied codec (used for map entries
+ // of an invisible list).
+ private static void populateListWithSingleCodec(final List<DataObjectModification<? extends DataObject>> result,
+ final BindingCodecTreeNode<?> codec, final Collection<DataTreeCandidateNode> childNodes) {
+ for (final DataTreeCandidateNode child : childNodes) {
+ result.add(create(codec, child));
+ }
+ }
+
+ // Returns the decoded after-state, or null if the node has no data after
+ // the change (e.g. a delete).
+ @Override
+ public T getDataAfter() {
+ return deserialize(domData.getDataAfter());
+ }
+
+ @Override
+ public Class<T> getDataType() {
+ return codec.getBindingClass();
+ }
+
+ @Override
+ public PathArgument getIdentifier() {
+ return identifier;
+ }
+
+ // Maps the DOM modification type onto the Binding enum; only WRITE,
+ // SUBTREE_MODIFIED and DELETE are supported.
+ @Override
+ public DataObjectModification.ModificationType getModificationType() {
+ switch(domData.getModificationType()) {
+ case WRITE:
+ return DataObjectModification.ModificationType.WRITE;
+ case SUBTREE_MODIFIED:
+ return DataObjectModification.ModificationType.SUBTREE_MODIFIED;
+ case DELETE:
+ return DataObjectModification.ModificationType.DELETE;
+
+ default:
+ // TODO: Should we lie about modification type instead of exception?
+ throw new IllegalStateException("Unsupported DOM Modification type " + domData.getModificationType());
+ }
+ }
+
+ // Lazily computes and caches the translated children on first access.
+ @Override
+ public Collection<DataObjectModification<? extends DataObject>> getModifiedChildren() {
+ if(childNodesCache == null) {
+ childNodesCache = from(codec,domData.getChildNodes());
+ }
+ return childNodesCache;
+ }
+
+ // Resolves a single Binding child: the Binding path argument may expand to
+ // several DOM path steps (e.g. through augmentations), so each DOM step is
+ // walked in turn; returns null if any step has no modification.
+ @Override
+ public DataObjectModification<? extends DataObject> getModifiedChild(final PathArgument arg) {
+ final List<YangInstanceIdentifier.PathArgument> domArgumentList = new ArrayList<>();
+ final BindingCodecTreeNode<?> childCodec = codec.bindingPathArgumentChild(arg, domArgumentList);
+ final Iterator<YangInstanceIdentifier.PathArgument> toEnter = domArgumentList.iterator();
+ DataTreeCandidateNode current = domData;
+ while (toEnter.hasNext() && current != null) {
+ current = current.getModifiedChild(toEnter.next());
+ }
+ if (current != null) {
+ return create(childCodec, current);
+ }
+ return null;
+ }
+
+ // Typed convenience wrapper over getModifiedChild for keyed list entries.
+ @Override
+ @SuppressWarnings("unchecked")
+ public <C extends Identifiable<K> & ChildOf<? super T>, K extends Identifier<C>> DataObjectModification<C> getModifiedChildListItem(
+ final Class<C> listItem, final K listKey) {
+ return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.IdentifiableItem<>(listItem, listKey));
+ }
+
+ // Typed convenience wrapper over getModifiedChild for child containers.
+ @Override
+ @SuppressWarnings("unchecked")
+ public <C extends ChildOf<? super T>> DataObjectModification<C> getModifiedChildContainer(final Class<C> arg) {
+ return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.Item<>(arg));
+ }
+
+ // Typed convenience wrapper over getModifiedChild for augmentations.
+ @Override
+ @SuppressWarnings("unchecked")
+ public <C extends Augmentation<T> & DataObject> DataObjectModification<C> getModifiedAugmentation(
+ final Class<C> augmentation) {
+ return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.Item<>(augmentation));
+ }
+
+ // Decodes an optional DOM node to a Binding object; absent -> null.
+ private T deserialize(final Optional<NormalizedNode<?, ?>> dataAfter) {
+ if(dataAfter.isPresent()) {
+ return codec.deserialize(dataAfter.get());
+ }
+ return null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map.Entry;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Lazily translated {@link DataTreeModification} based on {@link DataTreeCandidate}.
+ *
+ * {@link DataTreeModification} represents a data tree change event, but the tree
+ * is not translated or resolved eagerly — only those child nodes which are
+ * directly accessed by the user of the data object modification are decoded.
+ *
+ */
+class LazyDataTreeModification<T extends DataObject> implements DataTreeModification<T> {
+
+ // Binding identifier (datastore + path) of the change root.
+ private final DataTreeIdentifier<T> path;
+ // Lazily-translating wrapper around the DOM change's root node.
+ private final DataObjectModification<T> rootNode;
+
+ LazyDataTreeModification(final LogicalDatastoreType datastoreType, final InstanceIdentifier<T> path, final BindingCodecTreeNode<T> codec, final DataTreeCandidate domChange) {
+ this.path = new DataTreeIdentifier<>(datastoreType, path);
+ this.rootNode = LazyDataObjectModification.create(codec, domChange.getRootNode());
+ }
+
+ @Override
+ public DataObjectModification<T> getRootNode() {
+ return rootNode;
+ }
+
+ @Override
+ public DataTreeIdentifier<T> getRootPath() {
+ return path;
+ }
+
+ // Resolves the Binding path/codec for the DOM change root via the codec, then
+ // wraps the change. The raw-type construction and unchecked cast are required
+ // because the codec lookup returns wildcard-typed results.
+ @SuppressWarnings({"unchecked", "rawtypes"})
+ static <T extends DataObject> DataTreeModification<T> create(final BindingToNormalizedNodeCodec codec, final DataTreeCandidate domChange,
+ final LogicalDatastoreType datastoreType) {
+ final Entry<InstanceIdentifier<?>, BindingCodecTreeNode<?>> codecCtx =
+ codec.getSubtreeCodec(domChange.getRootPath());
+ return (DataTreeModification<T>) new LazyDataTreeModification(datastoreType, codecCtx.getKey(), codecCtx.getValue(), domChange);
+ }
+
+ // Bulk translation of a batch of DOM changes, preserving order.
+ static <T extends DataObject> Collection<DataTreeModification<T>> from(final BindingToNormalizedNodeCodec codec,
+ final Collection<DataTreeCandidate> domChanges, final LogicalDatastoreType datastoreType) {
+ final List<DataTreeModification<T>> result = new ArrayList<>(domChanges.size());
+ for (final DataTreeCandidate domChange : domChanges) {
+ result.add(LazyDataTreeModification.<T>create(codec, domChange, datastoreType));
+ }
+ return result;
+ }
+
+}
base config:module-type;
config:provided-service binding-dom-mapping-service;
config:provided-service sal:binding-codec-tree-factory;
+ config:provided-service sal:binding-normalized-node-serializer;
config:java-name-prefix RuntimeMapping;
}
--- /dev/null
+package org.opendaylight.controller.md.sal.binding.impl.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_BAR_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.USES_ONE_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.complexUsesAugment;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.top;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.SettableFuture;
+import java.util.Collection;
+import java.util.concurrent.TimeUnit;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
+import org.opendaylight.controller.md.sal.binding.test.AbstractDataBrokerTest;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+
+public class DataTreeChangeListenerTest extends AbstractDataBrokerTest {
+
+ // Static fixtures: paths, last path arguments and data objects shared by all tests.
+ private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
+ private static final PathArgument TOP_ARGUMENT = TOP_PATH.getPathArguments().iterator().next();
+ private static final InstanceIdentifier<TopLevelList> FOO_PATH = path(TOP_FOO_KEY);
+ private static final PathArgument FOO_ARGUMENT = Iterables.getLast(FOO_PATH.getPathArguments());
+ private static final TopLevelList FOO_DATA = topLevelList(TOP_FOO_KEY, complexUsesAugment(USES_ONE_KEY));
+ private static final InstanceIdentifier<TopLevelList> BAR_PATH = path(TOP_BAR_KEY);
+ private static final PathArgument BAR_ARGUMENT = Iterables.getLast(BAR_PATH.getPathArguments());
+ private static final TopLevelList BAR_DATA = topLevelList(TOP_BAR_KEY);
+ // Listener registration target: the whole Top container in the OPERATIONAL store.
+ private static final DataTreeIdentifier<Top> TOP_IDENTIFIER =
+         new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, TOP_PATH);
+
+ private static final Top TOP_INITIAL_DATA = top(FOO_DATA);
+
+ // Captured in setupWithDataBroker(); not final because it is injected per test run.
+ private BindingDOMDataBrokerAdapter dataBrokerImpl;
+
+ /**
+ * Test listener which captures each change notification into a future so tests
+ * can block (with a timeout) until the next event arrives.
+ */
+ private static final class EventCapturingListener<T extends DataObject> implements DataTreeChangeListener<T> {
+
+ // Replaced with a fresh future after every consumed event.
+ private SettableFuture<Collection<DataTreeModification<T>>> changes = SettableFuture.create();
+
+ @Override
+ public void onDataTreeChanged(final Collection<DataTreeModification<T>> changes) {
+ this.changes.set(changes);
+
+ }
+
+ // Waits up to 200 ms for the next notification, then re-arms the capture future.
+ Collection<DataTreeModification<T>> nextEvent() throws Exception {
+ final Collection<DataTreeModification<T>> result = changes.get(200,TimeUnit.MILLISECONDS);
+ changes = SettableFuture.create();
+ return result;
+ }
+
+ }
+
+ // Loads only the two YANG test models this test needs into the schema context.
+ @Override
+ protected Iterable<YangModuleInfo> getModuleInfos() throws Exception {
+ return ImmutableSet.of(
+ BindingReflections.getModuleInfo(TwoLevelList.class),
+ BindingReflections.getModuleInfo(TreeComplexUsesAugment.class)
+ );
+ }
+
+ // Captures the broker as its concrete adapter type to access registerDataTreeChangeListener.
+ @Override
+ protected void setupWithDataBroker(final DataBroker dataBroker) {
+ dataBrokerImpl = (BindingDOMDataBrokerAdapter) dataBroker;
+ }
+
+ /**
+ * Listener registered on the Top container: expects an initial WRITE, then
+ * SUBTREE_MODIFIED root events whose modified child item reflects the
+ * put (WRITE) and delete (DELETE) of the "bar" list entry.
+ */
+ @Test
+ public void testTopLevelListener() throws Exception {
+ final EventCapturingListener<Top> listener = new EventCapturingListener<>();
+ dataBrokerImpl.registerDataTreeChangeListener(TOP_IDENTIFIER, listener);
+
+ createAndVerifyTop(listener);
+
+ putTx(BAR_PATH, BAR_DATA).submit().checkedGet();
+ final DataObjectModification<Top> afterBarPutEvent = Iterables.getOnlyElement(listener.nextEvent()).getRootNode();
+ verifyModification(afterBarPutEvent, TOP_ARGUMENT, ModificationType.SUBTREE_MODIFIED);
+ final DataObjectModification<TopLevelList> barPutMod = afterBarPutEvent.getModifiedChildListItem(TopLevelList.class, TOP_BAR_KEY);
+ assertNotNull(barPutMod);
+ verifyModification(barPutMod, BAR_ARGUMENT, ModificationType.WRITE);
+
+ deleteTx(BAR_PATH).submit().checkedGet();
+ final DataObjectModification<Top> afterBarDeleteEvent = Iterables.getOnlyElement(listener.nextEvent()).getRootNode();
+ verifyModification(afterBarDeleteEvent, TOP_ARGUMENT, ModificationType.SUBTREE_MODIFIED);
+ final DataObjectModification<TopLevelList> barDeleteMod = afterBarDeleteEvent.getModifiedChildListItem(TopLevelList.class, TOP_BAR_KEY);
+ verifyModification(barDeleteMod, BAR_ARGUMENT, ModificationType.DELETE);
+ }
+
+ /**
+ * Listener registered on the wildcarded list path: each event's root path must
+ * resolve to the concrete list entry (foo/bar) that was written or deleted,
+ * with the corresponding WRITE/DELETE modification type.
+ */
+ @Test
+ public void testWildcardedListListener() throws Exception {
+ final EventCapturingListener<TopLevelList> listener = new EventCapturingListener<>();
+ final DataTreeIdentifier<TopLevelList> wildcard = new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, TOP_PATH.child(TopLevelList.class));
+ dataBrokerImpl.registerDataTreeChangeListener(wildcard, listener);
+
+ putTx(TOP_PATH, TOP_INITIAL_DATA).submit().checkedGet();
+
+ final DataTreeModification<TopLevelList> fooWriteEvent = Iterables.getOnlyElement(listener.nextEvent());
+ assertEquals(FOO_PATH, fooWriteEvent.getRootPath().getRootIdentifier());
+ verifyModification(fooWriteEvent.getRootNode(), FOO_ARGUMENT, ModificationType.WRITE);
+
+ putTx(BAR_PATH, BAR_DATA).submit().checkedGet();
+ final DataTreeModification<TopLevelList> barWriteEvent = Iterables.getOnlyElement(listener.nextEvent());
+ assertEquals(BAR_PATH, barWriteEvent.getRootPath().getRootIdentifier());
+ verifyModification(barWriteEvent.getRootNode(), BAR_ARGUMENT, ModificationType.WRITE);
+
+ deleteTx(BAR_PATH).submit().checkedGet();
+ final DataTreeModification<TopLevelList> barDeleteEvent = Iterables.getOnlyElement(listener.nextEvent());
+ assertEquals(BAR_PATH, barDeleteEvent.getRootPath().getRootIdentifier());
+ verifyModification(barDeleteEvent.getRootNode(), BAR_ARGUMENT, ModificationType.DELETE);
+ }
+
+
+
+ // Writes the initial Top data and asserts the listener saw exactly one WRITE
+ // event carrying that data.
+ private void createAndVerifyTop(final EventCapturingListener<Top> listener) throws Exception {
+ putTx(TOP_PATH,TOP_INITIAL_DATA).submit().checkedGet();
+ final Collection<DataTreeModification<Top>> events = listener.nextEvent();
+
+ assertFalse("Non empty collection should be received.",events.isEmpty());
+ final DataTreeModification<Top> initialWrite = Iterables.getOnlyElement(events);
+ final DataObjectModification<? extends DataObject> initialNode = initialWrite.getRootNode();
+ verifyModification(initialNode,TOP_PATH.getPathArguments().iterator().next(),ModificationType.WRITE);
+ assertEquals(TOP_INITIAL_DATA, initialNode.getDataAfter());
+ }
+
+ // Asserts a modification's data type, modification type and identifier in one place.
+ private void verifyModification(final DataObjectModification<? extends DataObject> barWrite, final PathArgument pathArg,
+ final ModificationType eventType) {
+ assertEquals(pathArg.getType(), barWrite.getDataType());
+ assertEquals(eventType,barWrite.getModificationType());
+ assertEquals(pathArg, barWrite.getIdentifier());
+ }
+
+ // Builds (but does not submit) an OPERATIONAL put transaction for the given path/data.
+ private <T extends DataObject> WriteTransaction putTx(final InstanceIdentifier<T> path,final T data) {
+ final WriteTransaction tx = dataBrokerImpl.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.OPERATIONAL, path, data);
+ return tx;
+ }
+
+ // Builds (but does not submit) an OPERATIONAL delete transaction for the given path.
+ private WriteTransaction deleteTx(final InstanceIdentifier<?> path) {
+ final WriteTransaction tx = dataBrokerImpl.newWriteOnlyTransaction();
+ tx.delete(LogicalDatastoreType.OPERATIONAL, path);
+ return tx;
+ }
+}
config:java-class "org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeFactory";
}
+ identity binding-normalized-node-serializer {
+ base "config:service-type";
+ config:java-class "org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer";
+ }
+
identity binding-notification-subscription-service {
base "config:service-type";
config:java-class "org.opendaylight.controller.sal.binding.api.NotificationService";
mavenBundle(CONTROLLER, "sal-common-impl").versionAsInProject(), // //
mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(), //
- mavenBundle("com.google.guava", "guava").versionAsInProject()
+ mavenBundle("com.google.guava", "guava").versionAsInProject(),
+ mavenBundle("com.github.romix", "java-concurrent-hash-trie-map").versionAsInProject()
);
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+
+/**
+ * A DataPersistenceProvider implementation that delegates to another implementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class DelegatingPersistentDataProvider implements DataPersistenceProvider {
+ // Current delegate; swappable at runtime via setDelegate().
+ // NOTE(review): field is mutable and unsynchronized -- presumably only accessed
+ // from a single actor thread; confirm with callers.
+ private DataPersistenceProvider delegate;
+
+ public DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
+ this.delegate = delegate;
+ }
+
+ // Replaces the delegate, e.g. to toggle between persistent and non-persistent behavior.
+ public void setDelegate(DataPersistenceProvider delegate) {
+ this.delegate = delegate;
+ }
+
+ public DataPersistenceProvider getDelegate() {
+ return delegate;
+ }
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return delegate.isRecoveryApplicable();
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ delegate.persist(o, procedure);
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ delegate.saveSnapshot(o);
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ delegate.deleteSnapshots(criteria);
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ delegate.deleteMessages(sequenceNumber);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A DataPersistenceProvider implementation with persistence disabled, essentially a no-op.
+ */
+public class NonPersistentDataProvider implements DataPersistenceProvider {
+ private static final Logger LOG = LoggerFactory.getLogger(NonPersistentDataProvider.class);
+
+ // Nothing is ever persisted, so there is never anything to recover.
+ @Override
+ public boolean isRecoveryApplicable() {
+ return false;
+ }
+
+ // Invokes the callback synchronously without persisting. Exceptions are logged
+ // and deliberately suppressed -- this provider is best-effort by design.
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ try {
+ procedure.apply(o);
+ } catch (Exception e) {
+ LOG.error("An unexpected error occurred", e);
+ }
+ }
+
+ // No-op: snapshots are not saved when persistence is disabled.
+ @Override
+ public void saveSnapshot(Object o) {
+ }
+
+ // No-op: there are no snapshots to delete.
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ }
+
+ // No-op: there are no journal messages to delete.
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.UntypedPersistentActor;
+import com.google.common.base.Preconditions;
+
+/**
+ * A DataPersistenceProvider implementation with persistence enabled.
+ */
+public class PersistentDataProvider implements DataPersistenceProvider {
+
+ // The Akka persistent actor whose journal/snapshot APIs back this provider.
+ private final UntypedPersistentActor persistentActor;
+
+ public PersistentDataProvider(UntypedPersistentActor persistentActor) {
+ this.persistentActor = Preconditions.checkNotNull(persistentActor, "persistentActor can't be null");
+ }
+
+ // Persistence is enabled, so journal recovery applies on restart.
+ @Override
+ public boolean isRecoveryApplicable() {
+ return true;
+ }
+
+ // Delegates to Akka's persist(): the procedure runs after the event is journaled.
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ persistentActor.persist(o, procedure);
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ persistentActor.saveSnapshot(o);
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ persistentActor.deleteSnapshots(criteria);
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ persistentActor.deleteMessages(sequenceNumber);
+ }
+}
\ No newline at end of file
package org.opendaylight.controller.cluster.common.actor;
-import akka.japi.Procedure;
-import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.UntypedPersistentActor;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
unhandled(message);
}
-
- protected class PersistentDataProvider implements DataPersistenceProvider {
-
- public PersistentDataProvider(){
-
- }
-
- @Override
- public boolean isRecoveryApplicable() {
- return true;
- }
-
- @Override
- public <T> void persist(T o, Procedure<T> procedure) {
- AbstractUntypedPersistentActor.this.persist(o, procedure);
- }
-
- @Override
- public void saveSnapshot(Object o) {
- AbstractUntypedPersistentActor.this.saveSnapshot(o);
- }
-
- @Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
- AbstractUntypedPersistentActor.this.deleteSnapshots(criteria);
- }
-
- @Override
- public void deleteMessages(long sequenceNumber) {
- AbstractUntypedPersistentActor.this.deleteMessages(sequenceNumber);
- }
- }
-
- protected class NonPersistentDataProvider implements DataPersistenceProvider {
-
- public NonPersistentDataProvider(){
-
- }
-
- @Override
- public boolean isRecoveryApplicable() {
- return false;
- }
-
- @Override
- public <T> void persist(T o, Procedure<T> procedure) {
- try {
- procedure.apply(o);
- } catch (Exception e) {
- LOG.error("An unexpected error occurred", e);
- }
- }
-
- @Override
- public void saveSnapshot(Object o) {
- }
-
- @Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
-
- }
-
- @Override
- public void deleteMessages(long sequenceNumber) {
-
- }
- }
}
private final Logger LOG = LoggerFactory.getLogger(MeteredBoundedMailbox.class);
private MeteredMessageQueue queue;
- private Integer capacity;
- private FiniteDuration pushTimeOut;
- private MetricRegistry registry;
+ private final Integer capacity;
+ private final FiniteDuration pushTimeOut;
+ private final MetricRegistry registry;
private final String QUEUE_SIZE = "q-size";
this.capacity = commonConfig.getMailBoxCapacity();
this.pushTimeOut = commonConfig.getMailBoxPushTimeout();
- MetricsReporter reporter = MetricsReporter.getInstance();
+ MetricsReporter reporter = MetricsReporter.getInstance(MeteringBehavior.DOMAIN);
registry = reporter.getMetricsRegistry();
}
String metricName = MetricRegistry.name(actorName, QUEUE_SIZE);
if (registry.getMetrics().containsKey(metricName))
+ {
return; //already registered
+ }
Gauge<Integer> queueSize = getQueueSizeGuage(monitoredQueue);
registerQueueSizeMetric(metricName, queueSize);
* The information is reported to {@link org.opendaylight.controller.cluster.reporting.MetricsReporter}
*/
public class MeteringBehavior implements Procedure<Object> {
+ public static final String DOMAIN = "org.opendaylight.controller.actor.metric";
private final UntypedActor meteredActor;
- private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance().getMetricsRegistry();
+ private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance(DOMAIN).getMetricsRegistry();
private final String MSG_PROCESSING_RATE = "msg-rate";
private String actorQualifiedName;
output = new DataOutputStream(stream);
}
- public NormalizedNodeOutputStreamWriter(DataOutput output) throws IOException {
+ public NormalizedNodeOutputStreamWriter(DataOutput output) {
this.output = Preconditions.checkNotNull(output);
}
import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.MetricRegistry;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
/**
* Maintains metrics registry that is provided to reporters.
*/
public class MetricsReporter implements AutoCloseable {
- private static final MetricRegistry METRICS_REGISTRY = new MetricRegistry();
- private static final String DOMAIN = "org.opendaylight.controller.actor.metric";
- private static final MetricsReporter INSTANCE = new MetricsReporter();
-
- private final JmxReporter jmxReporter = JmxReporter.forRegistry(METRICS_REGISTRY).inDomain(DOMAIN).build();
-
- private MetricsReporter() {
+ // Per-domain reporter instances, created on demand. Declared final: the cache
+ // reference itself is never reassigned (only its entries change).
+ private static final LoadingCache<String, MetricsReporter> METRIC_REPORTERS = CacheBuilder.newBuilder().build(
+ new CacheLoader<String, MetricsReporter>() {
+ @Override
+ public MetricsReporter load(String domainName) {
+ return new MetricsReporter(domainName);
+ }
+ });
+
+ // JMX domain under which this reporter's metrics are published.
+ private final String domainName;
+ private final JmxReporter jmxReporter;
+ private final MetricRegistry metricRegistry = new MetricRegistry();
+
+ private MetricsReporter(String domainName) {
+ this.domainName = domainName;
+ jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(domainName).build();
jmxReporter.start();
}
- public static MetricsReporter getInstance() {
- return INSTANCE;
+ // Returns the (possibly freshly created) reporter for the given JMX domain.
+ public static MetricsReporter getInstance(String domainName) {
+ return METRIC_REPORTERS.getUnchecked(domainName);
}
public MetricRegistry getMetricsRegistry() {
- return METRICS_REGISTRY;
+ return metricRegistry;
}
@Override
public void close() {
jmxReporter.close();
+
+ // Drop the cached instance so a later getInstance() builds a fresh reporter.
+ METRIC_REPORTERS.invalidate(domainName);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import java.util.List;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import scala.concurrent.Future;
+
+/**
+ * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} instances returned by this
+ * implementation. In addition to the usual set of methods it also contains the list of actor
+ * futures.
+ */
+abstract class AbstractThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
+ // Returns the Futures for the cohort actors participating in this transaction's commit.
+ abstract List<Future<ActorSelection>> getCohortFutures();
+}
*/
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.collect.Lists;
+import com.google.common.collect.ImmutableList;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
import scala.concurrent.Future;
abstract class AbstractTransactionContext implements TransactionContext {
- protected final TransactionIdentifier identifier;
- protected final List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
+ // Futures for operations recorded so far; encapsulated (no longer protected)
+ // and exposed only via the copy/record accessors below.
+ private final List<Future<Object>> recordedOperationFutures = new ArrayList<>();
+ private final TransactionIdentifier identifier;
- AbstractTransactionContext(TransactionIdentifier identifier) {
+ protected AbstractTransactionContext(TransactionIdentifier identifier) {
this.identifier = identifier;
}
@Override
- public List<Future<Object>> getRecordedOperationFutures() {
- return recordedOperationFutures;
+ // Copies recorded futures into the caller's collection instead of leaking the
+ // internal mutable list.
+ public final void copyRecordedOperationFutures(Collection<Future<Object>> target) {
+ target.addAll(recordedOperationFutures);
}
-}
\ No newline at end of file
+
+ protected final TransactionIdentifier getIdentifier() {
+ return identifier;
+ }
+
+ // Immutable snapshot of the recorded futures for subclasses.
+ protected final Collection<Future<Object>> copyRecordedOperationFutures() {
+ return ImmutableList.copyOf(recordedOperationFutures);
+ }
+
+ protected final int recordedOperationCount() {
+ return recordedOperationFutures.size();
+ }
+
+ protected final void recordOperationFuture(Future<Object> future) {
+ recordedOperationFutures.add(future);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import akka.dispatch.OnComplete;
+import java.util.List;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.Promise;
+
+final class ChainedTransactionProxy extends TransactionProxy {
+ private static final Logger LOG = LoggerFactory.getLogger(ChainedTransactionProxy.class);
+
+ /**
+ * Stores the ready Futures from the previous Tx in the chain.
+ */
+ private final List<Future<ActorSelection>> previousReadyFutures;
+
+ /**
+ * Stores the ready Futures from this transaction when it is readied.
+ * Volatile: written by ready() and read via isReady()/getReadyFutures(),
+ * potentially from another thread.
+ */
+ private volatile List<Future<ActorSelection>> readyFutures;
+
+ ChainedTransactionProxy(ActorContext actorContext, TransactionType transactionType,
+ String transactionChainId, List<Future<ActorSelection>> previousReadyFutures) {
+ super(actorContext, transactionType, transactionChainId);
+ this.previousReadyFutures = previousReadyFutures;
+ }
+
+ // Null until ready() has been called.
+ List<Future<ActorSelection>> getReadyFutures() {
+ return readyFutures;
+ }
+
+ boolean isReady() {
+ return readyFutures != null;
+ }
+
+ // Captures the cohort futures produced by the superclass so the next Tx in the
+ // chain can wait on them.
+ @Override
+ public AbstractThreePhaseCommitCohort ready() {
+ final AbstractThreePhaseCommitCohort ret = super.ready();
+ readyFutures = ret.getCohortFutures();
+ LOG.debug("onTransactionReady {} pending readyFutures size {} chain {}", getIdentifier(),
+ readyFutures.size(), getTransactionChainId());
+ return ret;
+ }
+
+ /**
+ * This method is overridden to ensure the previous Tx's ready operations complete
+ * before we initiate the next Tx in the chain to avoid creation failures if the
+ * previous Tx's ready operations haven't completed yet.
+ */
+ @Override
+ protected Future<ActorSelection> sendFindPrimaryShardAsync(final String shardName) {
+ // Check if there are any previous ready Futures, otherwise let the super class handle it.
+ if(previousReadyFutures.isEmpty()) {
+ return super.sendFindPrimaryShardAsync(shardName);
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Waiting for {} previous ready futures for Tx {} on chain {}",
+ previousReadyFutures.size(), getIdentifier(), getTransactionChainId());
+ }
+
+ // Combine the ready Futures into 1.
+ Future<Iterable<ActorSelection>> combinedFutures = akka.dispatch.Futures.sequence(
+ previousReadyFutures, getActorContext().getClientDispatcher());
+
+ // Add a callback for completion of the combined Futures.
+ final Promise<ActorSelection> returnPromise = akka.dispatch.Futures.promise();
+ OnComplete<Iterable<ActorSelection>> onComplete = new OnComplete<Iterable<ActorSelection>>() {
+ @Override
+ public void onComplete(Throwable failure, Iterable<ActorSelection> notUsed) {
+ if(failure != null) {
+ // A Ready Future failed so fail the returned Promise.
+ returnPromise.failure(failure);
+ } else {
+ LOG.debug("Previous Tx readied - sending FindPrimaryShard for {} on chain {}",
+ getIdentifier(), getTransactionChainId());
+
+ // Send the FindPrimaryShard message and use the resulting Future to complete the
+ // returned Promise.
+ returnPromise.completeWith(ChainedTransactionProxy.super.sendFindPrimaryShardAsync(shardName));
+ }
+ }
+ };
+
+ combinedFutures.onComplete(onComplete, getActorContext().getClientDispatcher());
+
+ return returnPromise.future();
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import java.util.ArrayList;
+import java.util.List;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class DataChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterChangeListener, ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>> {
+ private static final Logger LOG = LoggerFactory.getLogger(DataChangeListenerSupport.class);
+ // Registrations received while this shard was not the leader; resolved on leadership gain.
+ private final List<DelayedListenerRegistration> delayedListenerRegistrations = new ArrayList<>();
+ // All listener actor paths, so they can be (dis)enabled on leadership changes.
+ private final List<ActorSelection> dataChangeListeners = new ArrayList<>();
+
+ DataChangeListenerSupport(final Shard shard) {
+ super(shard);
+ }
+
+ // Enables/disables notifications on all known listeners and, on becoming leader,
+ // materializes any registrations that were delayed while a follower.
+ @Override
+ void onLeadershipChange(final boolean isLeader) {
+ for (ActorSelection dataChangeListener : dataChangeListeners) {
+ dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
+ }
+
+ if (isLeader) {
+ for (DelayedListenerRegistration reg: delayedListenerRegistrations) {
+ if(!reg.isClosed()) {
+ reg.setDelegate(createDelegate(reg.getRegisterChangeListener()));
+ }
+ }
+
+ delayedListenerRegistrations.clear();
+ }
+ }
+
+ // Handles a RegisterChangeListener request: registers immediately when leader,
+ // otherwise queues a delayed registration; always replies with the registration actor.
+ @Override
+ void onMessage(final RegisterChangeListener message, final boolean isLeader) {
+
+ LOG.debug("{}: registerDataChangeListener for {}, leader: {}", persistenceId(), message.getPath(), isLeader);
+
+ ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> registration;
+ if (isLeader) {
+ registration = createDelegate(message);
+ } else {
+ LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
+
+ DelayedListenerRegistration delayedReg = new DelayedListenerRegistration(message);
+ delayedListenerRegistrations.add(delayedReg);
+ registration = delayedReg;
+ }
+
+ ActorRef listenerRegistration = createActor(DataChangeListenerRegistration.props(registration));
+
+ LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
+ persistenceId(), listenerRegistration.path());
+
+ tellSender(new RegisterChangeListenerReply(listenerRegistration));
+ }
+
+ // Performs the actual registration against the shard's datastore and enables
+ // notifications on the listener actor.
+ @Override
+ ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> createDelegate(
+ final RegisterChangeListener message) {
+ ActorSelection dataChangeListenerPath = selectActor(message.getDataChangeListenerPath());
+
+ // Notify the listener if notifications should be enabled or not
+ // If this shard is the leader then it will enable notifications else
+ // it will not
+ dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
+
+ // Now store a reference to the data change listener so it can be notified
+ // at a later point if notifications should be enabled or disabled
+ dataChangeListeners.add(dataChangeListenerPath);
+
+ AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
+ new DataChangeListenerProxy(dataChangeListenerPath);
+
+ LOG.debug("{}: Registering for path {}", persistenceId(), message.getPath());
+
+ return getShard().getDataStore().registerChangeListener(message.getPath(), listener,
+ message.getScope());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.Props;
+import akka.japi.Creator;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Proxy actor which acts as a facade to the user-provided listener. Responsible for
+ * unwrapping DataTreeChanged messages and dispatching their contents to the user.
+ */
+final class DataTreeChangeListenerActor extends AbstractUntypedActor {
+ private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerActor.class);
+ private final DOMDataTreeChangeListener listener;
+ private boolean notificationsEnabled = false;
+
+ private DataTreeChangeListenerActor(final DOMDataTreeChangeListener listener) {
+ this.listener = Preconditions.checkNotNull(listener);
+ }
+
+ @Override
+ protected void handleReceive(final Object message) {
+ if (message instanceof DataTreeChanged) {
+ dataChanged((DataTreeChanged)message);
+ } else if (message instanceof EnableNotification) {
+ enableNotification((EnableNotification) message);
+ }
+ }
+
+ private void dataChanged(final DataTreeChanged message) {
+ // Do nothing if notifications are not enabled
+ if (!notificationsEnabled) {
+ LOG.debug("Notifications not enabled for listener {} - dropping change notification", listener);
+ return;
+ }
+
+ LOG.debug("Sending change notification {} to listener {}", message.getChanges(), listener);
+
+ try {
+ this.listener.onDataTreeChanged(message.getChanges());
+ } catch (Exception e) {
+ LOG.error("Error notifying listener {}", this.listener, e);
+ }
+
+ // TODO: do we really need this?
+ // It seems the sender is never null but it doesn't hurt to check. If the caller passes in
+ // a null sender (ActorRef.noSender()), akka translates that to the deadLetters actor.
+ if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
+ getSender().tell(DataTreeChangedReply.getInstance(), getSelf());
+ }
+ }
+
+ private void enableNotification(final EnableNotification message) {
+ notificationsEnabled = message.isEnabled();
+ LOG.debug("{} notifications for listener {}", (notificationsEnabled ? "Enabled" : "Disabled"),
+ listener);
+ }
+
+ public static Props props(final DOMDataTreeChangeListener listener) {
+ return Props.create(new DataTreeChangeListenerCreator(listener));
+ }
+
+ private static final class DataTreeChangeListenerCreator implements Creator<DataTreeChangeListenerActor> {
+ private static final long serialVersionUID = 1L;
+ private final DOMDataTreeChangeListener listener;
+
+ DataTreeChangeListenerCreator(final DOMDataTreeChangeListener listener) {
+ this.listener = Preconditions.checkNotNull(listener);
+ }
+
+ @Override
+ public DataTreeChangeListenerActor create() {
+ return new DataTreeChangeListenerActor(listener);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.PoisonPill;
+import akka.dispatch.OnComplete;
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+
+/**
+ * Proxy class for holding required state to lazily instantiate a listener registration with an
+ * asynchronously-discovered actor.
+ *
+ * @param <T> listener type
+ */
+final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> extends AbstractListenerRegistration<T> {
+ private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerProxy.class);
+ private final ActorRef dataChangeListenerActor;
+ private final ActorContext actorContext;
+
+ @GuardedBy("this")
+ private ActorSelection listenerRegistrationActor;
+
+ public DataTreeChangeListenerProxy(final ActorContext actorContext, final T listener) {
+ super(listener);
+ this.actorContext = Preconditions.checkNotNull(actorContext);
+ this.dataChangeListenerActor = actorContext.getActorSystem().actorOf(
+ DataTreeChangeListenerActor.props(getInstance()).withDispatcher(actorContext.getNotificationDispatcherPath()));
+ }
+
+ @Override
+ protected synchronized void removeRegistration() {
+ if (listenerRegistrationActor != null) {
+ listenerRegistrationActor.tell(CloseDataTreeChangeListenerRegistration.getInstance(), ActorRef.noSender());
+ listenerRegistrationActor = null;
+ }
+
+ dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
+
+ void init(final String shardName, final YangInstanceIdentifier treeId) {
+ Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
+ findFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(final Throwable failure, final ActorRef shard) {
+ if (failure instanceof LocalShardNotFoundException) {
+ LOG.debug("No local shard found for {} - DataTreeChangeListener {} at path {} " +
+ "cannot be registered", shardName, getInstance(), treeId);
+ } else if (failure != null) {
+ LOG.error("Failed to find local shard {} - DataTreeChangeListener {} at path {} " +
+ "cannot be registered: {}", shardName, getInstance(), treeId, failure);
+ } else {
+ doRegistration(shard, treeId);
+ }
+ }
+ }, actorContext.getClientDispatcher());
+ }
+
+ private void setListenerRegistrationActor(final ActorSelection actor) {
+ if (actor == null) {
+ LOG.debug("Ignoring null actor on {}", this);
+ return;
+ }
+
+ synchronized (this) {
+ if (!isClosed()) {
+ this.listenerRegistrationActor = actor;
+ return;
+ }
+ }
+
+ // This registration has already been closed, notify the actor
+ actor.tell(CloseDataTreeChangeListenerRegistration.getInstance(), null);
+ }
+
+ private void doRegistration(final ActorRef shard, final YangInstanceIdentifier path) {
+
+ Future<Object> future = actorContext.executeOperationAsync(shard,
+ new RegisterDataTreeChangeListener(path, dataChangeListenerActor),
+ actorContext.getDatastoreContext().getShardInitializationTimeout());
+
+ future.onComplete(new OnComplete<Object>(){
+ @Override
+ public void onComplete(final Throwable failure, final Object result) {
+ if (failure != null) {
+ LOG.error("Failed to register DataTreeChangeListener {} at path {}",
+ getInstance(), path.toString(), failure);
+ } else {
+ RegisterDataTreeChangeListenerReply reply = (RegisterDataTreeChangeListenerReply) result;
+ setListenerRegistrationActor(actorContext.actorSelection(
+ reply.getListenerRegistrationPath().path()));
+ }
+ }
+ }, actorContext.getClientDispatcher());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.japi.Creator;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistrationReply;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * Actor co-located with a shard. It exists only to terminate the registration when
+ * asked to do so via {@link CloseDataTreeChangeListenerRegistration}.
+ */
+public final class DataTreeChangeListenerRegistrationActor extends AbstractUntypedActor {
+ private final ListenerRegistration<DOMDataTreeChangeListener> registration;
+
+ public DataTreeChangeListenerRegistrationActor(final ListenerRegistration<DOMDataTreeChangeListener> registration) {
+ this.registration = Preconditions.checkNotNull(registration);
+ }
+
+ @Override
+ protected void handleReceive(final Object message) throws Exception {
+ if (message instanceof CloseDataTreeChangeListenerRegistration) {
+ // Close the underlying registration, confirm to the requester, then
+ // terminate - this actor has no further purpose once closed
+ registration.close();
+ getSender().tell(CloseDataTreeChangeListenerRegistrationReply.getInstance(), getSelf());
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
+ }
+ }
+
+ /**
+ * Creates Props for instantiating this actor.
+ *
+ * @param registration the registration to close on request
+ */
+ public static Props props(final ListenerRegistration<DOMDataTreeChangeListener> registration) {
+ return Props.create(new DataTreeChangeListenerRegistrationCreator(registration));
+ }
+
+ // Serializable factory required by akka's Props.create
+ private static final class DataTreeChangeListenerRegistrationCreator implements Creator<DataTreeChangeListenerRegistrationActor> {
+ private static final long serialVersionUID = 1L;
+ private final ListenerRegistration<DOMDataTreeChangeListener> registration;
+
+ DataTreeChangeListenerRegistrationCreator(final ListenerRegistration<DOMDataTreeChangeListener> registration) {
+ this.registration = Preconditions.checkNotNull(registration);
+ }
+
+ @Override
+ public DataTreeChangeListenerRegistrationActor create() {
+ return new DataTreeChangeListenerRegistrationActor(registration);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import java.util.ArrayList;
+import java.util.Collection;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handles {@link RegisterDataTreeChangeListener} messages on behalf of a Shard.
+ * Registrations received while the shard is a follower are queued and materialized
+ * when it becomes the leader; registered listener actors are told about every
+ * leadership change via {@link EnableNotification}.
+ */
+final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>> {
+ private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerSupport.class);
+
+ // Registrations deferred while this shard was not the leader
+ private final ArrayList<DelayedDataTreeListenerRegistration> delayedRegistrations = new ArrayList<>();
+
+ // Listener actors which must be told about leadership changes
+ private final Collection<ActorSelection> actors = new ArrayList<>();
+
+ DataTreeChangeListenerSupport(final Shard shard) {
+ super(shard);
+ }
+
+ @Override
+ void onLeadershipChange(final boolean isLeader) {
+ if (isLeader) {
+ // Materialize every registration deferred while we were a follower
+ for (DelayedDataTreeListenerRegistration reg : delayedRegistrations) {
+ reg.createDelegate(this);
+ }
+ delayedRegistrations.clear();
+ delayedRegistrations.trimToSize();
+ }
+
+ // Enable notifications when becoming leader, disable when becoming follower
+ final EnableNotification msg = new EnableNotification(isLeader);
+ for (ActorSelection dataChangeListener : actors) {
+ dataChangeListener.tell(msg, getSelf());
+ }
+ }
+
+ @Override
+ void onMessage(final RegisterDataTreeChangeListener registerTreeChangeListener, final boolean isLeader) {
+ LOG.debug("{}: registerTreeChangeListener for {}, leader: {}", persistenceId(), registerTreeChangeListener.getPath(), isLeader);
+
+ final ListenerRegistration<DOMDataTreeChangeListener> registration;
+ if (!isLeader) {
+ LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
+
+ DelayedDataTreeListenerRegistration delayedReg =
+ new DelayedDataTreeListenerRegistration(registerTreeChangeListener);
+ delayedRegistrations.add(delayedReg);
+ registration = delayedReg;
+ } else {
+ registration = createDelegate(registerTreeChangeListener);
+ }
+
+ // Create an actor the client can use to close the registration remotely
+ ActorRef listenerRegistration = createActor(DataTreeChangeListenerRegistrationActor.props(registration));
+
+ LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
+ persistenceId(), listenerRegistration.path());
+
+ tellSender(new RegisterDataTreeChangeListenerReply(listenerRegistration));
+ }
+
+ @Override
+ ListenerRegistration<DOMDataTreeChangeListener> createDelegate(final RegisterDataTreeChangeListener message) {
+ ActorSelection dataChangeListenerPath = selectActor(message.getDataTreeChangeListenerPath());
+
+ // Notify the listener if notifications should be enabled or not
+ // If this shard is the leader then it will enable notifications else
+ // it will not
+ dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
+
+ // Now store a reference to the data change listener so it can be notified
+ // at a later point if notifications should be enabled or disabled
+ actors.add(dataChangeListenerPath);
+
+ DOMDataTreeChangeListener listener = new ForwardingDataTreeChangeListener(dataChangeListenerPath);
+
+ LOG.debug("{}: Registering for path {}", persistenceId(), message.getPath());
+
+ return getShard().getDataStore().registerTreeChangeListener(message.getPath(), listener);
+ }
+}
package org.opendaylight.controller.cluster.datastore;
import akka.util.Timeout;
+import com.google.common.collect.Sets;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.text.WordUtils;
import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
* @author Thomas Pantelis
*/
public class DatastoreContext {
+ public static final String METRICS_DOMAIN = "org.opendaylight.controller.cluster.datastore";
public static final Duration DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT = Duration.create(10, TimeUnit.MINUTES);
public static final int DEFAULT_OPERATION_TIMEOUT_IN_SECONDS = 5;
public static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT= 100;
+ private static Set<String> globalDatastoreTypes = Sets.newConcurrentHashSet();
+
private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
private Duration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
private int operationTimeoutInSeconds = DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
private boolean writeOnlyTransactionOptimizationsEnabled = false;
+ // Returns the set of data store types created in this JVM so far.
+ // NOTE(review): this hands out the mutable internal set - callers could modify it;
+ // confirm whether an unmodifiable view should be returned instead.
+ public static Set<String> getGlobalDatastoreTypes() {
+ return globalDatastoreTypes;
+ }
+
private DatastoreContext() {
setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
setSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT);
datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.create(
maxShardDataChangeExecutorPoolSize, maxShardDataChangeExecutorQueueSize,
maxShardDataChangeListenerQueueSize, maxShardDataStoreExecutorQueueSize);
+
+ if(datastoreContext.dataStoreType != null) {
+ globalDatastoreTypes.add(datastoreContext.dataStoreType);
+ }
+
return datastoreContext;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * Intermediate proxy registration returned to the user when we cannot
+ * instantiate the registration immediately. It provides a bridge to
+ * a real registration which may materialize at some point in the future.
+ */
+final class DelayedDataTreeListenerRegistration implements ListenerRegistration<DOMDataTreeChangeListener> {
+ // The original registration request, replayed when a delegate can be created
+ private final RegisterDataTreeChangeListener registerTreeChangeListener;
+
+ // volatile so getInstance() can read it without taking the lock
+ private volatile ListenerRegistration<DOMDataTreeChangeListener> delegate;
+ @GuardedBy("this")
+ private boolean closed;
+
+ DelayedDataTreeListenerRegistration(final RegisterDataTreeChangeListener registerTreeChangeListener) {
+ this.registerTreeChangeListener = Preconditions.checkNotNull(registerTreeChangeListener);
+ }
+
+ // Synchronized with close() so no delegate is created after this registration
+ // has been closed
+ synchronized void createDelegate(final DelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>> factory) {
+ if (!closed) {
+ this.delegate = factory.createDelegate(registerTreeChangeListener);
+ }
+ }
+
+ @Override
+ public DOMDataTreeChangeListener getInstance() {
+ // May legitimately return null while the delegate has not materialized yet
+ final ListenerRegistration<DOMDataTreeChangeListener> d = delegate;
+ return d == null ? null : d.getInstance();
+ }
+
+ @Override
+ public synchronized void close() {
+ if (!closed) {
+ closed = true;
+ if (delegate != null) {
+ delegate.close();
+ }
+ }
+ }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Intermediate registration handed out while the real data-change listener
+ * registration cannot be created yet; bridges to the real registration once
+ * setDelegate() is invoked.
+ *
+ * NOTE(review): unlike DelayedDataTreeListenerRegistration, close() and
+ * setDelegate() are not synchronized here - a close() racing with
+ * setDelegate() could leave the new delegate unclosed; confirm whether the
+ * caller serializes these invocations.
+ */
+final class DelayedListenerRegistration implements
+ ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
+
+ private volatile boolean closed;
+
+ // The original registration request, kept so it can be replayed later
+ private final RegisterChangeListener registerChangeListener;
+
+ private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> delegate;
+
+ DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
+ this.registerChangeListener = registerChangeListener;
+ }
+
+ void setDelegate( final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> registration) {
+ this.delegate = registration;
+ }
+
+ boolean isClosed() {
+ return closed;
+ }
+
+ RegisterChangeListener getRegisterChangeListener() {
+ return registerChangeListener;
+ }
+
+ @Override
+ public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
+ // May return null until the delegate materializes
+ return delegate != null ? delegate.getInstance() : null;
+ }
+
+ @Override
+ public void close() {
+ closed = true;
+ if(delegate != null) {
+ delegate.close();
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+/**
+ * Base class for factories instantiating delegates.
+ *
+ * @param <M> message type
+ * @param <D> delegate type
+ */
+abstract class DelegateFactory<M, D> {
+ // Creates the delegate appropriate for the specified message
+ abstract D createDelegate(M message);
+}
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*
*/
public class DistributedDataStore implements DOMStore, SchemaContextListener,
- DatastoreContextConfigAdminOverlay.Listener, AutoCloseable {
+ DatastoreContextConfigAdminOverlay.Listener, DOMStoreTreeChangePublisher, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
private static final String UNKNOWN_TYPE = "unknown";
private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
- private CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
+ private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
+
+ private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
private final String type;
datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
datastoreConfigMXBean.setContext(datastoreContext);
datastoreConfigMXBean.registerMBean();
+
+ datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContext.getDataStoreMXBeanType(), actorContext);
+ datastoreInfoMXBean.registerMBean();
}
public DistributedDataStore(ActorContext actorContext) {
return listenerRegistrationProxy;
}
+ @Override
+ public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(YangInstanceIdentifier treeId, L listener) {
+ Preconditions.checkNotNull(treeId, "treeId should not be null");
+ Preconditions.checkNotNull(listener, "listener should not be null");
+
+ // Route the registration to the shard responsible for this subtree
+ final String shardName = ShardStrategyFactory.getStrategy(treeId).findShard(treeId);
+ LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
+
+ // The proxy completes the registration asynchronously once the shard is found
+ final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
+ new DataTreeChangeListenerProxy<L>(actorContext, listener);
+ listenerRegistrationProxy.init(shardName, treeId);
+
+ return listenerRegistrationProxy;
+ }
+
@Override
public DOMStoreTransactionChain createTransactionChain() {
return new TransactionChainProxy(actorContext);
@Override
public void close() {
datastoreConfigMXBean.unregisterMBean();
+ datastoreInfoMXBean.unregisterMBean();
if(closeable != null) {
try {
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Internal implementation of a {@link DOMDataTreeChangeListener} which
+ * encapsulates received notifications into a {@link DataTreeChanged}
+ * message and forwards them towards the client's {@link DataTreeChangeListenerActor}.
+ */
+final class ForwardingDataTreeChangeListener implements DOMDataTreeChangeListener {
+ private final ActorSelection actor;
+
+ ForwardingDataTreeChangeListener(final ActorSelection actor) {
+ this.actor = Preconditions.checkNotNull(actor, "actor should not be null");
+ }
+
+ @Override
+ public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+ // Fire-and-forget: no sender, so any reply goes to dead letters
+ actor.tell(new DataTreeChanged(changes), ActorRef.noSender());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorPath;
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.Props;
+import com.google.common.base.Preconditions;
+
+/**
+ * Base class for factories instantiating delegates which are local to the
+ * shard leader.
+ *
+ * @param <M> message type
+ * @param <D> delegate type
+ */
+abstract class LeaderLocalDelegateFactory<M, D> extends DelegateFactory<M, D> {
+ private final Shard shard;
+
+ protected LeaderLocalDelegateFactory(final Shard shard) {
+ this.shard = Preconditions.checkNotNull(shard);
+ }
+
+ // Returns the shard actor's own reference, used as the sender for messages
+ protected final ActorRef getSelf() {
+ return shard.getSelf();
+ }
+
+ protected final Shard getShard() {
+ return shard;
+ }
+
+ protected final String persistenceId() {
+ return shard.persistenceId();
+ }
+
+ // Replies to the sender of the message the shard is currently processing
+ protected final void tellSender(final Object message) {
+ shard.getSender().tell(message, getSelf());
+ }
+
+ // Creates a child actor in the shard's context
+ protected final ActorRef createActor(final Props props) {
+ return shard.getContext().actorOf(props);
+ }
+
+ protected final ActorSelection selectActor(ActorRef ref) {
+ return shard.getContext().system().actorSelection(ref.path());
+ }
+
+ protected final ActorSelection selectActor(ActorPath path) {
+ return shard.getContext().system().actorSelection(path);
+ }
+
+ /**
+ * Invoked whenever the local shard's leadership role changes.
+ *
+ * @param isLeader true if the shard has become leader, false if it has
+ * become a follower.
+ */
+ abstract void onLeadershipChange(boolean isLeader);
+
+ /**
+ * Invoked when the shard receives a message destined for this factory.
+ *
+ * @param message the message
+ * @param isLeader true if the shard is currently the leader
+ */
+ abstract void onMessage(M message, boolean isLeader);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collections;
+import java.util.List;
+import scala.concurrent.Future;
+
+/**
+ * A {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}
+ * instance given out for empty transactions. All phases complete immediately and
+ * successfully since there is nothing to commit.
+ */
+final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort {
+ // Stateless, so a single shared instance suffices
+ static final NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
+
+ // Pre-built completed futures, shared across all invocations
+ private static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);
+ private static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
+
+ private NoOpDOMStoreThreePhaseCommitCohort() {
+ // Hidden to prevent instantiation
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ return IMMEDIATE_BOOLEAN_SUCCESS;
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return IMMEDIATE_VOID_SUCCESS;
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return IMMEDIATE_VOID_SUCCESS;
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ return IMMEDIATE_VOID_SUCCESS;
+ }
+
+ @Override
+ List<Future<ActorSelection>> getCohortFutures() {
+ // No remote cohorts participate in an empty transaction
+ return Collections.emptyList();
+ }
+}
@Override
public void closeTransaction() {
- LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+ LOG.debug("NoOpTransactionContext {} closeTransaction called", getIdentifier());
}
@Override
public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called", identifier);
+ LOG.debug("Tx {} readyTransaction called", getIdentifier());
operationLimiter.release();
return akka.dispatch.Futures.failed(failure);
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ LOG.debug("Tx {} deleteData called path = {}", getIdentifier(), path);
operationLimiter.release();
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ LOG.debug("Tx {} mergeData called path = {}", getIdentifier(), path);
operationLimiter.release();
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ LOG.debug("Tx {} writeData called path = {}", getIdentifier(), path);
operationLimiter.release();
}
@Override
public void readData(final YangInstanceIdentifier path, SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
+ LOG.debug("Tx {} readData called path = {}", getIdentifier(), path);
operationLimiter.release();
proxyFuture.setException(new ReadFailedException("Error reading data for path " + path, failure));
}
@Override
public void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ LOG.debug("Tx {} dataExists called path = {}", getIdentifier(), path);
operationLimiter.release();
proxyFuture.setException(new ReadFailedException("Error checking exists for path " + path, failure));
}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
private final ShardStats shardMBean;
- private final List<ActorSelection> dataChangeListeners = Lists.newArrayList();
-
- private final List<DelayedListenerRegistration> delayedListenerRegistrations =
- Lists.newArrayList();
-
private DatastoreContext datastoreContext;
- private DataPersistenceProvider dataPersistenceProvider;
-
private SchemaContext schemaContext;
private int createSnapshotTransactionCounter;
private final String txnDispatcherPath;
+ private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
+ private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
+
protected Shard(final ShardIdentifier name, final Map<String, String> peerAddresses,
final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
super(name.toString(), new HashMap<>(peerAddresses), Optional.of(datastoreContext.getShardRaftConfig()));
this.name = name.toString();
this.datastoreContext = datastoreContext;
this.schemaContext = schemaContext;
- this.dataPersistenceProvider = (datastoreContext.isPersistent())
- ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
this.txnDispatcherPath = new Dispatchers(context().system().dispatchers())
.getDispatcherPath(Dispatchers.DispatcherType.Transaction);
+ setPersistence(datastoreContext.isPersistent());
LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
datastoreContext.getDataStoreProperties());
- if(schemaContext != null) {
+ if (schemaContext != null) {
store.onGlobalContextUpdated(schemaContext);
}
} else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
closeTransactionChain(CloseTransactionChain.fromSerializable(message));
} else if (message instanceof RegisterChangeListener) {
- registerChangeListener((RegisterChangeListener) message);
+ changeSupport.onMessage((RegisterChangeListener) message, isLeader());
+ } else if (message instanceof RegisterDataTreeChangeListener) {
+ treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
} else if (message instanceof UpdateSchemaContext) {
updateSchemaContext((UpdateSchemaContext) message);
} else if (message instanceof PeerAddressResolved) {
setTransactionCommitTimeout();
- if(datastoreContext.isPersistent() &&
- dataPersistenceProvider instanceof NonPersistentRaftDataProvider) {
- dataPersistenceProvider = new PersistentDataProvider();
- } else if(!datastoreContext.isPersistent() &&
- dataPersistenceProvider instanceof PersistentDataProvider) {
- dataPersistenceProvider = new NonPersistentRaftDataProvider();
+ if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
+ setPersistence(true);
+ } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
+ setPersistence(false);
}
updateConfigParams(datastoreContext.getShardRaftConfig());
store.onGlobalContextUpdated(schemaContext);
}
- private void registerChangeListener(final RegisterChangeListener registerChangeListener) {
-
- LOG.debug("{}: registerDataChangeListener for {}", persistenceId(), registerChangeListener.getPath());
-
- ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> registration;
- if(isLeader()) {
- registration = doChangeListenerRegistration(registerChangeListener);
- } else {
- LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
-
- DelayedListenerRegistration delayedReg =
- new DelayedListenerRegistration(registerChangeListener);
- delayedListenerRegistrations.add(delayedReg);
- registration = delayedReg;
- }
-
- ActorRef listenerRegistration = getContext().actorOf(
- DataChangeListenerRegistration.props(registration));
-
- LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
- persistenceId(), listenerRegistration.path());
-
- getSender().tell(new RegisterChangeListenerReply(listenerRegistration), getSelf());
- }
-
- private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> doChangeListenerRegistration(
- final RegisterChangeListener registerChangeListener) {
-
- ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
- registerChangeListener.getDataChangeListenerPath());
-
- // Notify the listener if notifications should be enabled or not
- // If this shard is the leader then it will enable notifications else
- // it will not
- dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
-
- // Now store a reference to the data change listener so it can be notified
- // at a later point if notifications should be enabled or disabled
- dataChangeListeners.add(dataChangeListenerPath);
-
- AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
- new DataChangeListenerProxy(dataChangeListenerPath);
-
- LOG.debug("{}: Registering for path {}", persistenceId(), registerChangeListener.getPath());
-
- return store.registerChangeListener(registerChangeListener.getPath(), listener,
- registerChangeListener.getScope());
- }
-
- private boolean isMetricsCaptureEnabled(){
+ private boolean isMetricsCaptureEnabled() {
CommonConfig config = new CommonConfig(getContext().system().settings().config());
return config.isMetricCaptureEnabled();
}
@Override
protected void onStateChanged() {
boolean isLeader = isLeader();
- for (ActorSelection dataChangeListener : dataChangeListeners) {
- dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
- }
-
- if(isLeader) {
- for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
- if(!reg.isClosed()) {
- reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
- }
- }
-
- delayedListenerRegistrations.clear();
- }
+ changeSupport.onLeadershipChange(isLeader);
+ treeChangeSupport.onLeadershipChange(isLeader);
// If this actor is no longer the leader close all the transaction chains
- if(!isLeader) {
+ if (!isLeader) {
if(LOG.isDebugEnabled()) {
LOG.debug(
"{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
}
@Override
- protected DataPersistenceProvider persistence() {
- return dataPersistenceProvider;
- }
-
- @Override public String persistenceId() {
+ public String persistenceId() {
return this.name;
}
- @VisibleForTesting
- DataPersistenceProvider getDataPersistenceProvider() {
- return dataPersistenceProvider;
- }
-
@VisibleForTesting
ShardCommitCoordinator getCommitCoordinator() {
return commitCoordinator;
ShardStats getShardMBean() {
return shardMBean;
}
-
- private static class DelayedListenerRegistration implements
- ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
-
- private volatile boolean closed;
-
- private final RegisterChangeListener registerChangeListener;
-
- private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> delegate;
-
- DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
- this.registerChangeListener = registerChangeListener;
- }
-
- void setDelegate( final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> registration) {
- this.delegate = registration;
- }
-
- boolean isClosed() {
- return closed;
- }
-
- RegisterChangeListener getRegisterChangeListener() {
- return registerChangeListener;
- }
-
- @Override
- public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
- return delegate != null ? delegate.getInstance() : null;
- }
-
- @Override
- public void close() {
- closed = true;
- if(delegate != null) {
- delegate.close();
- }
- }
- }
}
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
+import org.opendaylight.controller.cluster.PersistentDataProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
}
protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
- return (persistent) ? new PersistentDataProvider() : new NonPersistentDataProvider();
+ return (persistent) ? new PersistentDataProvider(this) : new NonPersistentDataProvider();
}
public static Props props(
import akka.actor.ActorSelection;
import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
/**
* ThreePhaseCommitCohortProxy represents a set of remote cohort proxies
*/
-public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCohort{
+public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort {
private static final Logger LOG = LoggerFactory.getLogger(ThreePhaseCommitCohortProxy.class);
@Override
public ListenableFuture<Void> commit() {
- OperationCallback operationCallback = (cohortFutures.size() == 0) ? NO_OP_CALLBACK :
+ OperationCallback operationCallback = cohortFutures.isEmpty() ? NO_OP_CALLBACK :
new TransactionRateLimitingCallback(actorContext);
return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
}, actorContext.getClientDispatcher());
}
- @VisibleForTesting
+ @Override
List<Future<ActorSelection>> getCohortFutures() {
return Collections.unmodifiableList(cohortFutures);
}
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorSelection;
-import akka.dispatch.OnComplete;
import com.google.common.base.Preconditions;
import java.util.Collections;
import java.util.List;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
-import scala.concurrent.Promise;
/**
* TransactionChainProxy acts as a proxy for a DOMStoreTransactionChain created on a remote shard
*/
public class TransactionChainProxy implements DOMStoreTransactionChain {
- private static final Logger LOG = LoggerFactory.getLogger(TransactionChainProxy.class);
-
private interface State {
boolean isReady();
private void checkReadyState(State state) {
Preconditions.checkState(state.isReady(), "Previous transaction is not ready yet");
}
-
- private static class ChainedTransactionProxy extends TransactionProxy {
-
- /**
- * Stores the ready Futures from the previous Tx in the chain.
- */
- private final List<Future<ActorSelection>> previousReadyFutures;
-
- /**
- * Stores the ready Futures from this transaction when it is readied.
- */
- private volatile List<Future<ActorSelection>> readyFutures;
-
- private ChainedTransactionProxy(ActorContext actorContext, TransactionType transactionType,
- String transactionChainId, List<Future<ActorSelection>> previousReadyFutures) {
- super(actorContext, transactionType, transactionChainId);
- this.previousReadyFutures = previousReadyFutures;
- }
-
- List<Future<ActorSelection>> getReadyFutures() {
- return readyFutures;
- }
-
- boolean isReady() {
- return readyFutures != null;
- }
-
- @Override
- protected void onTransactionReady(List<Future<ActorSelection>> readyFutures) {
- LOG.debug("onTransactionReady {} pending readyFutures size {} chain {}", getIdentifier(),
- readyFutures.size(), getTransactionChainId());
- this.readyFutures = readyFutures;
- }
-
- /**
- * This method is overridden to ensure the previous Tx's ready operations complete
- * before we initiate the next Tx in the chain to avoid creation failures if the
- * previous Tx's ready operations haven't completed yet.
- */
- @Override
- protected Future<ActorSelection> sendFindPrimaryShardAsync(final String shardName) {
- // Check if there are any previous ready Futures, otherwise let the super class handle it.
- if(previousReadyFutures.isEmpty()) {
- return super.sendFindPrimaryShardAsync(shardName);
- }
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Waiting for {} previous ready futures for Tx {} on chain {}",
- previousReadyFutures.size(), getIdentifier(), getTransactionChainId());
- }
-
- // Combine the ready Futures into 1.
- Future<Iterable<ActorSelection>> combinedFutures = akka.dispatch.Futures.sequence(
- previousReadyFutures, getActorContext().getClientDispatcher());
-
- // Add a callback for completion of the combined Futures.
- final Promise<ActorSelection> returnPromise = akka.dispatch.Futures.promise();
- OnComplete<Iterable<ActorSelection>> onComplete = new OnComplete<Iterable<ActorSelection>>() {
- @Override
- public void onComplete(Throwable failure, Iterable<ActorSelection> notUsed) {
- if(failure != null) {
- // A Ready Future failed so fail the returned Promise.
- returnPromise.failure(failure);
- } else {
- LOG.debug("Previous Tx readied - sending FindPrimaryShard for {} on chain {}",
- getIdentifier(), getTransactionChainId());
-
- // Send the FindPrimaryShard message and use the resulting Future to complete the
- // returned Promise.
- returnPromise.completeWith(ChainedTransactionProxy.super.sendFindPrimaryShardAsync(shardName));
- }
- }
- };
-
- combinedFutures.onComplete(onComplete, getActorContext().getClientDispatcher());
-
- return returnPromise.future();
- }
- }
}
import akka.actor.ActorSelection;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.List;
+import java.util.Collection;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import scala.concurrent.Future;
void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture);
- List<Future<Object>> getRecordedOperationFutures();
+ void copyRecordedOperationFutures(Collection<Future<Object>> target);
}
@Override
public void closeTransaction() {
- LOG.debug("Tx {} closeTransaction called", identifier);
+ LOG.debug("Tx {} closeTransaction called", getIdentifier());
actorContext.sendOperationAsync(getActor(), CloseTransaction.INSTANCE.toSerializable());
}
@Override
public Future<ActorSelection> readyTransaction() {
LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- identifier, recordedOperationFutures.size());
+ getIdentifier(), recordedOperationCount());
// Send the remaining batched modifications if any.
// Future will fail. We need all prior operations and the ready operation to succeed
// in order to attempt commit.
- List<Future<Object>> futureList = Lists.newArrayListWithCapacity(recordedOperationFutures.size() + 1);
- futureList.addAll(recordedOperationFutures);
+ List<Future<Object>> futureList = Lists.newArrayListWithCapacity(recordedOperationCount() + 1);
+ copyRecordedOperationFutures(futureList);
futureList.add(withLastReplyFuture);
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(futureList,
@Override
public ActorSelection checkedApply(Iterable<Object> notUsed) {
LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
- identifier);
+ getIdentifier());
// At this point all the Futures succeeded and we need to extract the cohort
// actor path from the ReadyTransactionReply. For the recorded operations, they
} else {
// Throwing an exception here will fail the Future.
throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
- identifier, serializedReadyReply.getClass()));
+ getIdentifier(), serializedReadyReply.getClass()));
}
}
}, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
private void batchModification(Modification modification) {
if(batchedModifications == null) {
- batchedModifications = new BatchedModifications(identifier.toString(), remoteTransactionVersion,
+ batchedModifications = new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion,
transactionChainId);
}
private void sendAndRecordBatchedModifications() {
Future<Object> sentFuture = sendBatchedModifications();
if(sentFuture != null) {
- recordedOperationFutures.add(sentFuture);
+ recordOperationFuture(sentFuture);
}
}
Future<Object> sent = null;
if(batchedModifications != null) {
if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} sending {} batched modifications, ready: {}", identifier,
+ LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
batchedModifications.getModifications().size(), ready);
}
batchedModifications.setReady(ready);
sent = executeOperationAsync(batchedModifications);
- batchedModifications = new BatchedModifications(identifier.toString(), remoteTransactionVersion,
+ batchedModifications = new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion,
transactionChainId);
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ LOG.debug("Tx {} deleteData called path = {}", getIdentifier(), path);
batchModification(new DeleteModification(path));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ LOG.debug("Tx {} mergeData called path = {}", getIdentifier(), path);
batchModification(new MergeModification(path, data));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ LOG.debug("Tx {} writeData called path = {}", getIdentifier(), path);
batchModification(new WriteModification(path, data));
}
@Override
- public void readData(
- final YangInstanceIdentifier path,final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture ) {
+ public void readData(final YangInstanceIdentifier path,
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture ) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
+ LOG.debug("Tx {} readData called path = {}", getIdentifier(), path);
- // Send the remaining batched modifications if any.
+ // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
+ // public API contract.
sendAndRecordBatchedModifications();
- // If there were any previous recorded put/merge/delete operation reply Futures then we
- // must wait for them to successfully complete. This is necessary to honor the read
- // uncommitted semantics of the public API contract. If any one fails then fail the read.
-
- if(recordedOperationFutures.isEmpty()) {
- finishReadData(path, returnFuture);
- } else {
- LOG.debug("Tx {} readData: verifying {} previous recorded operations",
- identifier, recordedOperationFutures.size());
-
- // Note: we make a copy of recordedOperationFutures to be on the safe side in case
- // Futures#sequence accesses the passed List on a different thread, as
- // recordedOperationFutures is not synchronized.
-
- Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
- Lists.newArrayList(recordedOperationFutures),
- actorContext.getClientDispatcher());
-
- OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
- @Override
- public void onComplete(Throwable failure, Iterable<Object> notUsed)
- throws Throwable {
- if(failure != null) {
- LOG.debug("Tx {} readData: a recorded operation failed: {}",
- identifier, failure);
- returnFuture.setException(new ReadFailedException(
- "The read could not be performed because a previous put, merge,"
- + "or delete operation failed", failure));
- } else {
- finishReadData(path, returnFuture);
- }
- }
- };
-
- combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
- }
-
- }
-
- private void finishReadData(final YangInstanceIdentifier path,
- final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
-
- LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
-
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object readResponse) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} read operation failed: {}", identifier, failure);
+ LOG.debug("Tx {} read operation failed: {}", getIdentifier(), failure);
returnFuture.setException(new ReadFailedException(
"Error reading data for path " + path, failure));
} else {
- LOG.debug("Tx {} read operation succeeded", identifier, failure);
+ LOG.debug("Tx {} read operation succeeded", getIdentifier(), failure);
if (readResponse instanceof ReadDataReply) {
ReadDataReply reply = (ReadDataReply) readResponse;
@Override
public void dataExists(final YangInstanceIdentifier path, final SettableFuture<Boolean> returnFuture) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ LOG.debug("Tx {} dataExists called path = {}", getIdentifier(), path);
- // Send the remaining batched modifications if any.
+ // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
+ // public API contract.
sendAndRecordBatchedModifications();
- // If there were any previous recorded put/merge/delete operation reply Futures then we
- // must wait for them to successfully complete. This is necessary to honor the read
- // uncommitted semantics of the public API contract. If any one fails then fail this
- // request.
-
- if(recordedOperationFutures.isEmpty()) {
- finishDataExists(path, returnFuture);
- } else {
- LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
- identifier, recordedOperationFutures.size());
-
- // Note: we make a copy of recordedOperationFutures to be on the safe side in case
- // Futures#sequence accesses the passed List on a different thread, as
- // recordedOperationFutures is not synchronized.
-
- Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
- Lists.newArrayList(recordedOperationFutures),
- actorContext.getClientDispatcher());
- OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
- @Override
- public void onComplete(Throwable failure, Iterable<Object> notUsed)
- throws Throwable {
- if(failure != null) {
- LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
- identifier, failure);
- returnFuture.setException(new ReadFailedException(
- "The data exists could not be performed because a previous "
- + "put, merge, or delete operation failed", failure));
- } else {
- finishDataExists(path, returnFuture);
- }
- }
- };
-
- combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
- }
- }
-
- private void finishDataExists(final YangInstanceIdentifier path,
- final SettableFuture<Boolean> returnFuture) {
-
- LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
-
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object response) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
+ LOG.debug("Tx {} dataExists operation failed: {}", getIdentifier(), failure);
returnFuture.setException(new ReadFailedException(
"Error checking data exists for path " + path, failure));
} else {
- LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
+ LOG.debug("Tx {} dataExists operation succeeded", getIdentifier(), failure);
if (response instanceof DataExistsReply) {
returnFuture.set(Boolean.valueOf(((DataExistsReply) response).exists()));
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+/**
+ * Abstract superclass for transaction operations which should be executed
+ * on a {@link TransactionContext} at a later point in time.
+ */
+abstract class TransactionOperation {
+ /**
+ * Execute the delayed operation against the supplied transaction context.
+ *
+ * @param transactionContext the {@link TransactionContext} on which to invoke this operation
+ */
+ protected abstract void invoke(TransactionContext transactionContext);
+}
import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.opendaylight.controller.cluster.datastore.compat.PreLithiumTransactionContextImpl;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
* shards will be executed.
* </p>
*/
-public class TransactionProxy implements DOMStoreReadWriteTransaction {
+public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIdentifier> implements DOMStoreReadWriteTransaction {
public static enum TransactionType {
READ_ONLY,
WRITE_ONLY,
READ_WRITE;
- public static TransactionType fromInt(int type) {
- if(type == WRITE_ONLY.ordinal()) {
- return WRITE_ONLY;
- } else if(type == READ_WRITE.ordinal()) {
- return READ_WRITE;
- } else if(type == READ_ONLY.ordinal()) {
- return READ_ONLY;
- } else {
- throw new IllegalArgumentException("In TransactionType enum value" + type);
+ // Cache all values
+ private static final TransactionType[] VALUES = values();
+
+ public static TransactionType fromInt(final int type) {
+ try {
+ return VALUES[type];
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException("In TransactionType enum value " + type, e);
}
}
}
+ private static enum TransactionState {
+ OPEN,
+ READY,
+ CLOSED,
+ }
+
static final Mapper<Throwable, Throwable> SAME_FAILURE_TRANSFORMER =
new Mapper<Throwable, Throwable>() {
@Override
private static final FiniteDuration CREATE_TX_TRY_INTERVAL =
FiniteDuration.create(1, TimeUnit.SECONDS);
- /**
- * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
- * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
- * trickery to clean up its internal thread when the bundle is unloaded.
- */
- private static final FinalizableReferenceQueue phantomReferenceQueue =
- new FinalizableReferenceQueue();
-
- /**
- * This stores the TransactionProxyCleanupPhantomReference instances statically, This is
- * necessary because PhantomReferences need a hard reference so they're not garbage collected.
- * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map
- * and thus becomes eligible for garbage collection.
- */
- private static final Map<TransactionProxyCleanupPhantomReference,
- TransactionProxyCleanupPhantomReference> phantomReferenceCache =
- new ConcurrentHashMap<>();
-
- /**
- * A PhantomReference that closes remote transactions for a TransactionProxy when it's
- * garbage collected. This is used for read-only transactions as they're not explicitly closed
- * by clients. So the only way to detect that a transaction is no longer in use and it's safe
- * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed
- * but TransactionProxy instances should generally be short-lived enough to avoid being moved
- * to the old generation space and thus should be cleaned up in a timely manner as the GC
- * runs on the young generation (eden, swap1...) space much more frequently.
- */
- private static class TransactionProxyCleanupPhantomReference
- extends FinalizablePhantomReference<TransactionProxy> {
-
- private final List<ActorSelection> remoteTransactionActors;
- private final AtomicBoolean remoteTransactionActorsMB;
- private final ActorContext actorContext;
- private final TransactionIdentifier identifier;
-
- protected TransactionProxyCleanupPhantomReference(TransactionProxy referent) {
- super(referent, phantomReferenceQueue);
-
- // Note we need to cache the relevant fields from the TransactionProxy as we can't
- // have a hard reference to the TransactionProxy instance itself.
-
- remoteTransactionActors = referent.remoteTransactionActors;
- remoteTransactionActorsMB = referent.remoteTransactionActorsMB;
- actorContext = referent.actorContext;
- identifier = referent.identifier;
- }
-
- @Override
- public void finalizeReferent() {
- LOG.trace("Cleaning up {} Tx actors for TransactionProxy {}",
- remoteTransactionActors.size(), identifier);
-
- phantomReferenceCache.remove(this);
-
- // Access the memory barrier volatile to ensure all previous updates to the
- // remoteTransactionActors list are visible to this thread.
-
- if(remoteTransactionActorsMB.get()) {
- for(ActorSelection actor : remoteTransactionActors) {
- LOG.trace("Sending CloseTransaction to {}", actor);
- actorContext.sendOperationAsync(actor, CloseTransaction.INSTANCE.toSerializable());
- }
- }
- }
- }
-
/**
* Stores the remote Tx actors for each requested data store path to be used by the
* PhantomReference to close the remote Tx's. This is only used for read-only Tx's. The
* remoteTransactionActors list so they will be visible to the thread accessing the
* PhantomReference.
*/
- private List<ActorSelection> remoteTransactionActors;
- private volatile AtomicBoolean remoteTransactionActorsMB;
+ List<ActorSelection> remoteTransactionActors;
+ volatile AtomicBoolean remoteTransactionActorsMB;
/**
* Stores the create transaction results per shard.
private final Map<String, TransactionFutureCallback> txFutureCallbackMap = new HashMap<>();
private final TransactionType transactionType;
- private final ActorContext actorContext;
- private final TransactionIdentifier identifier;
+ final ActorContext actorContext;
private final String transactionChainId;
private final SchemaContext schemaContext;
- private boolean inReadyState;
+ private TransactionState state = TransactionState.OPEN;
private volatile boolean initialized;
private Semaphore operationLimiter;
this(actorContext, transactionType, "");
}
- public TransactionProxy(ActorContext actorContext, TransactionType transactionType,
- String transactionChainId) {
+ public TransactionProxy(ActorContext actorContext, TransactionType transactionType, String transactionChainId) {
+ super(createIdentifier(actorContext));
this.actorContext = Preconditions.checkNotNull(actorContext,
"actorContext should not be null");
this.transactionType = Preconditions.checkNotNull(transactionType,
"schemaContext should not be null");
this.transactionChainId = transactionChainId;
+ LOG.debug("Created txn {} of type {} on chain {}", getIdentifier(), transactionType, transactionChainId);
+ }
+
+ private static TransactionIdentifier createIdentifier(ActorContext actorContext) {
String memberName = actorContext.getCurrentMemberName();
- if(memberName == null){
+ if (memberName == null) {
memberName = "UNKNOWN-MEMBER";
}
- this.identifier = new TransactionIdentifier(memberName, counter.getAndIncrement());
-
- LOG.debug("Created txn {} of type {} on chain {}", identifier, transactionType, transactionChainId);
+ return new TransactionIdentifier(memberName, counter.getAndIncrement());
}
@VisibleForTesting
List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if(transactionContext != null) {
- recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
+ if (transactionContext != null) {
+ transactionContext.copyRecordedOperationFutures(recordedOperationFutures);
}
}
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Read operation on write-only transaction is not allowed");
- LOG.debug("Tx {} read {}", identifier, path);
+ LOG.debug("Tx {} read {}", getIdentifier(), path);
throttleOperation();
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Exists operation on write-only transaction is not allowed");
- LOG.debug("Tx {} exists {}", identifier, path);
+ LOG.debug("Tx {} exists {}", getIdentifier(), path);
throttleOperation();
private void checkModificationState() {
Preconditions.checkState(transactionType != TransactionType.READ_ONLY,
"Modification operation on read-only transaction is not allowed");
- Preconditions.checkState(!inReadyState,
+ Preconditions.checkState(state == TransactionState.OPEN,
"Transaction is sealed - further modifications are not allowed");
}
}
}
-
@Override
public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkModificationState();
- LOG.debug("Tx {} write {}", identifier, path);
+ LOG.debug("Tx {} write {}", getIdentifier(), path);
throttleOperation();
checkModificationState();
- LOG.debug("Tx {} merge {}", identifier, path);
+ LOG.debug("Tx {} merge {}", getIdentifier(), path);
throttleOperation();
checkModificationState();
- LOG.debug("Tx {} delete {}", identifier, path);
+ LOG.debug("Tx {} delete {}", getIdentifier(), path);
throttleOperation();
});
}
- @Override
- public DOMStoreThreePhaseCommitCohort ready() {
+    private boolean seal(final TransactionState newState) {
+        // A transition is only permitted out of the OPEN state; any other state
+        // means the transaction was already readied or closed.
+        if (state != TransactionState.OPEN) {
+            return false;
+        }
+
+        state = newState;
+        return true;
+    }
- checkModificationState();
+ @Override
+ public AbstractThreePhaseCommitCohort ready() {
+ Preconditions.checkState(transactionType != TransactionType.READ_ONLY,
+ "Read-only transactions cannot be readied");
- inReadyState = true;
+ final boolean success = seal(TransactionState.READY);
+ Preconditions.checkState(success, "Transaction %s is %s, it cannot be readied", getIdentifier(), state);
- LOG.debug("Tx {} Readying {} transactions for commit", identifier,
+ LOG.debug("Tx {} Readying {} transactions for commit", getIdentifier(),
txFutureCallbackMap.size());
- if(txFutureCallbackMap.size() == 0) {
- onTransactionReady(Collections.<Future<ActorSelection>>emptyList());
+ if (txFutureCallbackMap.isEmpty()) {
+ TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
return NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
}
throttleOperation(txFutureCallbackMap.size());
- List<Future<ActorSelection>> cohortFutures = Lists.newArrayList();
-
+ List<Future<ActorSelection>> cohortFutures = new ArrayList<>(txFutureCallbackMap.size());
for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
- LOG.debug("Tx {} Readying transaction for shard {} chain {}", identifier,
+ LOG.debug("Tx {} Readying transaction for shard {} chain {}", getIdentifier(),
txFutureCallback.getShardName(), transactionChainId);
final TransactionContext transactionContext = txFutureCallback.getTransactionContext();
cohortFutures.add(future);
}
- onTransactionReady(cohortFutures);
-
return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures,
- identifier.toString());
- }
-
- /**
- * Method for derived classes to be notified when the transaction has been readied.
- *
- * @param cohortFutures the cohort Futures for each shard transaction.
- */
- protected void onTransactionReady(List<Future<ActorSelection>> cohortFutures) {
- }
-
- @Override
- public Object getIdentifier() {
- return this.identifier;
+ getIdentifier().toString());
}
@Override
public void close() {
+ if (!seal(TransactionState.CLOSED)) {
+ if (state == TransactionState.CLOSED) {
+ // Idempotent no-op as per AutoCloseable recommendation
+ return;
+ }
+
+ throw new IllegalStateException(String.format("Transaction %s is ready, it cannot be closed",
+ getIdentifier()));
+ }
+
for (TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
@Override
return actorContext;
}
- /**
- * Interfaces for transaction operations to be invoked later.
- */
- private static interface TransactionOperation {
- void invoke(TransactionContext transactionContext);
- }
-
/**
* Implements a Future OnComplete callback for a CreateTransaction message. This class handles
* retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
if(transactionType == TransactionType.WRITE_ONLY &&
actorContext.getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
- identifier, primaryShard);
+ getIdentifier(), primaryShard);
// For write-only Tx's we prepare the transaction modifications directly on the shard actor
// to avoid the overhead of creating a separate transaction actor.
boolean invokeOperation = true;
synchronized(txOperationsOnComplete) {
if(transactionContext == null) {
- LOG.debug("Tx {} Adding operation on complete", identifier);
+ LOG.debug("Tx {} Adding operation on complete", getIdentifier());
invokeOperation = false;
txOperationsOnComplete.add(operation);
*/
private void tryCreateTransaction() {
if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Primary shard {} found - trying create transaction", identifier, primaryShard);
+ LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(), primaryShard);
}
- Object serializedCreateMessage = new CreateTransaction(identifier.toString(),
+ Object serializedCreateMessage = new CreateTransaction(getIdentifier().toString(),
TransactionProxy.this.transactionType.ordinal(),
getTransactionChainId()).toSerializable();
// is ok.
if(--createTxTries > 0) {
LOG.debug("Tx {} Shard {} has no leader yet - scheduling create Tx retry",
- identifier, shardName);
+ getIdentifier(), shardName);
actorContext.getActorSystem().scheduler().scheduleOnce(CREATE_TX_TRY_INTERVAL,
new Runnable() {
// TransactionContext until after we've executed all cached TransactionOperations.
TransactionContext localTransactionContext;
if(failure != null) {
- LOG.debug("Tx {} Creating NoOpTransaction because of error", identifier, failure);
+ LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure);
- localTransactionContext = new NoOpTransactionContext(failure, identifier, operationLimiter);
- } else if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
+ localTransactionContext = new NoOpTransactionContext(failure, getIdentifier(), operationLimiter);
+ } else if (CreateTransactionReply.SERIALIZABLE_CLASS.equals(response.getClass())) {
localTransactionContext = createValidTransactionContext(
CreateTransactionReply.fromSerializable(response));
} else {
IllegalArgumentException exception = new IllegalArgumentException(String.format(
"Invalid reply type %s for CreateTransaction", response.getClass()));
- localTransactionContext = new NoOpTransactionContext(exception, identifier, operationLimiter);
+ localTransactionContext = new NoOpTransactionContext(exception, getIdentifier(), operationLimiter);
}
executeTxOperatonsOnComplete(localTransactionContext);
}
private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
- LOG.debug("Tx {} Received {}", identifier, reply);
+ LOG.debug("Tx {} Received {}", getIdentifier(), reply);
return createValidTransactionContext(actorContext.actorSelection(reply.getTransactionPath()),
reply.getTransactionPath(), reply.getVersion());
remoteTransactionActors = Lists.newArrayList();
remoteTransactionActorsMB = new AtomicBoolean();
- TransactionProxyCleanupPhantomReference cleanup =
- new TransactionProxyCleanupPhantomReference(TransactionProxy.this);
- phantomReferenceCache.put(cleanup, cleanup);
+ TransactionProxyCleanupPhantomReference.track(TransactionProxy.this);
}
// Add the actor to the remoteTransactionActors list for access by the
boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
if(remoteTransactionVersion < DataStoreVersions.LITHIUM_VERSION) {
- return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, identifier,
+ return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, getIdentifier(),
transactionChainId, actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion,
operationCompleter);
} else if (transactionType == TransactionType.WRITE_ONLY &&
actorContext.getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
- return new WriteOnlyTransactionContextImpl(transactionActor, identifier, transactionChainId,
+ return new WriteOnlyTransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
} else {
- return new TransactionContextImpl(transactionActor, identifier, transactionChainId,
+ return new TransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
}
}
}
-
- private static class NoOpDOMStoreThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
- static NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
-
- private static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS =
- com.google.common.util.concurrent.Futures.immediateFuture(null);
- private static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS =
- com.google.common.util.concurrent.Futures.immediateFuture(Boolean.TRUE);
-
- private NoOpDOMStoreThreePhaseCommitCohort() {
- }
-
- @Override
- public ListenableFuture<Boolean> canCommit() {
- return IMMEDIATE_BOOLEAN_SUCCESS;
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- return IMMEDIATE_VOID_SUCCESS;
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- return IMMEDIATE_VOID_SUCCESS;
- }
-
- @Override
- public ListenableFuture<Void> commit() {
- return IMMEDIATE_VOID_SUCCESS;
- }
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import com.google.common.base.FinalizablePhantomReference;
+import com.google.common.base.FinalizableReferenceQueue;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A PhantomReference that closes remote transactions for a TransactionProxy when it's
+ * garbage collected. This is used for read-only transactions as they're not explicitly closed
+ * by clients. So the only way to detect that a transaction is no longer in use and it's safe
+ * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed
+ * but TransactionProxy instances should generally be short-lived enough to avoid being moved
+ * to the old generation space and thus should be cleaned up in a timely manner as the GC
+ * runs on the young generation (eden, survivor...) space much more frequently.
+ */
+final class TransactionProxyCleanupPhantomReference
+        extends FinalizablePhantomReference<TransactionProxy> {
+    private static final Logger LOG = LoggerFactory.getLogger(TransactionProxyCleanupPhantomReference.class);
+    /**
+     * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
+     * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
+     * trickery to clean up its internal thread when the bundle is unloaded.
+     */
+    private static final FinalizableReferenceQueue phantomReferenceQueue =
+            new FinalizableReferenceQueue();
+
+    /**
+     * This stores the TransactionProxyCleanupPhantomReference instances statically, This is
+     * necessary because PhantomReferences need a hard reference so they're not garbage collected.
+     * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map
+     * and thus becomes eligible for garbage collection.
+     */
+    private static final Map<TransactionProxyCleanupPhantomReference,
+                             TransactionProxyCleanupPhantomReference> phantomReferenceCache =
+            new ConcurrentHashMap<>();
+
+    // Cached copies of the referent's fields: a PhantomReference must not hold a
+    // hard reference to its referent or the referent can never be collected.
+    private final List<ActorSelection> remoteTransactionActors;
+    private final AtomicBoolean remoteTransactionActorsMB;
+    private final ActorContext actorContext;
+    private final TransactionIdentifier identifier;
+
+    private TransactionProxyCleanupPhantomReference(TransactionProxy referent) {
+        super(referent, phantomReferenceQueue);
+
+        // Note we need to cache the relevant fields from the TransactionProxy as we can't
+        // have a hard reference to the TransactionProxy instance itself.
+
+        remoteTransactionActors = referent.remoteTransactionActors;
+        remoteTransactionActorsMB = referent.remoteTransactionActorsMB;
+        actorContext = referent.actorContext;
+        identifier = referent.getIdentifier();
+    }
+
+    /**
+     * Registers a cleanup phantom reference for the given TransactionProxy. The new
+     * reference is stored in the static cache so it is not itself garbage collected
+     * before {@link #finalizeReferent()} has a chance to run.
+     */
+    static void track(TransactionProxy referent) {
+        final TransactionProxyCleanupPhantomReference ret = new TransactionProxyCleanupPhantomReference(referent);
+        phantomReferenceCache.put(ret, ret);
+    }
+
+    @Override
+    public void finalizeReferent() {
+        LOG.trace("Cleaning up {} Tx actors for TransactionProxy {}",
+                remoteTransactionActors.size(), identifier);
+
+        // Drop ourselves from the cache so this reference object can be collected too.
+        phantomReferenceCache.remove(this);
+
+        // Access the memory barrier volatile to ensure all previous updates to the
+        // remoteTransactionActors list are visible to this thread.
+
+        if(remoteTransactionActorsMB.get()) {
+            for(ActorSelection actor : remoteTransactionActors) {
+                LOG.trace("Sending CloseTransaction to {}", actor);
+                actorContext.sendOperationAsync(actor, CloseTransaction.INSTANCE.toSerializable());
+            }
+        }
+    }
+}
\ No newline at end of file
Preconditions.checkState(timerContext != null, "Call run before success");
timerContext.stop();
+ double newRateLimit = calculateNewRateLimit(commitTimer, actorContext.getDatastoreContext());
+
+ LOG.debug("Data Store {} commit rateLimit adjusted to {}", actorContext.getDataStoreType(), newRateLimit);
+
+ actorContext.setTxCreationLimit(newRateLimit);
+ }
+
+    /**
+     * Rate-limiter callback for a failed (timed-out) commit. Intentionally a no-op:
+     * timings skewed by the timeout would not yield a useful rate limit.
+     */
+    @Override
+    public void failure() {
+        // This would mean we couldn't get a transaction completed in 30 seconds which is
+        // the default transaction commit timeout. Using the timeout information to figure out the rate limit is
+        // not going to be useful - so we leave it as it is
+    }
+
+ private static double calculateNewRateLimit(Timer commitTimer, DatastoreContext context) {
+ if(commitTimer == null) {
+ // This can happen in unit tests.
+ return 0;
+ }
+
Snapshot timerSnapshot = commitTimer.getSnapshot();
double newRateLimit = 0;
- long commitTimeoutInSeconds = actorContext.getDatastoreContext()
- .getShardTransactionCommitTimeoutInSeconds();
+ long commitTimeoutInSeconds = context.getShardTransactionCommitTimeoutInSeconds();
long commitTimeoutInNanos = TimeUnit.SECONDS.toNanos(commitTimeoutInSeconds);
// Find the time that it takes for transactions to get executed in every 10th percentile
if(percentileTimeInNanos > 0) {
// Figure out the rate limit for the i*10th percentile in nanos
- double percentileRateLimit = ((double) commitTimeoutInNanos / percentileTimeInNanos);
+ double percentileRateLimit = (commitTimeoutInNanos / percentileTimeInNanos);
// Add the percentileRateLimit to the total rate limit
newRateLimit += percentileRateLimit;
}
// Compute the rate limit per second
- newRateLimit = newRateLimit/(commitTimeoutInSeconds*10);
-
- LOG.debug("Data Store {} commit rateLimit adjusted to {}", actorContext.getDataStoreType(), newRateLimit);
-
- actorContext.setTxCreationLimit(newRateLimit);
+ return newRateLimit/(commitTimeoutInSeconds*10);
}
- @Override
- public void failure() {
- // This would mean we couldn't get a transaction completed in 30 seconds which is
- // the default transaction commit timeout. Using the timeout information to figure out the rate limit is
- // not going to be useful - so we leave it as it is
+    /**
+     * Adjusts the transaction creation rate limit for a data store that has not yet
+     * committed any transactions, by borrowing the rate computed from another data
+     * store's commit timer that does have timing data.
+     *
+     * @param actorContext the ActorContext of the data store whose limit may be adjusted
+     */
+    public static void adjustRateLimitForUnusedTransaction(ActorContext actorContext) {
+        // Unused transactions in one data store can artificially limit the rate for other data stores
+        // if the first data store's rate is still at a lower initial rate since the front-end creates
+        // transactions in each data store up-front even though the client may not actually submit changes.
+        // So we may have to adjust the rate for data stores with unused transactions.
+
+        // First calculate the current rate for the data store. If it's 0 then there have been no
+        // actual transactions committed to the data store.
+
+        double newRateLimit = calculateNewRateLimit(actorContext.getOperationTimer(COMMIT),
+                actorContext.getDatastoreContext());
+        // Exact floating-point comparison is deliberate: calculateNewRateLimit returns
+        // exactly 0 when it has no timing data to work with (e.g. a null timer).
+        if(newRateLimit == 0.0) {
+            // Since we have no rate data for unused Tx's data store, adjust to the rate from another
+            // data store that does have rate data.
+            for(String datastoreType: DatastoreContext.getGlobalDatastoreTypes()) {
+                if(datastoreType.equals(actorContext.getDataStoreType())) {
+                    continue;
+                }
+
+                newRateLimit = calculateNewRateLimit(actorContext.getOperationTimer(datastoreType, COMMIT),
+                        actorContext.getDatastoreContext());
+                if(newRateLimit > 0.0) {
+                    LOG.debug("On unused Tx - data Store {} commit rateLimit adjusted to {}",
+                            actorContext.getDataStoreType(), newRateLimit);
+
+                    actorContext.setTxCreationLimit(newRateLimit);
+                    // Stop at the first data store that yields a usable rate.
+                    break;
+                }
+            }
+        }
+    }
}
\ No newline at end of file
@Override
public Future<ActorSelection> readyTransaction() {
LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- identifier, recordedOperationFutures.size());
+ getIdentifier(), recordedOperationCount());
// Send the remaining batched modifications if any.
@Override
public void deleteData(YangInstanceIdentifier path) {
- recordedOperationFutures.add(executeOperationAsync(
+ recordOperationFuture(executeOperationAsync(
new DeleteData(path, getRemoteTransactionVersion())));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- recordedOperationFutures.add(executeOperationAsync(
+ recordOperationFuture(executeOperationAsync(
new MergeData(path, data, getRemoteTransactionVersion())));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- recordedOperationFutures.add(executeOperationAsync(
+ recordOperationFuture(executeOperationAsync(
new WriteData(path, data, getRemoteTransactionVersion())));
}
@Override
public Future<ActorSelection> readyTransaction() {
LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- identifier, recordedOperationFutures.size());
+ getIdentifier(), recordedOperationCount());
// Send the ReadyTransaction message to the Tx actor.
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+/**
+ * JMX bean for general datastore info.
+ *
+ * @author Thomas Pantelis
+ */
+public interface DatastoreInfoMXBean {
+
+    /**
+     * Returns the data store's current transaction creation rate limit.
+     *
+     * @return the configured transaction creation rate limit
+     */
+    double getTransactionCreationRateLimit();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+
+/**
+ * Implementation of DatastoreInfoMXBean.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreInfoMXBeanImpl extends AbstractMXBean implements DatastoreInfoMXBean {
+
+    // Source of the runtime statistics exposed by this bean.
+    private final ActorContext actorContext;
+
+    /**
+     * @param mxBeanType the JMX bean type name under which this bean is registered
+     * @param actorContext the ActorContext supplying the transaction creation rate limit
+     */
+    public DatastoreInfoMXBeanImpl(String mxBeanType, ActorContext actorContext) {
+        super("GeneralRuntimeInfo", mxBeanType, null);
+        this.actorContext = actorContext;
+    }
+
+
+    @Override
+    public double getTransactionCreationRateLimit() {
+        return actorContext.getTxCreationLimit();
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+/**
+ * Serializable singleton message, presumably requesting closure of a
+ * DataTreeChangeListener registration (the handler is not visible in this patch —
+ * confirm against the registration actor). {@code readResolve} substitutes the
+ * singleton instance on deserialization.
+ */
+public final class CloseDataTreeChangeListenerRegistration implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private static final CloseDataTreeChangeListenerRegistration INSTANCE = new CloseDataTreeChangeListenerRegistration();
+
+    private CloseDataTreeChangeListenerRegistration() {
+        // Use getInstance() instead
+    }
+
+    public static CloseDataTreeChangeListenerRegistration getInstance() {
+        return INSTANCE;
+    }
+
+    private Object readResolve() throws ObjectStreamException {
+        return INSTANCE;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+/**
+ * Serializable singleton reply to a CloseDataTreeChangeListenerRegistration request
+ * (per naming; the sender is not visible in this patch). {@code readResolve}
+ * substitutes the singleton instance on deserialization.
+ */
+public final class CloseDataTreeChangeListenerRegistrationReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private static final CloseDataTreeChangeListenerRegistrationReply INSTANCE = new CloseDataTreeChangeListenerRegistrationReply();
+
+    private CloseDataTreeChangeListenerRegistrationReply() {
+        // Use getInstance() instead
+    }
+
+    public static CloseDataTreeChangeListenerRegistrationReply getInstance() {
+        return INSTANCE;
+    }
+
+    private Object readResolve() throws ObjectStreamException {
+        return INSTANCE;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * A message about a DataTree having been changed. The message is not
+ * serializable on purpose. For delegating the change across cluster nodes,
+ * this needs to be intercepted by a local agent and forwarded as
+ * a {@link DataTreeDelta}.
+ */
+public final class DataTreeChanged {
+    private final Collection<DataTreeCandidate> changes;
+
+    public DataTreeChanged(final Collection<DataTreeCandidate> changes) {
+        // NOTE(review): the collection is retained without a defensive copy and
+        // getChanges() returns it directly, so callers must not mutate it. Confirm
+        // this is intentional (likely a deliberate choice for a local-only message).
+        this.changes = Preconditions.checkNotNull(changes);
+    }
+
+    /**
+     * Return the data changes.
+     *
+     * @return Change events
+     */
+    public Collection<DataTreeCandidate> getChanges() {
+        return changes;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+/**
+ * Serializable singleton reply acknowledging a {@link DataTreeChanged} message
+ * (per naming; the handler is not visible in this patch). {@code readResolve}
+ * substitutes the singleton instance on deserialization.
+ */
+public final class DataTreeChangedReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private static final DataTreeChangedReply INSTANCE = new DataTreeChangedReply();
+
+    private DataTreeChangedReply() {
+        // Use getInstance() instead
+    }
+
+    public static DataTreeChangedReply getInstance() {
+        return INSTANCE;
+    }
+
+    private Object readResolve() throws ObjectStreamException {
+        return INSTANCE;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import akka.actor.ActorRef;
+import com.google.common.base.Preconditions;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Request a {@link org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener} registration be made on the shard
+ * leader.
+ */
+public final class RegisterDataTreeChangeListener implements Externalizable {
+    private static final long serialVersionUID = 1L;
+
+    private ActorRef dataTreeChangeListenerPath;
+    private YangInstanceIdentifier path;
+
+    /**
+     * Public no-argument constructor, required by the {@link Externalizable} contract:
+     * deserialization instantiates the class through this constructor before invoking
+     * {@link #readExternal(ObjectInput)}. Without it, deserialization fails with an
+     * {@code InvalidClassException}. Not intended for direct use.
+     */
+    public RegisterDataTreeChangeListener() {
+        // for Externalizable
+    }
+
+    public RegisterDataTreeChangeListener(final YangInstanceIdentifier path, final ActorRef dataTreeChangeListenerPath) {
+        this.path = Preconditions.checkNotNull(path);
+        this.dataTreeChangeListenerPath = Preconditions.checkNotNull(dataTreeChangeListenerPath);
+    }
+
+    public YangInstanceIdentifier getPath() {
+        return path;
+    }
+
+    public ActorRef getDataTreeChangeListenerPath() {
+        return dataTreeChangeListenerPath;
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeObject(dataTreeChangeListenerPath);
+        SerializationUtils.serializePath(path, out);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        dataTreeChangeListenerPath = (ActorRef) in.readObject();
+        path = SerializationUtils.deserializePath(in);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import akka.actor.ActorRef;
+import com.google.common.base.Preconditions;
+import java.io.Serializable;
+
+/**
+ * Successful reply to a {@link RegisterDataTreeChangeListener} request.
+ */
+public final class RegisterDataTreeChangeListenerReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+    // NOTE(review): serializing this message relies on ActorRef being serializable
+    // in the Akka runtime — confirm for remote deliveries.
+    private final ActorRef listenerRegistrationPath;
+
+    public RegisterDataTreeChangeListenerReply(final ActorRef listenerRegistrationPath) {
+        this.listenerRegistrationPath = Preconditions.checkNotNull(listenerRegistrationPath);
+    }
+
+    /**
+     * Returns the registration actor reference supplied at construction.
+     *
+     * @return the listener registration actor reference, never null
+     */
+    public ActorRef getListenerRegistrationPath() {
+        return listenerRegistrationPath;
+    }
+}
import akka.dispatch.OnComplete;
import akka.pattern.AskTimeoutException;
import akka.util.Timeout;
-import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*/
public class ActorContext {
private static final Logger LOG = LoggerFactory.getLogger(ActorContext.class);
- private static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
private static final String DISTRIBUTED_DATA_STORE_METRIC_REGISTRY = "distributed-data-store";
private static final String METRIC_RATE = "rate";
- private static final String DOMAIN = "org.opendaylight.controller.cluster.datastore";
private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
new Mapper<Throwable, Throwable>() {
@Override
private Timeout operationTimeout;
private final String selfAddressHostPort;
private RateLimiter txRateLimiter;
- private final MetricRegistry metricRegistry = new MetricRegistry();
- private final JmxReporter jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(DOMAIN).build();
private final int transactionOutstandingOperationLimit;
private Timeout transactionCommitOperationTimeout;
private Timeout shardInitializationTimeout;
private volatile SchemaContext schemaContext;
private volatile boolean updated;
+ private final MetricRegistry metricRegistry = MetricsReporter.getInstance(DatastoreContext.METRICS_DOMAIN).getMetricsRegistry();
public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
ClusterWrapper clusterWrapper, Configuration configuration) {
}
transactionOutstandingOperationLimit = new CommonConfig(this.getActorSystem().settings().config()).getMailBoxCapacity();
- jmxReporter.start();
-
}
private void setCachedProperties() {
* @return
*/
public Timer getOperationTimer(String operationName){
- final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, datastoreContext.getDataStoreType(), operationName, METRIC_RATE);
+ return getOperationTimer(datastoreContext.getDataStoreType(), operationName);
+ }
+
+ public Timer getOperationTimer(String dataStoreType, String operationName){
+ final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType,
+ operationName, METRIC_RATE);
return metricRegistry.timer(rate);
}
import akka.dispatch.Dispatchers;
import akka.dispatch.OnComplete;
import akka.japi.Creator;
-import akka.japi.Procedure;
import akka.pattern.Patterns;
-import akka.persistence.SnapshotSelectionCriteria;
import akka.testkit.TestActorRef;
import akka.util.Timeout;
import com.google.common.base.Function;
import org.junit.Test;
import org.mockito.InOrder;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
public void testCreateSnapshot(final boolean persistent, final String shardActorName) throws Exception{
final AtomicReference<Object> savedSnapshot = new AtomicReference<>();
- class DelegatingPersistentDataProvider implements DataPersistenceProvider {
- DataPersistenceProvider delegate;
-
- DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public boolean isRecoveryApplicable() {
- return delegate.isRecoveryApplicable();
- }
-
- @Override
- public <T> void persist(T o, Procedure<T> procedure) {
- delegate.persist(o, procedure);
+ class TestPersistentDataProvider extends DelegatingPersistentDataProvider {
+ TestPersistentDataProvider(DataPersistenceProvider delegate) {
+ super(delegate);
}
@Override
public void saveSnapshot(Object o) {
savedSnapshot.set(o);
- delegate.saveSnapshot(o);
- }
-
- @Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
- delegate.deleteSnapshots(criteria);
- }
-
- @Override
- public void deleteMessages(long sequenceNumber) {
- delegate.deleteMessages(sequenceNumber);
+ super.saveSnapshot(o);
}
}
dataStoreContextBuilder.persistent(persistent);
-
-
new ShardTestKit(getSystem()) {{
final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
protected TestShard(ShardIdentifier name, Map<String, String> peerAddresses,
DatastoreContext datastoreContext, SchemaContext schemaContext) {
super(name, peerAddresses, datastoreContext, schemaContext);
- }
-
- DelegatingPersistentDataProvider delegating;
-
- protected DataPersistenceProvider persistence() {
- if(delegating == null) {
- delegating = new DelegatingPersistentDataProvider(super.persistence());
- }
- return delegating;
+ setPersistence(new TestPersistentDataProvider(super.persistence()));
}
@Override
TestActorRef<Shard> shard1 = TestActorRef.create(getSystem(),
persistentProps, "testPersistence1");
- assertTrue("Recovery Applicable", shard1.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ assertTrue("Recovery Applicable", shard1.underlyingActor().persistence().isRecoveryApplicable());
shard1.tell(PoisonPill.getInstance(), ActorRef.noSender());
TestActorRef<Shard> shard2 = TestActorRef.create(getSystem(),
nonPersistentProps, "testPersistence2");
- assertFalse("Recovery Not Applicable", shard2.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ assertFalse("Recovery Not Applicable", shard2.underlyingActor().persistence().isRecoveryApplicable());
shard2.tell(PoisonPill.getInstance(), ActorRef.noSender());
TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(), "testOnDatastoreContext");
assertEquals("isRecoveryApplicable", true,
- shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ shard.underlyingActor().persistence().isRecoveryApplicable());
waitUntilLeader(shard);
shard.tell(dataStoreContextBuilder.persistent(false).build(), ActorRef.noSender());
assertEquals("isRecoveryApplicable", false,
- shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ shard.underlyingActor().persistence().isRecoveryApplicable());
shard.tell(dataStoreContextBuilder.persistent(true).build(), ActorRef.noSender());
assertEquals("isRecoveryApplicable", true,
- shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ shard.underlyingActor().persistence().isRecoveryApplicable());
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_ONLY;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_WRITE;
testReadWithExceptionOnInitialCreateTransaction(new TestException());
}
- @Test(expected = TestException.class)
- public void testReadWithPriorRecordingOperationFailure() throws Throwable {
- doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
- when(mockActorContext).getDatastoreContext();
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectFailedBatchedModifications(actorRef);
-
- doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedReadData());
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- try {
- propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
- } finally {
- verify(mockActorContext, times(0)).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedReadData());
- }
- }
-
@Test
public void testReadWithPriorRecordingOperationSuccessful() throws Throwable {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
}
- @Test(expected = TestException.class)
- public void testExistsWithPriorRecordingOperationFailure() throws Throwable {
- doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
- when(mockActorContext).getDatastoreContext();
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectFailedBatchedModifications(actorRef);
-
- doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedDataExists());
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- try {
- propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
- } finally {
- verify(mockActorContext, times(0)).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedDataExists());
- }
- }
-
@Test
public void testExistsWithPriorRecordingOperationSuccessful() throws Throwable {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mock;
+import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
}
+ @Test
+ public void testAdjustRateLimitForUnusedTransaction() {
+ doReturn(commitTimer).when(actorContext).getOperationTimer("one", "commit");
+ doReturn("one").when(actorContext).getDataStoreType();
+
+ Timer commitTimer2 = Mockito.mock(Timer.class);
+ Snapshot commitSnapshot2 = Mockito.mock(Snapshot.class);
+
+ doReturn(commitSnapshot2).when(commitTimer2).getSnapshot();
+
+ doReturn(commitTimer2).when(actorContext).getOperationTimer("two", "commit");
+
+ DatastoreContext.newBuilder().dataStoreType("one").build();
+ DatastoreContext.newBuilder().dataStoreType("two").build();
+
+ doReturn(TimeUnit.MICROSECONDS.toNanos(500) * 1D).when(commitSnapshot).getValue(1 * 0.1);
+
+ TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+ verify(actorContext, never()).setTxCreationLimit(anyDouble());
+
+ Mockito.reset(commitSnapshot);
+
+ TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+ verify(actorContext, never()).setTxCreationLimit(anyDouble());
+
+ System.out.println(""+TimeUnit.SECONDS.toNanos(30)/TimeUnit.MICROSECONDS.toNanos(100));
+
+ doReturn(TimeUnit.MICROSECONDS.toNanos(100) * 1D).when(commitSnapshot2).getValue(1 * 0.1);
+
+ TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+ verify(actorContext).setTxCreationLimit(Matchers.doubleThat(approximately(1000)));
+ }
+
public Matcher<Double> approximately(final double val){
return new BaseMatcher<Double>() {
@Override
public boolean matches(Object o) {
Double aDouble = (Double) o;
- return aDouble > val && aDouble < val+1;
+ return aDouble >= val && aDouble <= val+1;
}
@Override
*/
package org.opendaylight.controller.md.sal.dom.api;
+import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import java.io.Serializable;
import java.util.Iterator;
return oi.hasNext() ? -1 : 0;
}
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("datastore", datastoreType).add("root", rootIdentifier).toString();
+ }
}
import com.google.common.util.concurrent.ListenableFuture;
import com.lmax.disruptor.EventHandler;
import com.lmax.disruptor.InsufficientCapacityException;
-import com.lmax.disruptor.SleepingWaitStrategy;
+import com.lmax.disruptor.PhasedBackoffWaitStrategy;
import com.lmax.disruptor.WaitStrategy;
import com.lmax.disruptor.dsl.Disruptor;
import com.lmax.disruptor.dsl.ProducerType;
*/
public final class DOMNotificationRouter implements AutoCloseable, DOMNotificationPublishService, DOMNotificationService {
private static final ListenableFuture<Void> NO_LISTENERS = Futures.immediateFuture(null);
- private static final WaitStrategy DEFAULT_STRATEGY = new SleepingWaitStrategy();
+ private static final WaitStrategy DEFAULT_STRATEGY = PhasedBackoffWaitStrategy.withLock(1L, 30L, TimeUnit.MILLISECONDS);
private static final EventHandler<DOMNotificationRouterEvent> DISPATCH_NOTIFICATIONS = new EventHandler<DOMNotificationRouterEvent>() {
@Override
public void onEvent(final DOMNotificationRouterEvent event, final long sequence, final boolean endOfBatch) throws Exception {
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
+import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Preconditions;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.slf4j.Logger;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
/**
- * Abstract DOM Store Transaction
+ * Abstract DOM Store Transaction.
*
* Convenience super implementation of DOM Store transaction which provides
* common implementation of {@link #toString()} and {@link #getIdentifier()}.
+ *
+ * It can optionally capture the context where it was allocated.
+ *
+ * @param <T> identifier type
*/
-abstract class AbstractDOMStoreTransaction implements DOMStoreTransaction {
+@Beta
+public abstract class AbstractDOMStoreTransaction<T> implements DOMStoreTransaction {
private final Throwable debugContext;
- private final Object identifier;
+ private final T identifier;
+
+ protected AbstractDOMStoreTransaction(@Nonnull final T identifier) {
+ this(identifier, false);
+ }
- protected AbstractDOMStoreTransaction(final Object identifier, final boolean debug) {
+ protected AbstractDOMStoreTransaction(@Nonnull final T identifier, final boolean debug) {
this.identifier = Preconditions.checkNotNull(identifier, "Identifier must not be null.");
this.debugContext = debug ? new Throwable().fillInStackTrace() : null;
}
@Override
- public final Object getIdentifier() {
+ public final T getIdentifier() {
return identifier;
}
- protected final void warnDebugContext(final Logger logger) {
- if (debugContext != null) {
- logger.warn("Transaction {} has been allocated in the following context", identifier, debugContext);
- }
+ /**
+ * Return the context in which this transaction was allocated.
+ *
+ * @return The context in which this transaction was allocated, or null
+ * if the context was not recorded.
+ */
+ @Nullable public final Throwable getDebugContext() {
+ return debugContext;
}
@Override
* ToStringHelper instance
* @return ToStringHelper instance which was passed in
*/
- protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
+ protected ToStringHelper addToStringAttributes(@Nonnull final ToStringHelper toStringHelper) {
return toStringHelper.add("id", identifier);
}
-}
\ No newline at end of file
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import com.google.common.annotations.Beta;
+import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} implementations,
+ * which forward most of their functionality to a backend {@link #delegate()}.
+ */
+@Beta
+public abstract class ForwardingDOMStoreThreePhaseCommitCohort extends ForwardingObject implements DOMStoreThreePhaseCommitCohort {
+ @Override
+ protected abstract DOMStoreThreePhaseCommitCohort delegate();
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ return delegate().canCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return delegate().preCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return delegate().abort();
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ return delegate().commit();
+ }
+}
</type>
<name>XSQL</name>
<data-broker>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
- <name>binding-data-broker</name>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
</data-broker>
<async-data-broker>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.odl.xsql;
import java.sql.Connection;
import java.util.logging.Logger;
import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCConnection;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class JDBCDriver implements Driver {
public static JDBCDriver drv = new JDBCDriver();
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.io.InputStream;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.Map;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class TablesResultSet implements ResultSet {
private String tables[] = null;
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.io.File;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLAdapter extends Thread implements SchemaContextListener {
private static final int SLEEP = 10000;
private String pinningFile;
private ServerSocket serverSocket = null;
private DOMDataBroker domDataBroker = null;
+ private static final String REFERENCE_FIELD_NAME = "reference";
private XSQLAdapter() {
XSQLAdapter.log("Starting Adapter");
List<Object> result = new LinkedList<Object>();
YangInstanceIdentifier instanceIdentifier = YangInstanceIdentifier
.builder()
- .node(XSQLODLUtils.getPath(table.getODLNode()).get(0))
+ .node(XSQLODLUtils.getPath(table.getFirstFromSchemaNodes()).get(0))
.toInstance();
DOMDataReadTransaction t = this.domDataBroker
.newReadOnlyTransaction();
Object node = t.read(type,
instanceIdentifier).get();
- node = XSQLODLUtils.get(node, "reference");
+ node = XSQLODLUtils.get(node, REFERENCE_FIELD_NAME);
if (node == null) {
return result;
}
-
- Map<?, ?> children = XSQLODLUtils.getChildren(node);
- for (Object c : children.values()) {
- result.add(c);
- /* I don't remember why i did this... possibly to prevent different siblings queried together
- Map<?, ?> sons = XSQLODLUtils.getChildren(c);
- for (Object child : sons.values()) {
- result.add(child);
- }*/
- }
-
+ result.add(node);
return result;
} catch (Exception err) {
XSQLAdapter.log(err);
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.io.DataInputStream;
import java.util.List;
import java.util.Map;
import java.util.Set;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLBluePrint implements DatabaseMetaData, Serializable {
private static final long serialVersionUID = 1L;
return result;
}
- public void addToBluePrintCache(XSQLBluePrintNode blNode) {
- this.tableNameToBluePrint.put(blNode.getBluePrintNodeName(), blNode);
- Map<String, XSQLBluePrintNode> map = this.odlNameToBluePrint.get(blNode
- .getODLTableName());
- if (map == null) {
- map = new HashMap<String, XSQLBluePrintNode>();
- this.odlNameToBluePrint.put(blNode.getODLTableName(), map);
+ public XSQLBluePrintNode addToBluePrintCache(XSQLBluePrintNode blNode,XSQLBluePrintNode parent) {
+ XSQLBluePrintNode existingNode = this.tableNameToBluePrint.get(blNode.getBluePrintNodeName());
+ if(existingNode!=null){
+ existingNode.mergeAugmentation(blNode);
+ return existingNode;
+ }else{
+ this.tableNameToBluePrint.put(blNode.getBluePrintNodeName(), blNode);
+ Map<String, XSQLBluePrintNode> map = this.odlNameToBluePrint.get(blNode.getODLTableName());
+ if (map == null) {
+ map = new HashMap<String, XSQLBluePrintNode>();
+ this.odlNameToBluePrint.put(blNode.getODLTableName(), map);
+ }
+ map.put(blNode.getBluePrintNodeName(), blNode);
+ if(parent!=null)
+ parent.addChild(blNode);
+ return blNode;
}
- map.put(blNode.getBluePrintNodeName(), blNode);
}
public Class<?> getGenericType(ParameterizedType type) {
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.io.Serializable;
import java.util.Map;
import java.util.Set;
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLBluePrintNode implements Serializable {
private static final long serialVersionUID = 1L;
private Set<XSQLColumn> columns = new HashSet<XSQLColumn>();
private Map<String, XSQLColumn> origNameToColumn = new HashMap<String, XSQLColumn>();
- private transient Object odlNode = null;
+ private transient Object[] odlSchemaNodes = null;
private boolean module = false;
private String bluePrintTableName = null;
private String odlTableName = null;
private String origName = null;
+ public void mergeAugmentation(XSQLBluePrintNode aug) {
+ this.relations.addAll(aug.relations);
+ this.inheritingNodes.addAll(aug.inheritingNodes);
+ this.children.addAll(aug.children);
+ this.columns.addAll(aug.columns);
+ this.origNameToColumn.putAll(aug.origNameToColumn);
+ if (aug.odlSchemaNodes != null) {
+ for (Object sn : aug.odlSchemaNodes) {
+ addToSchemaNodes(sn);
+ }
+ }
+ }
+
public XSQLBluePrintNode(String name, String _origName, int _level) {
this.level = _level;
this.odlTableName = name;
public XSQLBluePrintNode(Object _odlNode, int _level,
XSQLBluePrintNode _parent) {
- this.odlNode = _odlNode;
+ addToSchemaNodes(_odlNode);
this.level = _level;
this.module = XSQLODLUtils.isModule(_odlNode);
this.parent = _parent;
this.bluePrintTableName = XSQLODLUtils.getBluePrintName(_odlNode);
- this.odlTableName = XSQLODLUtils.getODLNodeName(this.odlNode);
+ this.odlTableName = XSQLODLUtils
+ .getODLNodeName(getFirstFromSchemaNodes());
+ }
+
+ private void addToSchemaNodes(Object schemaObject) {
+ if (this.odlSchemaNodes == null)
+ this.odlSchemaNodes = new Object[1];
+ else {
+ Object[] temp = new Object[this.odlSchemaNodes.length + 1];
+ System.arraycopy(this.odlSchemaNodes, 0, temp, 0,
+ this.odlSchemaNodes.length);
+ this.odlSchemaNodes = temp;
+ }
+ this.odlSchemaNodes[this.odlSchemaNodes.length - 1] = schemaObject;
+ }
+
+ public Object getFirstFromSchemaNodes() {
+ if (this.odlSchemaNodes == null) {
+ return null;
+ }
+ return this.odlSchemaNodes[0];
}
public String getOrigName() {
public String getODLTableName() {
if (this.odlTableName == null) {
- this.odlTableName = XSQLODLUtils.getODLNodeName(this.odlNode);
+ this.odlTableName = XSQLODLUtils
+ .getODLNodeName(getFirstFromSchemaNodes());
}
return this.odlTableName;
}
- public Object getODLNode() {
- return this.odlNode;
- }
-
- public void AddChild(XSQLBluePrintNode ch) {
+ public void addChild(XSQLBluePrintNode ch) {
this.children.add(ch);
}
if (myInterfaceName != null) {
return myInterfaceName;
}
- if (odlNode != null) {
+ if (this.odlSchemaNodes != null) {
return getBluePrintNodeName();
}
if (odlTableName != null) {
@Override
public boolean equals(Object obj) {
XSQLBluePrintNode other = (XSQLBluePrintNode) obj;
- if (odlNode != null) {
+ if (this.odlSchemaNodes != null) {
return getBluePrintNodeName().equals(other.getBluePrintNodeName());
} else if (this.odlTableName == null && other.odlTableName != null) {
return false;
}
if (this.odlTableName != null && other.odlTableName == null) {
return false;
- }
- else {
+ } else {
return this.odlTableName.equals(other.odlTableName);
}
}
public int hashCode() {
if (myInterfaceString != null) {
return myInterfaceString.hashCode();
- } else if (odlNode != null) {
+ } else if (this.odlSchemaNodes != null) {
return bluePrintTableName.hashCode();
}
return 0;
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.lang.reflect.Field;
+import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
import org.opendaylight.yangtools.yang.model.util.Uint32;
import org.opendaylight.yangtools.yang.model.util.Uint64;
import org.opendaylight.yangtools.yang.model.util.Uint8;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLODLUtils {
private static Map<Class<?>, Class<?>> types =
public static boolean createOpenDaylightCache(XSQLBluePrint bluePrint,Object module) {
XSQLBluePrintNode node = new XSQLBluePrintNode(module, 0,null);
- bluePrint.addToBluePrintCache(node);
+ bluePrint.addToBluePrintCache(node,null);
collectODL(bluePrint, node, ((Module) module).getChildNodes(), 1);
return true;
}
return;
}
for (DataSchemaNode n : nodes) {
- if (n instanceof DataNodeContainer /*|| n instanceof LeafListSchemaNode*/
- || n instanceof ListSchemaNode) {
+ if (n instanceof DataNodeContainer) {
XSQLBluePrintNode bn = new XSQLBluePrintNode(n, level,parent);
- bluePrint.addToBluePrintCache(bn);
- parent.AddChild(bn);
- if (n instanceof DataNodeContainer) {
+ bn = bluePrint.addToBluePrintCache(bn,parent);
+ if (n instanceof ListSchemaNode) {
level++;
- collectODL(bluePrint, bn,
- ((DataNodeContainer) n).getChildNodes(), level);
+ collectODL(bluePrint, bn,((ListSchemaNode) n).getChildNodes(), level);
+ Set<AugmentationSchema> s = ((ListSchemaNode)n).getAvailableAugmentations();
+ if(s!=null){
+ for(AugmentationSchema as:s){
+ collectODL(bluePrint, bn,as.getChildNodes(), level);
+ }
+ }
level--;
- } else if (n instanceof ListSchemaNode) {
+ }else{
level++;
- collectODL(bluePrint, bn,
- ((ListSchemaNode) n).getChildNodes(), level);
+ collectODL(bluePrint, bn,((DataNodeContainer) n).getChildNodes(), level);
+ if(n instanceof ContainerSchemaNode){
+ Set<AugmentationSchema> s = ((ContainerSchemaNode)n).getAvailableAugmentations();
+ if(s!=null){
+ for(AugmentationSchema as:s){
+ collectODL(bluePrint, bn,as.getChildNodes(), level);
+ }
+ }
+ }
level--;
}
} else {
Field f = findField(c, name);
return f.get(o);
} catch (Exception err) {
- XSQLAdapter.log(err);
+ //XSQLAdapter.log(err);
}
return null;
}
return (Map<?, ?>) get(o, "children");
}
+ public static Collection<?> getChildrenCollection(Object o) {
+ Object value = get(o, "children");
+ if(value==null)
+ return Collections.emptyList();
+ if(value instanceof Map)
+ return ((Map<?,?>)value).values();
+ else
+ if(value instanceof Collection){
+ return (Collection<?>)value;
+ }else{
+ XSQLAdapter.log("Unknown Child Value Type="+value.getClass().getName());
+ return new ArrayList();
+ }
+ }
+
public static Object getValue(Object o) {
return get(o, "value");
}
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql.jdbc;
import java.io.InputStream;
import java.io.Reader;
import java.io.Serializable;
import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Calendar;
+import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrintNode;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLColumn;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLCriteria;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLODLUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
-public class JDBCResultSet implements Serializable, ResultSet,
- ResultSetMetaData {
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
+public class JDBCResultSet implements Serializable, ResultSet, ResultSetMetaData {
private static final long serialVersionUID = -7450200738431047057L;
private static final ClassLoader CLASS_LOADER = JDBCResultSet.class.getClassLoader();
private static final Class<?>[] PROXY_INTERFACES = new Class[] { ResultSet.class };
private Map<String, Map<XSQLColumn, List<XSQLCriteria>>> criteria = new ConcurrentHashMap<String, Map<XSQLColumn, List<XSQLCriteria>>>();
private Exception err = null;
private List<Record> EMPTY_RESULT = new LinkedList<Record>();
- private transient Map<String,JDBCResultSet> subQueries = new HashMap<String,JDBCResultSet>();
+ private transient Map<String, JDBCResultSet> subQueries = new HashMap<String, JDBCResultSet>();
public ResultSet getProxy() {
- return (ResultSet) Proxy.newProxyInstance(CLASS_LOADER, PROXY_INTERFACES, new JDBCProxy(this));
+ return this;
+ //return (ResultSet) Proxy.newProxyInstance(CLASS_LOADER, PROXY_INTERFACES, new JDBCProxy(this));
}
public void setSQL(String _sql) {
this.sql = _sql;
}
- public JDBCResultSet addSubQuery(String _sql,String logicalName) {
+ public JDBCResultSet addSubQuery(String _sql, String logicalName) {
if (subQueries == null) {
- subQueries = new HashMap<String,JDBCResultSet>();
+ subQueries = new HashMap<String, JDBCResultSet>();
}
JDBCResultSet rs = new JDBCResultSet(_sql);
- this.subQueries.put(logicalName,rs);
+ this.subQueries.put(logicalName, rs);
return rs;
}
- public Map<String,JDBCResultSet> getSubQueries() {
- if (this.subQueries==null) {
+ public Map<String, JDBCResultSet> getSubQueries() {
+ if (this.subQueries == null) {
this.subQueries = new HashMap<>();
}
return this.subQueries;
}
}
- public int isObjectFitCriteria(Map<String, Object> objValues, String tableName) {
+ public int isObjectFitCriteria(Map<String, Object> objValues,
+ String tableName) {
Map<XSQLColumn, List<XSQLCriteria>> tblCriteria = criteria
.get(tableName);
if (tblCriteria == null) {
}
public static class Record {
+ // Maps the attribute name to the attribute value
public Map<String, Object> data = new HashMap<>();
+ // The element object (possibly some kind of NormalizedNode)
public Object element = null;
+ // Does this record fit the criteria?
+ // In case of a list property, we first collect the list and only then
+ // we decide which list item should be included or not.
+ public boolean fitCriteria = true;
public Map<String, Object> getRecord() {
return this.data;
}
}
- private Map<String, Object> collectColumnValues(Object node, XSQLBluePrintNode bpn) {
- Map<?, ?> subChildren = XSQLODLUtils.getChildren(node);
- Map<String, Object> result = new HashMap<>();
- for (Object stc : subChildren.values()) {
- if (stc.getClass().getName().endsWith("ImmutableAugmentationNode")) {
+ public static class RecordsContainer {
+ public List<Record> records = new LinkedList<Record>();
+ public List<Record> fitRecords = new LinkedList<Record>();
+ public Object currentObject = null;
+ }
+
+ private void collectColumnValues(RecordsContainer rContainer,
+ XSQLBluePrintNode bpn) {
+ Collection<?> subChildren = XSQLODLUtils
+ .getChildrenCollection(rContainer.currentObject);
+ Record r = new Record();
+ r.element = rContainer.currentObject;
+ for (Object stc : subChildren) {
+ if (stc.getClass().getName()
+ .endsWith("ImmutableUnkeyedListEntryNode")) {
+ r.fitCriteria = false;
+ rContainer.currentObject = stc;
+ collectColumnValues(rContainer, bpn);
+ } else if (stc.getClass().getName()
+ .endsWith("ImmutableAugmentationNode")) {
Map<?, ?> values = XSQLODLUtils.getChildren(stc);
for (Object key : values.keySet()) {
Object val = values.get(key);
Object value = XSQLODLUtils.getValue(val);
String k = XSQLODLUtils.getNodeName(val);
if (value != null) {
- result.put(bpn.getBluePrintNodeName() + "." + k,
+ r.data.put(bpn.getBluePrintNodeName() + "." + k,
value.toString());
}
}
String k = XSQLODLUtils.getNodeName(stc);
Object value = XSQLODLUtils.getValue(stc);
if (value != null) {
- result.put(bpn.getBluePrintNodeName() + "." + k,
+ r.data.put(bpn.getBluePrintNodeName() + "." + k,
value.toString());
}
}
}
- return result;
+ if (r.fitCriteria) {
+ rContainer.records.add(r);
+ }
}
- private void addToData(Record rec, XSQLBluePrintNode bpn,
- XSQLBluePrint bluePrint, Map<String, Object> fullRecord) {
+ private void addToData(Record rec, XSQLBluePrintNode bpn,XSQLBluePrint bluePrint, Map<String, Object> fullRecord) {
XSQLBluePrintNode eNodes[] = bluePrint
.getBluePrintNodeByODLTableName(XSQLODLUtils
.getNodeIdentiofier(rec.element));
String odlNodeName = XSQLODLUtils.getNodeIdentiofier(child);
if (odlNodeName == null) {
+ if (child instanceof DataContainerNode) {
+ List<Object> augChidlren = getChildren(child, tableName,
+ bluePrint);
+ result.addAll(augChidlren);
+ }
continue;
}
continue;
}
- if (child.getClass().getName().endsWith("ImmutableContainerNode")) {
+ if (child.getClass().getName().endsWith("ImmutableUnkeyedListNode")) {
+ result.add(child);
+ } else if (child.getClass().getName()
+ .endsWith("ImmutableContainerNode")) {
result.add(child);
} else if (child.getClass().getName()
.endsWith("ImmutableAugmentationNode")) {
}
} else if (child.getClass().getName().endsWith("ImmutableMapNode")) {
result.addAll(XSQLODLUtils.getMChildren(child));
+ } else {
+ XSQLAdapter.log("Missed Node Data OF Type="
+ + child.getClass().getName());
}
}
return result;
}
- public List<Record> addRecords(Object element, XSQLBluePrintNode node,boolean root, String tableName, XSQLBluePrint bluePrint) {
+ public List<Record> addRecords(Object element, XSQLBluePrintNode node,
+ boolean root, String tableName, XSQLBluePrint bluePrint) {
List<Record> result = new LinkedList<Record>();
- //In case this is a sibling to the requested table, the elenment type
- //won't be in the path of the leaf node
- if(node==null){
- return result;
- }
String nodeID = XSQLODLUtils.getNodeIdentiofier(element);
if (node.getODLTableName().equals(nodeID)) {
- XSQLBluePrintNode bluePrintNode = bluePrint.getBluePrintNodeByODLTableName(nodeID)[0];
- Record rec = new Record();
- rec.element = element;
- XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode.getBluePrintNodeName());
- if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName()) || bpn != null) {
- Map<String, Object> allKeyValues = collectColumnValues(element, bpn);
- if (!(isObjectFitCriteria(allKeyValues,
- bpn.getBluePrintNodeName()) == 1)) {
- return EMPTY_RESULT;
+ XSQLBluePrintNode bluePrintNode = bluePrint
+ .getBluePrintNodeByODLTableName(nodeID)[0];
+ RecordsContainer rContainer = new RecordsContainer();
+ rContainer.currentObject = element;
+ XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode
+ .getBluePrintNodeName());
+ if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName())
+ || bpn != null) {
+ collectColumnValues(rContainer, bpn);
+ for (Record r : rContainer.records) {
+ if (!(isObjectFitCriteria(r.data,
+ bpn.getBluePrintNodeName()) == 1)) {
+ r.fitCriteria = false;
+ }
+ if (r.fitCriteria) {
+ Record rec = new Record();
+ rec.element = r.element;
+ addToData(rec, bpn, bluePrint, r.data);
+ rContainer.fitRecords.add(rec);
+ }
}
- addToData(rec, bpn, bluePrint, allKeyValues);
+ if (rContainer.fitRecords.isEmpty())
+ return EMPTY_RESULT;
}
- if (root) {
- addRecord(rec.data);
+ if (rContainer.records.isEmpty()) {
+ Record rec = new Record();
+ rec.element = rContainer.currentObject;
+ if (root) {
+ addRecord(rec.data);
+ } else {
+ result.add(rec);
+ }
} else {
- result.add(rec);
+ for (Record rec : rContainer.fitRecords) {
+ if (root) {
+ addRecord(rec.data);
+ } else {
+ result.add(rec);
+ }
+ }
}
return result;
}
XSQLBluePrintNode parent = node.getParent();
- List<Record> subRecords = addRecords(element, parent, false, tableName,bluePrint);
+ List<Record> subRecords = addRecords(element, parent, false, tableName,
+ bluePrint);
for (Record subRec : subRecords) {
List<Object> subO = getChildren(subRec.element, tableName,
bluePrint);
if (subO != null) {
for (Object subData : subO) {
- Record rec = new Record();
- rec.element = subData;
- rec.data.putAll(subRec.data);
+ RecordsContainer rContainer = new RecordsContainer();
+ rContainer.currentObject = subData;
- String recID = XSQLODLUtils.getNodeIdentiofier(rec.element);
+ String recID = XSQLODLUtils
+ .getNodeIdentiofier(rContainer.currentObject);
XSQLBluePrintNode eNodes[] = bluePrint
.getBluePrintNodeByODLTableName(recID);
XSQLBluePrintNode bpn = null;
break;
}
}
- boolean isObjectInCriteria = true;
if (bpn != null) {
- Map<String, Object> allKeyValues = collectColumnValues(rec.element, bpn);
- if ((isObjectFitCriteria(allKeyValues,
- bpn.getBluePrintNodeName()) == 1)) {
- addToData(rec, bpn, bluePrint, allKeyValues);
- } else {
- isObjectInCriteria = false;
+ collectColumnValues(rContainer, bpn);
+ for (Record r : rContainer.records) {
+ if ((isObjectFitCriteria(r.data,
+ bpn.getBluePrintNodeName()) == 1)) {
+ Record rec = new Record();
+ rec.data.putAll(subRec.data);
+ rec.element = r.element;
+ addToData(rec, bpn, bluePrint, r.data);
+ } else {
+ r.fitCriteria = false;
+ }
}
}
-
- if (isObjectInCriteria) {
+ if (rContainer.records.isEmpty()) {
+ Record rec = new Record();
+ rec.data.putAll(subRec.data);
+ rec.element = rContainer.currentObject;
if (root) {
if (!rec.data.isEmpty()) {
addRecord(rec.data);
} else {
result.add(rec);
}
+ } else {
+ for (Record r : rContainer.records) {
+ r.data.putAll(subRec.data);
+ if (r.fitCriteria) {
+ if (root) {
+ if (!r.data.isEmpty()) {
+ addRecord(r.data);
+ }
+ } else {
+ result.add(r);
+ }
+ }
+ }
}
}
}
}
-
return result;
}
}
}
- public static void execute(JDBCResultSet rs, XSQLAdapter adapter)
- throws SQLException {
+ public static void execute(JDBCResultSet rs, XSQLAdapter adapter)throws SQLException {
if(rs.getSQL().toLowerCase().trim().equals("select 1")){
rs.setFinished(true);
return;
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.xsql;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQL;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQLBuilder;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
- * Created by root on 6/26/14.
- */
+ * @author Sharon Aicler (saichler@gmail.com)
+ **/
public class XSQLProvider implements AutoCloseable {
public static final InstanceIdentifier<XSQL> ID = InstanceIdentifier.builder(XSQL.class).build();
- private static final Logger LOG = LoggerFactory.getLogger(XSQLProvider.class);
+ //public static final InstanceIdentifier<SalTest> ID2 = InstanceIdentifier.builder(SalTest.class).build();
public void close() {
}
- public XSQL buildXSQL(DataProviderService dps) {
+ public XSQL buildXSQL(DataBroker dps) {
+ XSQLAdapter.log("Building XSL...");
XSQLBuilder builder = new XSQLBuilder();
builder.setPort("34343");
XSQL xsql = builder.build();
try {
if (dps != null) {
- final DataModificationTransaction t = dps.beginTransaction();
- t.removeOperationalData(ID);
- t.putOperationalData(ID,xsql);
- t.commit().get();
+ XSQLAdapter.log("Starting TRansaction...");
+ WriteTransaction t = dps.newReadWriteTransaction();
+ t.delete(LogicalDatastoreType.OPERATIONAL, ID);
+ t.put(LogicalDatastoreType.OPERATIONAL,ID,xsql);
+ XSQLAdapter.log("Submitting...");
+ t.submit();
}
} catch (Exception e) {
- LOG.warn("Failed to update XSQL port status, ", e);
+ XSQLAdapter.log(e);
}
return xsql;
}
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
import org.opendaylight.xsql.XSQLProvider;
-
+/**
+ * @author Sharon Aicler (saichler@gmail.com)
+ **/
public class XSQLModule extends org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.AbstractXSQLModule {
+ private static final long SLEEP_TIME_BEFORE_CREATING_TRANSACTION = 10000;
public XSQLModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
XSQLAdapter xsqlAdapter = XSQLAdapter.getInstance();
getSchemaServiceDependency().registerSchemaContextListener(xsqlAdapter);
xsqlAdapter.setDataBroker(getAsyncDataBrokerDependency());
- XSQLProvider p = new XSQLProvider();
- //p.buildXSQL(getDataBrokerDependency());
+ final XSQLProvider p = new XSQLProvider();
+ Runnable runthis = new Runnable() {
+ @Override
+ public void run() {
+ try{Thread.sleep(SLEEP_TIME_BEFORE_CREATING_TRANSACTION);}catch(Exception err){}
+ p.buildXSQL(getDataBrokerDependency());
+ }
+ };
return p;
}
-
}
</type>
<name>XSQL</name>
<data-broker>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
- <name>binding-data-broker</name>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
</data-broker>
<async-data-broker>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
case XSQL {
when "/config:modules/config:module/config:type = 'XSQL'";
- container data-broker {
+ container data-broker {
uses config:service-ref {
refine type {
mandatory false;
- config:required-identity mdsal:binding-data-broker;
+ config:required-identity mdsal:binding-async-data-broker;
}
}
- }
+ }
container async-data-broker {
uses config:service-ref {
import java.io.InputStream;
import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Set;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCResultSet;
import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCServer;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
public class XSQLTest {
-
- XSQLBluePrint bluePrint = null;
+ private static final String DATASTORE_TEST_YANG = "/sal-persisted-dom-test.yang";
+ private XSQLBluePrint bluePrint = null;
+ //private static SchemaContext schemaContext = null;
+ @BeforeClass
+ public static void loadSchemaContext(){
+ //schemaContext = createTestContext();
+ }
@Before
public void before() {
System.out.print("*** XSQL Tests -");
System.out.println(str);
}
+
+ public static final InputStream getDatastoreTestInputStream() {
+ return getInputStream(DATASTORE_TEST_YANG);
+ }
+
+ private static InputStream getInputStream(final String resourceName) {
+ return XSQLTest.class.getResourceAsStream(DATASTORE_TEST_YANG);
+ }
+
+ public static SchemaContext createTestContext() {
+ YangParserImpl parser = new YangParserImpl();
+ Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(getDatastoreTestInputStream()));
+ return parser.resolveSchemaContext(modules);
+ }
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.ForwardingDOMStoreThreePhaseCommitCohort;
-final class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
+final class ChainedTransactionCommitImpl extends ForwardingDOMStoreThreePhaseCommitCohort {
private final SnapshotBackedWriteTransaction transaction;
private final DOMStoreThreePhaseCommitCohort delegate;
private final DOMStoreTransactionChainImpl txChain;
- protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
+ ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
this.transaction = Preconditions.checkNotNull(transaction);
this.delegate = Preconditions.checkNotNull(delegate);
}
@Override
- public ListenableFuture<Boolean> canCommit() {
- return delegate.canCommit();
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- return delegate.preCommit();
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- return delegate.abort();
+ protected DOMStoreThreePhaseCommitCohort delegate() {
+ return delegate;
}
@Override
public ListenableFuture<Void> commit() {
- ListenableFuture<Void> commitFuture = delegate.commit();
+ ListenableFuture<Void> commitFuture = super.commit();
Futures.addCallback(commitFuture, new FutureCallback<Void>() {
@Override
public void onFailure(final Throwable t) {
});
return commitFuture;
}
+
}
\ No newline at end of file
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
return name + "-" + txCounter.getAndIncrement();
}
+ private static void warnDebugContext(AbstractDOMStoreTransaction<?> transaction) {
+ final Throwable ctx = transaction.getDebugContext();
+ if (ctx != null) {
+ LOG.warn("Transaction {} has been allocated in the following context", transaction.getIdentifier(), ctx);
+ }
+ }
+
private final class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
private final SnapshotBackedWriteTransaction transaction;
private final DataTreeModification modification;
} catch (ConflictingModificationAppliedException e) {
LOG.warn("Store Tx: {} Conflicting modification for {}.", transaction.getIdentifier(),
e.getPath());
- transaction.warnDebugContext(LOG);
+ warnDebugContext(transaction);
return Futures.immediateFailedFuture(new OptimisticLockFailedException("Optimistic lock failed.", e));
} catch (DataValidationFailedException e) {
LOG.warn("Store Tx: {} Data Precondition failed for {}.", transaction.getIdentifier(),
e.getPath(), e);
- transaction.warnDebugContext(LOG);
+ warnDebugContext(transaction);
// For debugging purposes, allow dumping of the modification. Coupled with the above
// precondition log, it should allow us to understand what went on.
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.spi.DefaultDataTreeCandidate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
protected void notifyListeners(final Collection<AbstractDOMDataTreeChangeListenerRegistration<?>> registrations, final YangInstanceIdentifier path, final DataTreeCandidateNode node) {
- final DataTreeCandidate candidate = new SimpleDataTreeCandidate(path, node);
+ final DataTreeCandidate candidate = new DefaultDataTreeCandidate(path, node);
for (AbstractDOMDataTreeChangeListenerRegistration<?> reg : registrations) {
LOG.debug("Enqueueing candidate {} to registration {}", candidate, registrations);
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-
-final class SimpleDataTreeCandidate implements DataTreeCandidate {
- private final YangInstanceIdentifier rootPath;
- private final DataTreeCandidateNode rootNode;
-
- SimpleDataTreeCandidate(final YangInstanceIdentifier rootPath, final DataTreeCandidateNode rootNode) {
- this.rootPath = Preconditions.checkNotNull(rootPath);
- this.rootNode = Preconditions.checkNotNull(rootNode);
- }
-
- @Override
- public DataTreeCandidateNode getRootNode() {
- return rootNode;
- }
-
- @Override
- public YangInstanceIdentifier getRootPath() {
- return rootPath;
- }
-
- @Override
- public String toString() {
- return MoreObjects.toStringHelper(this).add("rootPath", rootPath).add("rootNode", rootNode).toString();
- }
-}
\ No newline at end of file
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkNotNull;
-
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
-
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
* which delegates most of its calls to similar methods provided by underlying snapshot.
*
*/
-final class SnapshotBackedReadTransaction extends AbstractDOMStoreTransaction
+final class SnapshotBackedReadTransaction extends AbstractDOMStoreTransaction<Object>
implements DOMStoreReadTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadTransaction.class);
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
* {@link org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype}.
*
*/
-class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction {
+class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction<Object> implements DOMStoreWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collection;
-import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
@Override
public DeviceSources call() throws Exception {
-
- final Set<SourceIdentifier> requiredSources = Sets.newHashSet(Collections2.transform(
- remoteSessionCapabilities.getModuleBasedCaps(), QNAME_TO_SOURCE_ID_FUNCTION));
-
- // If monitoring is not supported, we will still attempt to create schema, sources might be already provided
final NetconfStateSchemas availableSchemas = stateSchemasResolver.resolve(deviceRpc, remoteSessionCapabilities, id);
logger.debug("{}: Schemas exposed by ietf-netconf-monitoring: {}", id, availableSchemas.getAvailableYangSchemasQNames());
- final Set<SourceIdentifier> providedSources = Sets.newHashSet(Collections2.transform(
- availableSchemas.getAvailableYangSchemasQNames(), QNAME_TO_SOURCE_ID_FUNCTION));
-
- final Set<SourceIdentifier> requiredSourcesNotProvided = Sets.difference(requiredSources, providedSources);
+ final Set<QName> requiredSources = Sets.newHashSet(remoteSessionCapabilities.getModuleBasedCaps());
+ final Set<QName> providedSources = availableSchemas.getAvailableYangSchemasQNames();
+ final Set<QName> requiredSourcesNotProvided = Sets.difference(requiredSources, providedSources);
if (!requiredSourcesNotProvided.isEmpty()) {
logger.warn("{}: Netconf device does not provide all yang models reported in hello message capabilities, required but not provided: {}",
id, requiredSourcesNotProvided);
logger.warn("{}: Attempting to build schema context from required sources", id);
}
-
// Here all the sources reported in netconf monitoring are merged with those reported in hello.
// It is necessary to perform this since submodules are not mentioned in hello but still required.
// This clashes with the option of a user to specify supported yang models manually in configuration for netconf-connector
// and as a result one is not able to fully override yang models of a device. It is only possible to add additional models.
- final Set<SourceIdentifier> providedSourcesNotRequired = Sets.difference(providedSources, requiredSources);
+ final Set<QName> providedSourcesNotRequired = Sets.difference(providedSources, requiredSources);
if (!providedSourcesNotRequired.isEmpty()) {
logger.warn("{}: Netconf device provides additional yang models not reported in hello message capabilities: {}",
id, providedSourcesNotRequired);
* Contains RequiredSources - sources from capabilities.
*/
private static final class DeviceSources {
- private final Collection<SourceIdentifier> requiredSources;
- private final Collection<SourceIdentifier> providedSources;
+ private final Set<QName> requiredSources;
+ private final Set<QName> providedSources;
- public DeviceSources(final Collection<SourceIdentifier> requiredSources, final Collection<SourceIdentifier> providedSources) {
+ public DeviceSources(final Set<QName> requiredSources, final Set<QName> providedSources) {
this.requiredSources = requiredSources;
this.providedSources = providedSources;
}
- public Collection<SourceIdentifier> getRequiredSources() {
+ public Set<QName> getRequiredSourcesQName() {
return requiredSources;
}
- public Collection<SourceIdentifier> getProvidedSources() {
+ public Set<QName> getProvidedSourcesQName() {
return providedSources;
}
+ public Collection<SourceIdentifier> getRequiredSources() {
+ return Collections2.transform(requiredSources, QNAME_TO_SOURCE_ID_FUNCTION);
+ }
+
+ public Collection<SourceIdentifier> getProvidedSources() {
+ return Collections2.transform(providedSources, QNAME_TO_SOURCE_ID_FUNCTION);
+ }
+
}
/**
// If no more sources, fail
if(requiredSources.isEmpty()) {
- handleSalInitializationFailure(new IllegalStateException(id + ": No more sources for schema context"), listener);
+ final IllegalStateException cause = new IllegalStateException(id + ": No more sources for schema context");
+ handleSalInitializationFailure(cause, listener);
+ salFacade.onDeviceFailed(cause);
return;
}
@Override
public void onSuccess(final SchemaContext result) {
logger.debug("{}: Schema context built successfully from {}", id, requiredSources);
- final Collection<QName> filteredQNames = Sets.difference(remoteSessionCapabilities.getModuleBasedCaps(), capabilities.getUnresolvedCapabilites().keySet());
+ final Collection<QName> filteredQNames = Sets.difference(deviceSources.getProvidedSourcesQName(), capabilities.getUnresolvedCapabilites().keySet());
capabilities.addCapabilities(filteredQNames);
capabilities.addNonModuleBasedCapabilities(remoteSessionCapabilities.getNonModuleCaps());
handleSalInitializationSuccess(result, remoteSessionCapabilities, getDeviceSpecificRpc(result));
}
private Collection<QName> getQNameFromSourceIdentifiers(final Collection<SourceIdentifier> identifiers) {
- final Collection<QName> qNames = new HashSet<>();
- for (final SourceIdentifier source : identifiers) {
- final Optional<QName> qname = getQNameFromSourceIdentifier(source);
- if (qname.isPresent()) {
- qNames.add(qname.get());
+ final Collection<QName> qNames = Collections2.transform(identifiers, new Function<SourceIdentifier, QName>() {
+ @Override
+ public QName apply(final SourceIdentifier sourceIdentifier) {
+ return getQNameFromSourceIdentifier(sourceIdentifier);
}
- }
+ });
+
if (qNames.isEmpty()) {
logger.debug("Unable to map any source identfiers to a capability reported by device : " + identifiers);
}
return qNames;
}
- private Optional<QName> getQNameFromSourceIdentifier(final SourceIdentifier identifier) {
- for (final QName qname : remoteSessionCapabilities.getModuleBasedCaps()) {
- if (qname.getLocalName().equals(identifier.getName())
- && qname.getFormattedRevision().equals(identifier.getRevision())) {
- return Optional.of(qname);
+ private QName getQNameFromSourceIdentifier(final SourceIdentifier identifier) {
+ // "Required sources" here is the union of the required and provided sources, merged in DeviceSourcesResolver
+ for (final QName qname : deviceSources.getRequiredSourcesQName()) {
+ if(qname.getLocalName().equals(identifier.getName()) == false) {
+ continue;
+ }
+
+ if(identifier.getRevision().equals(SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION) &&
+ qname.getRevision() == null) {
+ return qname;
+ }
+
+ if (qname.getFormattedRevision().equals(identifier.getRevision())) {
+ return qname;
}
}
- throw new IllegalArgumentException("Unable to map identifier to a devices reported capability: " + identifier);
+ throw new IllegalArgumentException("Unable to map identifier to a devices reported capability: " + identifier + " Available: " + deviceSources.getRequiredSourcesQName());
}
}
}
public final static class RemoteYangSchema {
private final QName qname;
- private RemoteYangSchema(final QName qname) {
+ RemoteYangSchema(final QName qname) {
this.qname = qname;
}
import com.google.common.base.Optional;
import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Futures;
import java.io.InputStream;
import java.util.ArrayList;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.Module;
public void testNetconfDeviceMissingSource() throws Exception {
final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
final NetconfDeviceCommunicator listener = getListener();
+ final SchemaContext schema = getSchema();
final SchemaContextFactory schemaFactory = getSchemaFactory();
if(((Collection<?>) invocation.getArguments()[0]).size() == 2) {
return Futures.immediateFailedCheckedFuture(schemaResolutionException);
} else {
- return Futures.immediateCheckedFuture(getSchema());
+ return Futures.immediateCheckedFuture(schema);
}
}
}).when(schemaFactory).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
- = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
+ = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, new NetconfStateSchemas.NetconfStateSchemasResolver() {
+ @Override
+ public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
+ final Module first = Iterables.getFirst(schema.getModules(), null);
+ final QName qName = QName.create(first.getQNameModule(), first.getName());
+ final NetconfStateSchemas.RemoteYangSchema source1 = new NetconfStateSchemas.RemoteYangSchema(qName);
+ final NetconfStateSchemas.RemoteYangSchema source2 = new NetconfStateSchemas.RemoteYangSchema(QName.create(first.getQNameModule(), "test-module2"));
+ return new NetconfStateSchemas(Sets.newHashSet(source1, source2));
+ }
+ });
+
final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), true);
// Monitoring supported
final NetconfSessionPreferences sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
<param-name>javax.ws.rs.Application</param-name>
<param-value>org.opendaylight.controller.sal.rest.doc.jaxrs.ApiDocApplication</param-value>
</init-param>
+ <!-- AAA Auth Filter -->
+ <init-param>
+ <param-name>com.sun.jersey.spi.container.ContainerRequestFilters</param-name>
+ <param-value> org.opendaylight.aaa.sts.TokenAuthFilter</param-value>
+ </init-param>
<load-on-startup>1</load-on-startup>
</servlet>
<security-constraint>
<web-resource-collection>
- <web-resource-name>free access</web-resource-name>
- <url-pattern>/explorer/css/*</url-pattern>
- <url-pattern>/explorer/images/*</url-pattern>
- <url-pattern>/explorer/lib/*</url-pattern>
- <url-pattern>/explorer/*</url-pattern>
+ <web-resource-name>API Doc</web-resource-name>
+ <url-pattern>/*</url-pattern>
</web-resource-collection>
</security-constraint>
}
Date revision = module.getRevision();
- Preconditions.checkState(!revisionsByNamespace.containsKey(revision),
- "Duplicate revision %s for namespace %s", revision, namespace);
IdentityMapping identityMapping = revisionsByNamespace.get(revision);
if(identityMapping == null) {
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
+import com.google.common.base.Optional;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.NoSuchElementException;
import java.util.Set;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.controller.config.yangjmxgenerator.PackageTranslator;
import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.builder.impl.ModuleIdentifierImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
public Set<Module> getModules() {
- return schemaContext.getModules();
+ // Union of all top-level modules and their submodules, so callers can resolve
+ // submodule capabilities/sources by name as well (see getModuleSource fallback).
+ final Set<Module> modules = Sets.newHashSet(schemaContext.getModules());
+ for (final Module module : schemaContext.getModules()) {
+ modules.addAll(module.getSubmodules());
+ }
+ return modules;
}
@Override
public String getModuleSource(final org.opendaylight.yangtools.yang.model.api.ModuleIdentifier moduleIdentifier) {
- return schemaContext.getModuleSource(moduleIdentifier).get();
+ final Optional<String> moduleSource = schemaContext.getModuleSource(moduleIdentifier);
+ if(moduleSource.isPresent()) {
+ return moduleSource.get();
+ } else {
+ // The schema context may not track sources for submodules directly; fall back to
+ // locating the (sub)module by identifier among all modules and submodules exposed
+ // by getModules() and reading the source it carries itself.
+ try {
+ return Iterables.find(getModules(), new Predicate<Module>() {
+ @Override
+ public boolean apply(final Module input) {
+ final ModuleIdentifierImpl id = new ModuleIdentifierImpl(input.getName(), Optional.fromNullable(input.getNamespace()), Optional.fromNullable(input.getRevision()));
+ return id.equals(moduleIdentifier);
+ }
+ }).getSource();
+ } catch (final NoSuchElementException e) {
+ throw new IllegalArgumentException("Source for yang module " + moduleIdentifier + " not found", e);
+ }
+ }
}
@Override
import org.opendaylight.controller.netconf.mdsal.connector.MdsalNetconfOperationServiceFactory;
-public class NetconfMdsalMapperModule extends org.opendaylight.controller.config.yang.netconf.mdsal.mapper.AbstractNetconfMdsalMapperModule {
+public class NetconfMdsalMapperModule extends org.opendaylight.controller.config.yang.netconf.mdsal.mapper.AbstractNetconfMdsalMapperModule{
public NetconfMdsalMapperModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
@Override
public java.lang.AutoCloseable createInstance() {
- final MdsalNetconfOperationServiceFactory mdsalNetconfOperationServiceFactory = new MdsalNetconfOperationServiceFactory(getRootSchemaServiceDependency(), getDomBrokerDependency()) {
- @Override
- public void close() throws Exception {
- super.close();
- getMapperAggregatorDependency().onRemoveNetconfOperationServiceFactory(this);
- }
- };
+ final MdsalNetconfOperationServiceFactory mdsalNetconfOperationServiceFactory =
+ new MdsalNetconfOperationServiceFactory(getRootSchemaServiceDependency()) {
+ @Override
+ public void close() throws Exception {
+ super.close();
+ getMapperAggregatorDependency().onRemoveNetconfOperationServiceFactory(this);
+ }
+ };
+ // Register as a Broker consumer: the factory receives DOMDataBroker/DOMRpcService
+ // asynchronously via onSessionInitiated instead of taking them in the constructor.
+ getDomBrokerDependency().registerConsumer(mdsalNetconfOperationServiceFactory);
getMapperAggregatorDependency().onAddNetconfOperationServiceFactory(mdsalNetconfOperationServiceFactory);
return mdsalNetconfOperationServiceFactory;
}
import java.util.Set;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
private final OperationProvider operationProvider;
public MdsalNetconfOperationService(final CurrentSchemaContext schemaContext, final String netconfSessionIdForReporting,
- final DOMDataBroker dataBroker) {
- this.operationProvider = new OperationProvider(netconfSessionIdForReporting, schemaContext, dataBroker);
+ final DOMDataBroker dataBroker, final DOMRpcService rpcService) {
+ // rpcService enables invocation of yang-modeled rpcs via the new RuntimeRpc operation
+ this.operationProvider = new OperationProvider(netconfSessionIdForReporting, schemaContext, dataBroker, rpcService);
}
@Override
package org.opendaylight.controller.netconf.mdsal.connector;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.util.capability.BasicCapability;
import org.opendaylight.controller.netconf.util.capability.YangModuleCapability;
+import org.opendaylight.controller.sal.core.api.Broker.ConsumerSession;
+import org.opendaylight.controller.sal.core.api.Consumer;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class MdsalNetconfOperationServiceFactory implements NetconfOperationServiceFactory, AutoCloseable {
+public class MdsalNetconfOperationServiceFactory implements NetconfOperationServiceFactory, Consumer, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(MdsalNetconfOperationServiceFactory.class);
- private final DOMDataBroker dataBroker;
+ private ConsumerSession session = null;
+ private DOMDataBroker dataBroker = null;
+ private DOMRpcService rpcService = null;
private final CurrentSchemaContext currentSchemaContext;
- public MdsalNetconfOperationServiceFactory(final SchemaService schemaService, final DOMDataBroker domDataBroker) {
+ public MdsalNetconfOperationServiceFactory(final SchemaService schemaService) {
+ // dataBroker/rpcService are no longer constructor-injected; they are supplied
+ // later through the Consumer contract in onSessionInitiated.
this.currentSchemaContext = new CurrentSchemaContext(Preconditions.checkNotNull(schemaService));
- this.dataBroker = Preconditions.checkNotNull(domDataBroker);
}
@Override
public MdsalNetconfOperationService createService(final String netconfSessionIdForReporting) {
+ // Guard against service creation before onSessionInitiated has delivered the brokers
+ Preconditions.checkState(dataBroker != null, "MD-SAL provider not yet initialized");
+ return new MdsalNetconfOperationService(currentSchemaContext, netconfSessionIdForReporting, dataBroker, rpcService);
- return new MdsalNetconfOperationService(currentSchemaContext, netconfSessionIdForReporting, dataBroker);
}
@Override
return transformCapabilities(currentSchemaContext.getCurrentContext());
}
- static Set<Capability> transformCapabilities(final SchemaContext currentContext1) {
+ static Set<Capability> transformCapabilities(final SchemaContext currentContext) {
final Set<Capability> capabilities = new HashSet<>();
// [RFC6241] 8.3. Candidate Configuration Capability
capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:candidate:1.0"));
- final SchemaContext currentContext = currentContext1;
final Set<Module> modules = currentContext.getModules();
for (final Module module : modules) {
- if(currentContext.getModuleSource(module).isPresent()) {
- capabilities.add(new YangModuleCapability(module, currentContext.getModuleSource(module).get()));
- } else {
- LOG.warn("Missing source for module {}. This module will not be available from netconf server",
- module);
+ Optional<YangModuleCapability> cap = moduleToCapability(module);
+ if(cap.isPresent()) {
+ capabilities.add(cap.get());
+ }
+ // Submodules advertise their own capability too, so clients can fetch their
+ // yang sources directly (see the getModules change in the osgi facade).
+ for (final Module submodule : module.getSubmodules()) {
+ cap = moduleToCapability(submodule);
+ if(cap.isPresent()) {
+ capabilities.add(cap.get());
+ }
}
}
return capabilities;
}
+ // Wraps a (sub)module into a yang schema capability, or returns absent (with a
+ // warning) when the module carries no yang source text.
+ private static Optional<YangModuleCapability> moduleToCapability(final Module module) {
+ final String source = module.getSource();
+ if(source !=null) {
+ return Optional.of(new YangModuleCapability(module, source));
+ } else {
+ LOG.warn("Missing source for module {}. This module will not be available from netconf server",
+ module);
+ }
+ return Optional.absent();
+ }
+
@Override
public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
return currentSchemaContext.registerCapabilityListener(listener);
}
+ // Consumer callback: capture the MD-SAL session and resolve the broker services
+ // once the Broker makes them available. Until then createService() must not run.
+ @Override
+ public void onSessionInitiated(ConsumerSession session) {
+ this.session = Preconditions.checkNotNull(session);
+ this.dataBroker = this.session.getService(DOMDataBroker.class);
+ this.rpcService = this.session.getService(DOMRpcService.class);
+ }
+
+ // This consumer provides no extra functionality; it only consumes broker services.
+ @Override
+ public Collection<ConsumerFunctionality> getConsumerFunctionality() {
+ return Collections.emptySet();
+ }
}
import com.google.common.collect.Sets;
import java.util.Set;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Commit;
import org.opendaylight.controller.netconf.mdsal.connector.ops.DiscardChanges;
import org.opendaylight.controller.netconf.mdsal.connector.ops.EditConfig;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Lock;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.RuntimeRpc;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Unlock;
import org.opendaylight.controller.netconf.mdsal.connector.ops.get.Get;
import org.opendaylight.controller.netconf.mdsal.connector.ops.get.GetConfig;
private final String netconfSessionIdForReporting;
private final CurrentSchemaContext schemaContext;
private final DOMDataBroker dataBroker;
+ private final DOMRpcService rpcService;
private final TransactionProvider transactionProvider;
- public OperationProvider(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final DOMDataBroker dataBroker) {
+ public OperationProvider(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext,
+ final DOMDataBroker dataBroker, final DOMRpcService rpcService) {
this.netconfSessionIdForReporting = netconfSessionIdForReporting;
this.schemaContext = schemaContext;
this.dataBroker = dataBroker;
- this.transactionProvider = new TransactionProvider(dataBroker, netconfSessionIdForReporting);
-
+ // rpcService is handed to the RuntimeRpc operation created in getOperations()
+ this.rpcService = rpcService;
+ this.transactionProvider = new TransactionProvider(this.dataBroker, netconfSessionIdForReporting);
}
Set<NetconfOperation> getOperations() {
new Get(netconfSessionIdForReporting, schemaContext, transactionProvider),
new GetConfig(netconfSessionIdForReporting, schemaContext, transactionProvider),
new Lock(netconfSessionIdForReporting),
- new Unlock(netconfSessionIdForReporting)
+ new Unlock(netconfSessionIdForReporting),
+ new RuntimeRpc(netconfSessionIdForReporting, schemaContext, rpcService)
);
}
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class Commit extends AbstractLastNetconfOperation{
+public class Commit extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(Commit.class);
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class DiscardChanges extends AbstractLastNetconfOperation{
+public class DiscardChanges extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(DiscardChanges.class);
protected String getOperationName() {
return OPERATION_NAME;
}
+
}
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yangtools.yang.data.api.ModifyAction;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class EditConfig extends AbstractLastNetconfOperation {
+public class EditConfig extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(EditConfig.class);
protected String getOperationName() {
return OPERATION_NAME;
}
+
}
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class Lock extends AbstractLastNetconfOperation{
+public class Lock extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(Lock.class);
protected String getOperationName() {
return OPERATION_NAME;
}
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collections;
+import java.util.Map;
+import javax.annotation.Nullable;
+import javax.xml.stream.XMLOutputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamWriter;
+import javax.xml.transform.dom.DOMResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Attr;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+public class RuntimeRpc extends AbstractSingletonNetconfOperation {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RuntimeRpc.class);
+
+ private final CurrentSchemaContext schemaContext;
+ private static final XMLOutputFactory XML_OUTPUT_FACTORY;
+
+ static {
+ XML_OUTPUT_FACTORY = XMLOutputFactory.newFactory();
+ XML_OUTPUT_FACTORY.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
+ }
+
+ private final DOMRpcService rpcService;
+
+ public RuntimeRpc(final String netconfSessionIdForReporting, CurrentSchemaContext schemaContext, DOMRpcService rpcService) {
+ super(netconfSessionIdForReporting);
+ this.schemaContext = schemaContext;
+ this.rpcService = rpcService;
+ }
+
+ @Override
+ protected HandlingPriority canHandle(final String netconfOperationName, final String namespace) {
+ final URI namespaceURI = createNsUri(namespace);
+ final Optional<Module> module = getModule(namespaceURI);
+
+ if (!module.isPresent()) {
+ LOG.debug("Cannot handle rpc: {}, {}", netconfOperationName, namespace);
+ return HandlingPriority.CANNOT_HANDLE;
+ }
+
+ getRpcDefinitionFromModule(module.get(), namespaceURI, netconfOperationName);
+ return HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY;
+
+ }
+
+ @Override
+ protected String getOperationName() {
+ throw new UnsupportedOperationException("Runtime rpc does not have a stable name");
+ }
+
+ private URI createNsUri(final String namespace) {
+ final URI namespaceURI;
+ try {
+ namespaceURI = new URI(namespace);
+ } catch (URISyntaxException e) {
+ // Cannot occur, namespace in parsed XML cannot be invalid URI
+ throw new IllegalStateException("Unable to parse URI " + namespace, e);
+ }
+ return namespaceURI;
+ }
+
+ //this returns module with the newest revision if more then 1 module with same namespace is found
+ private Optional<Module> getModule(final URI namespaceURI) {
+ return Optional.of(schemaContext.getCurrentContext().findModuleByNamespaceAndRevision(namespaceURI, null));
+ }
+
+ private Optional<RpcDefinition> getRpcDefinitionFromModule(Module module, URI namespaceURI, String name) {
+ for (RpcDefinition rpcDef : module.getRpcs()) {
+ if (rpcDef.getQName().getNamespace().equals(namespaceURI)
+ && rpcDef.getQName().getLocalName().equals(name)) {
+ return Optional.of(rpcDef);
+ }
+ }
+ return Optional.absent();
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+
+ final String netconfOperationName = operationElement.getName();
+ final String netconfOperationNamespace;
+ try {
+ netconfOperationNamespace = operationElement.getNamespace();
+ } catch (MissingNameSpaceException e) {
+ LOG.debug("Cannot retrieve netconf operation namespace from message due to ", e);
+ throw new NetconfDocumentedException("Cannot retrieve netconf operation namespace from message",
+ ErrorType.protocol, ErrorTag.unknown_namespace, ErrorSeverity.error);
+ }
+
+ final URI namespaceURI = createNsUri(netconfOperationNamespace);
+ final Optional<Module> moduleOptional = getModule(namespaceURI);
+
+ if (!moduleOptional.isPresent()) {
+ throw new NetconfDocumentedException("Unable to find module in Schema Context with namespace and name : " +
+ namespaceURI + " " + netconfOperationName + schemaContext.getCurrentContext(),
+ ErrorType.application, ErrorTag.bad_element, ErrorSeverity.error);
+ }
+
+ final Optional<RpcDefinition> rpcDefinitionOptional = getRpcDefinitionFromModule(moduleOptional.get(), namespaceURI, netconfOperationName);
+
+ if (!rpcDefinitionOptional.isPresent()) {
+ throw new NetconfDocumentedException("Unable to find RpcDefinition with namespace and name : " + namespaceURI + " " + netconfOperationName,
+ ErrorType.application, ErrorTag.bad_element, ErrorSeverity.error);
+ }
+
+ final RpcDefinition rpcDefinition = rpcDefinitionOptional.get();
+ final SchemaPath schemaPath = SchemaPath.create(Collections.singletonList(rpcDefinition.getQName()), true);
+ final NormalizedNode<?, ?> inputNode = rpcToNNode(operationElement, rpcDefinition.getInput());
+
+ final CheckedFuture<DOMRpcResult, DOMRpcException> rpcFuture = rpcService.invokeRpc(schemaPath, inputNode);
+ try {
+ final DOMRpcResult result = rpcFuture.checkedGet();
+ if (result.getResult() == null) {
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.of(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
+ }
+ return (Element) transformNormalizedNode(document, result.getResult(), rpcDefinition.getOutput().getPath());
+ } catch (DOMRpcException e) {
+ throw NetconfDocumentedException.wrap(e);
+ }
+ }
+
+ @Override
+ public Document handle(final Document requestMessage,
+ final NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+
+ final XmlElement requestElement = getRequestElementWithCheck(requestMessage);
+
+ final Document document = XmlUtil.newDocument();
+
+ final XmlElement operationElement = requestElement.getOnlyChildElement();
+ final Map<String, Attr> attributes = requestElement.getAttributes();
+
+ final Element response = handle(document, operationElement, subsequentOperation);
+ final Element rpcReply = XmlUtil.createElement(document, XmlNetconfConstants.RPC_REPLY_KEY, Optional.of(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
+
+ if(XmlElement.fromDomElement(response).hasNamespace()) {
+ rpcReply.appendChild(response);
+ } else {
+ final NodeList list = response.getChildNodes();
+ if (list.getLength() == 0) {
+ rpcReply.appendChild(response);
+ } else {
+ while (list.getLength() != 0) {
+ rpcReply.appendChild(list.item(0));
+ }
+ }
+ }
+
+ for (Attr attribute : attributes.values()) {
+ rpcReply.setAttributeNode((Attr) document.importNode(attribute, true));
+ }
+ document.appendChild(rpcReply);
+ return document;
+ }
+
+ //TODO move all occurences of this method in mdsal netconf(and xml factories) to a utility class
+ private Node transformNormalizedNode(final Document document, final NormalizedNode<?, ?> data, final SchemaPath rpcOutputPath) {
+ final DOMResult result = new DOMResult(document.createElement(XmlNetconfConstants.RPC_REPLY_KEY));
+
+ final XMLStreamWriter xmlWriter = getXmlStreamWriter(result);
+
+ final NormalizedNodeStreamWriter nnStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(xmlWriter,
+ schemaContext.getCurrentContext(), rpcOutputPath);
+
+ final NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter(nnStreamWriter);
+
+ writeRootElement(xmlWriter, nnWriter, (ContainerNode) data);
+ try {
+ nnStreamWriter.close();
+ xmlWriter.close();
+ } catch (IOException | XMLStreamException e) {
+ LOG.warn("Error while closing streams", e);
+ }
+
+ return result.getNode();
+ }
+
+ private XMLStreamWriter getXmlStreamWriter(final DOMResult result) {
+ try {
+ return XML_OUTPUT_FACTORY.createXMLStreamWriter(result);
+ } catch (final XMLStreamException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void writeRootElement(final XMLStreamWriter xmlWriter, final NormalizedNodeWriter nnWriter, final ContainerNode data) {
+ try {
+ for (final DataContainerChild<? extends PathArgument, ?> child : data.getValue()) {
+ nnWriter.write(child);
+ }
+ nnWriter.flush();
+ xmlWriter.flush();
+ } catch (XMLStreamException | IOException e) {
+ Throwables.propagate(e);
+ }
+ }
+
+ /**
+ * Parses xml element rpc input into normalized node or null if rpc does not take any input
+ * @param oElement rpc xml element
+ * @param input input container schema node, or null if rpc does not take any input
+ * @return parsed rpc into normalized node, or null if input schema is null
+ */
+ @Nullable
+ private NormalizedNode<?, ?> rpcToNNode(final XmlElement oElement, @Nullable final ContainerSchemaNode input) {
+ return input == null ? null : DomToNormalizedNodeParserFactory
+ .getInstance(DomUtils.defaultValueCodecProvider(), schemaContext.getCurrentContext())
+ .getContainerNodeParser()
+ .parse(Collections.singletonList(oElement.getDomElement()), input);
+ }
+
+}
import com.google.common.base.Optional;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class Unlock extends AbstractLastNetconfOperation{
+public class Unlock extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(Unlock.class);
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Datastore;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.sal.connect.netconf.util.InstanceIdToNodes;
import org.opendaylight.yangtools.yang.common.QName;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
-public abstract class AbstractGet extends AbstractLastNetconfOperation {
+public abstract class AbstractGet extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(AbstractGet.class);
container dom-broker {
uses config:service-ref {
refine type {
- mandatory false;
- config:required-identity md-sal-dom:dom-async-data-broker;
+ mandatory true;
+ config:required-identity md-sal-dom:dom-broker-osgi-registry;
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.MockitoAnnotations.initMocks;
+
+import com.google.common.base.Preconditions;
+import com.google.common.io.ByteSource;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.custommonkey.xmlunit.DetailedDiff;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.custommonkey.xmlunit.examples.RecursiveElementNameAndTextQualifier;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.parser.api.YangSyntaxErrorException;
+import org.opendaylight.yangtools.yang.parser.builder.impl.BuilderUtils;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+
+/**
+ * Unit tests for {@link RuntimeRpc}: exercises void-output, void-input/output,
+ * non-void and failing rpc invocations against stubbed {@link DOMRpcService}
+ * implementations, verifying the netconf reply documents produced.
+ */
+public class RuntimeRpcTest {
+
+    private static final Logger LOG = LoggerFactory.getLogger(RuntimeRpcTest.class);
+
+    private String sessionIdForReporting = "netconf-test-session1";
+
+    // Canned <ok/> reply used as the expected response for void-output rpcs.
+    // Falls back to an empty document if the fixture cannot be loaded.
+    private static Document RPC_REPLY_OK = null;
+
+    static {
+        try {
+            RPC_REPLY_OK = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/runtimerpc-ok-reply.xml");
+        } catch (Exception e) {
+            LOG.debug("unable to load rpc reply ok.", e);
+            RPC_REPLY_OK = XmlUtil.newDocument();
+        }
+    }
+
+    // Rpc service that always succeeds with an empty (void) result.
+    private DOMRpcService rpcServiceVoidInvoke = new DOMRpcService() {
+        @Nonnull
+        @Override
+        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+            return Futures.immediateCheckedFuture((DOMRpcResult) new DefaultDOMRpcResult(null, Collections.<RpcError>emptyList()));
+        }
+
+        @Nonnull
+        @Override
+        public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+            return null;
+        }
+    };
+
+    // Rpc service whose invocation always fails with a DOMRpcException.
+    private DOMRpcService rpcServiceFailedInvocation = new DOMRpcService() {
+        @Nonnull
+        @Override
+        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+            // DOMRpcException is abstract, hence the anonymous subclass.
+            return Futures.immediateFailedCheckedFuture((DOMRpcException) new DOMRpcException("rpc invocation not implemented yet") {
+            });
+        }
+
+        @Nonnull
+        @Override
+        public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+            return null;
+        }
+    };
+
+    // Rpc service that echoes the rpc input back as the rpc output, using the
+    // rpc's output schema node looked up from the parsed test model.
+    private DOMRpcService rpcServiceSuccesfullInvocation = new DOMRpcService() {
+        @Nonnull
+        @Override
+        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+            Collection<DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>> children = (Collection) input.getValue();
+            Module module = schemaContext.findModuleByNamespaceAndRevision(type.getLastComponent().getNamespace(), null);
+            RpcDefinition rpcDefinition = getRpcDefinitionFromModule(module, module.getNamespace(), type.getLastComponent().getLocalName());
+            ContainerSchemaNode outputSchemaNode = rpcDefinition.getOutput();
+            ContainerNode node = ImmutableContainerNodeBuilder.create()
+                    .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(outputSchemaNode.getQName()))
+                    .withValue(children).build();
+
+            return Futures.immediateCheckedFuture((DOMRpcResult) new DefaultDOMRpcResult(node));
+        }
+
+        @Nonnull
+        @Override
+        public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+            return null;
+        }
+    };
+
+    private SchemaContext schemaContext = null;
+    private CurrentSchemaContext currentSchemaContext = null;
+    @Mock
+    private SchemaService schemaService;
+    @Mock
+    private SchemaContextListener listener;
+    @Mock
+    private ListenerRegistration registration;
+
+    @Before
+    public void setUp() throws Exception {
+
+        initMocks(this);
+
+        // Parse the test schemas BEFORE stubbing: the doReturn(...) calls below
+        // capture the current value of the schemaContext field, so stubbing
+        // first would leave getGlobalContext()/getSessionContext() permanently
+        // returning null.
+        this.schemaContext = parseSchemas(getYangSchemas());
+
+        doNothing().when(registration).close();
+        doReturn(listener).when(registration).getInstance();
+        doNothing().when(schemaService).addModule(any(Module.class));
+        doNothing().when(schemaService).removeModule(any(Module.class));
+        doReturn(schemaContext).when(schemaService).getGlobalContext();
+        doReturn(schemaContext).when(schemaService).getSessionContext();
+        doAnswer(new Answer() {
+            @Override
+            public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+                // Immediately push the parsed context to any registered listener,
+                // mimicking SchemaService's register-then-notify behavior.
+                ((SchemaContextListener) invocationOnMock.getArguments()[0]).onGlobalContextUpdated(schemaContext);
+                return registration;
+            }
+        }).when(schemaService).registerSchemaContextListener(any(SchemaContextListener.class));
+
+        XMLUnit.setIgnoreWhitespace(true);
+        XMLUnit.setIgnoreAttributeOrder(true);
+
+        this.currentSchemaContext = new CurrentSchemaContext(schemaService);
+    }
+
+    @Test
+    public void testVoidOutputRpc() throws Exception {
+        RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceVoidInvoke);
+
+        Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-void-output.xml");
+        HandlingPriority priority = rpc.canHandle(rpcDocument);
+        Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+        Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+
+        // A void-output rpc must be answered with a plain <ok/> reply.
+        verifyResponse(response, RPC_REPLY_OK);
+    }
+
+    @Test
+    public void testSuccesfullNonVoidInvocation() throws Exception {
+        RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceSuccesfullInvocation);
+
+        Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid.xml");
+        HandlingPriority priority = rpc.canHandle(rpcDocument);
+        Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+        Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+        verifyResponse(response, XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid-control.xml"));
+    }
+
+    @Test
+    public void testFailedInvocation() throws Exception {
+        RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceFailedInvocation);
+
+        Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid.xml");
+        HandlingPriority priority = rpc.canHandle(rpcDocument);
+        Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+        try {
+            rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+            fail("should have failed with rpc invocation not implemented yet");
+        } catch (NetconfDocumentedException e) {
+            // The DOMRpcException must be mapped to an application/error/operation-failed fault.
+            assertTrue(e.getErrorType() == ErrorType.application);
+            assertTrue(e.getErrorSeverity() == ErrorSeverity.error);
+            assertTrue(e.getErrorTag() == ErrorTag.operation_failed);
+        }
+    }
+
+    @Test
+    public void testVoidInputOutputRpc() throws Exception {
+        RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceVoidInvoke);
+
+        Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-void-input-output.xml");
+        HandlingPriority priority = rpc.canHandle(rpcDocument);
+        Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+        Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+
+        verifyResponse(response, RPC_REPLY_OK);
+    }
+
+    /** Asserts structural similarity of response vs. template, ignoring element order. */
+    private void verifyResponse(Document response, Document template) {
+        DetailedDiff dd = new DetailedDiff(new Diff(response, template));
+        dd.overrideElementQualifier(new RecursiveElementNameAndTextQualifier());
+        assertTrue(dd.similar());
+    }
+
+    /** Finds the rpc definition matching namespace + local name, or null if absent. */
+    private RpcDefinition getRpcDefinitionFromModule(Module module, URI namespaceURI, String name) {
+        for (RpcDefinition rpcDef : module.getRpcs()) {
+            if (rpcDef.getQName().getNamespace().equals(namespaceURI)
+                    && rpcDef.getQName().getLocalName().equals(name)) {
+                return rpcDef;
+            }
+        }
+
+        return null;
+
+    }
+
+    /** Opens the yang test models from the classpath. */
+    private Collection<InputStream> getYangSchemas() {
+        final List<String> schemaPaths = Arrays.asList("/yang/mdsal-netconf-rpc-test.yang");
+        final List<InputStream> schemas = new ArrayList<>();
+
+        for (String schemaPath : schemaPaths) {
+            InputStream resourceAsStream = getClass().getResourceAsStream(schemaPath);
+            schemas.add(resourceAsStream);
+        }
+
+        return schemas;
+    }
+
+    /** Parses the given yang sources into a SchemaContext. */
+    private SchemaContext parseSchemas(Collection<InputStream> schemas) throws IOException, YangSyntaxErrorException {
+        final YangParserImpl parser = new YangParserImpl();
+        Collection<ByteSource> sources = BuilderUtils.streamsToByteSources(schemas);
+        return parser.parseSources(sources);
+    }
+}
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <test-string xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ test rpc input string 1
+ </test-string>
+ <test-string2 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ test rpc input string 2
+ </test-string2>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <nonvoid-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ test rpc input string 1
+ </test-string>
+ <test-string2>
+ test rpc input string 2
+ </test-string2>
+ </nonvoid-rpc>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <void-input-output-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test"/>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <void-output-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ test rpc input string 1
+ </test-string>
+ <test-string2>
+ test rpc input string 2
+ </test-string2>
+ </void-output-rpc>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="2">
+ <ok/>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+// Test model for RuntimeRpcTest: covers the three rpc shapes the mapper must
+// handle — no input/output, input-only (void output), and full input+output.
+module rpc-test {
+    yang-version 1;
+    namespace "urn:opendaylight:mdsal:mapping:rpc:test";
+    prefix "rpc";
+
+    // Rpc with neither input nor output; reply must be a bare <ok/>.
+    rpc void-input-output-rpc {
+
+    }
+
+    // Rpc with input but no output statement; reply must still be <ok/>.
+    rpc void-output-rpc {
+        input {
+            leaf test-string {
+                type string;
+            }
+
+            leaf test-string2 {
+                type string;
+            }
+        }
+    }
+
+    // Rpc whose output mirrors its input, so the test service can echo values back.
+    rpc nonvoid-rpc {
+        input {
+            leaf test-string {
+                type string;
+            }
+
+            leaf test-string2 {
+                type string;
+            }
+        }
+
+        output {
+            leaf test-string {
+                type string;
+            }
+
+            leaf test-string2 {
+                type string;
+            }
+        }
+    }
+}
+
package org.opendaylight.controller.netconf.client;
-import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
private static final XPathExpression sessionIdXPath = XMLNetconfUtil
.compileXPath("/netconf:hello/netconf:session-id");
+ private static final XPathExpression sessionIdXPathNoNamespace = XMLNetconfUtil
+ .compileXPath("/hello/session-id");
+
private static final String EXI_1_0_CAPABILITY_MARKER = "exi:1.0";
protected NetconfClientSessionNegotiator(final NetconfClientSessionPreferences sessionPreferences,
}
private long extractSessionId(final Document doc) {
-    final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
-    Preconditions.checkState(sessionIdNode != null, "");
-    String textContent = sessionIdNode.getTextContent();
-    if (textContent == null || textContent.equals("")) {
-        throw new IllegalStateException("Session id not received from server");
+    // Prefer the RFC-compliant namespaced <session-id>; fall back to a
+    // namespace-less lookup for servers that send hello without a namespace.
+    String textContent = getSessionIdWithXPath(doc, sessionIdXPath);
+    if (Strings.isNullOrEmpty(textContent)) {
+        textContent = getSessionIdWithXPath(doc, sessionIdXPathNoNamespace);
+        if (Strings.isNullOrEmpty(textContent)) {
+            // Include the full hello document to aid diagnosing broken servers.
+            throw new IllegalStateException("Session id not received from server, hello message: " + XmlUtil.toString(doc));
+        }
    }
    return Long.valueOf(textContent);
}
+// Evaluates the given xpath against the hello document; null when no node matches.
+// NOTE(review): the parameter shadows the static field of the same name — confirm intended.
+private String getSessionIdWithXPath(final Document doc, final XPathExpression sessionIdXPath) {
+    final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
+    return sessionIdNode != null ? sessionIdNode.getTextContent() : null;
+}
+
@Override
protected NetconfClientSession getSession(final NetconfClientSessionListener sessionListener, final Channel channel,
final NetconfHelloMessage message) throws NetconfDocumentedException {
<name>yang-schema-service</name>
</root-schema-service>
<dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
- <name>inmemory-data-broker</name>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
+ <name>dom-broker</name>
</dom-broker>
<mapper-aggregator xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">prefix:netconf-mapper-registry</type>
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class GetSchema extends AbstractLastNetconfOperation {
+public class GetSchema extends AbstractSingletonNetconfOperation {
public static final String GET_SCHEMA = "get-schema";
public static final String IDENTIFIER = "identifier";
public static final String VERSION = "version";
@Override
public synchronized void connect(final ChannelHandlerContext ctx, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise) throws Exception {
-    LOG.debug("XXX session connecting on channel {}. promise: {} ", ctx.channel(), connectPromise);
+    LOG.debug("SSH session connecting on channel {}. promise: {} ", ctx.channel(), connectPromise);
+    // Keep the promise so the asynchronous SSH handshake can complete or fail it later.
    this.connectPromise = promise;
    startSsh(ctx, remoteAddress);
}
*/
package org.opendaylight.controller.netconf.util.mapping;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
public abstract class AbstractSingletonNetconfOperation extends AbstractLastNetconfOperation {
super(netconfSessionIdForReporting);
}
+    // A singleton operation is always the last one in the execution chain,
+    // so the subsequent operation is ignored and handling is delegated to
+    // handleWithNoSubsequentOperations.
+    @Override
+    protected Element handle(Document document, XmlElement operationElement,
+            NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+        return handleWithNoSubsequentOperations(document, operationElement);
+    }
+
@Override
protected HandlingPriority getHandlingPriority() {
return HandlingPriority.HANDLE_WITH_MAX_PRIORITY;
private static boolean isHelloMessage(final Document document) {
XmlElement element = XmlElement.fromDomElement(document.getDocumentElement());
try {
+ // accept even if hello has no namespace
return element.getName().equals(HELLO_TAG) &&
- element.hasNamespace() &&
- element.getNamespace().equals(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
+ (!element.hasNamespace() || element.getNamespace().equals(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
} catch (MissingNameSpaceException e) {
// Cannot happen, since we check for hasNamespace
throw new IllegalStateException(e);
package org.opendaylight.controller.netconf.util.messages;
import com.google.common.base.Function;
+import com.google.common.base.Optional;
import com.google.common.collect.Collections2;
import java.util.Collection;
import java.util.List;
public static Collection<String> extractCapabilitiesFromHello(Document doc) throws NetconfDocumentedException {
XmlElement responseElement = XmlElement.fromDomDocument(doc);
- XmlElement capabilitiesElement = responseElement
- .getOnlyChildElementWithSameNamespace(XmlNetconfConstants.CAPABILITIES);
- List<XmlElement> caps = capabilitiesElement.getChildElements(XmlNetconfConstants.CAPABILITY);
+ // Extract child element <capabilities> from <hello> with or without(fallback) the same namespace
+ Optional<XmlElement> capabilitiesElement = responseElement
+ .getOnlyChildElementWithSameNamespaceOptionally(XmlNetconfConstants.CAPABILITIES)
+ .or(responseElement
+ .getOnlyChildElementOptionally(XmlNetconfConstants.CAPABILITIES));
+
+ List<XmlElement> caps = capabilitiesElement.get().getChildElements(XmlNetconfConstants.CAPABILITY);
return Collections2.transform(caps, new Function<XmlElement, String>() {
@Override
import com.google.common.base.Optional;
import io.netty.channel.local.LocalAddress;
import java.net.InetSocketAddress;
+import java.util.concurrent.TimeUnit;
import org.osgi.framework.BundleContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final String PRIVATE_KEY_PATH_PROP = ".pk.path";
private static final String CONNECTION_TIMEOUT_MILLIS_PROP = "connectionTimeoutMillis";
- private static final long DEFAULT_TIMEOUT_MILLIS = 5000;
+ public static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.SECONDS.toMillis(30);
private static final LocalAddress netconfLocalAddress = new LocalAddress("netconf");
public static LocalAddress getNetconfLocalAddress() {
assertEquals(NetconfConfigUtil.getNetconfLocalAddress(), new LocalAddress("netconf"));
doReturn("").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
- assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+ assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), NetconfConfigUtil.DEFAULT_TIMEOUT_MILLIS);
doReturn("a").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
- assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+ assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), NetconfConfigUtil.DEFAULT_TIMEOUT_MILLIS);
}
@Test