<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-dom-xsql</artifactId>
</dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-karaf-xsql</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-dom-xsql-config</artifactId>
<feature name ='odl-mdsal-xsql' version='${project.version}'>
<feature version='${project.version}'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/sal-dom-xsql/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-karaf-xsql/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/${config.xsql.configfile}">mvn:org.opendaylight.controller/sal-dom-xsql-config/${project.version}/xml/config</configfile>
</feature>
<feature name ='odl-mdsal-apidocs' version='${project.version}'>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
-
+ <parent>
+ <groupId>org.opendaylight.controller.archetypes</groupId>
+ <artifactId>archetypes-parent</artifactId>
+ <version>0.1.1-SNAPSHOT</version>
+ </parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-distro-archetype</artifactId>
<version>1.0.0-SNAPSHOT</version>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller.archetypes</groupId>
+ <artifactId>archetypes-parent</artifactId>
+ <version>0.1.1-SNAPSHOT</version>
+ </parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-features-archetype</artifactId>
<version>1.0.0-SNAPSHOT</version>
<artifactId>sal-dom-xsql</artifactId>
<version>${mdsal.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-karaf-xsql</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-dom-xsql-config</artifactId>
*/
public class FeatureConfigPusher {
private static final Logger logger = LoggerFactory.getLogger(FeatureConfigPusher.class);
+ private static final int MAX_RETRIES=100;
private FeaturesService featuresService = null;
private ConfigPusher pusher = null;
/*
}
private boolean isInstalled(Feature feature) {
- List<Feature> installedFeatures = Arrays.asList(featuresService.listInstalledFeatures());
+ List<Feature> installedFeatures= null;
+ boolean cont = true;
+ int retries = 0;
+ while(cont) {
+ try {
+ installedFeatures = Arrays.asList(featuresService.listInstalledFeatures());
+ break;
+ } catch (Exception e) {
+ if(retries < MAX_RETRIES) {
+ logger.warn("Karaf featuresService.listInstalledFeatures() has thrown an exception, retry {}, Exception {}", retries,e);
+ try {
+ Thread.sleep(1);
+ } catch (InterruptedException e1) {
+ throw new IllegalStateException(e1);
+ }
+ retries++;
+ continue;
+ } else {
+ logger.error("Giving up on Karaf featuresService.listInstalledFeatures() which has thrown an exception, retry {}, Exception {}", retries,e);
+ throw e;
+ }
+ }
+ }
return installedFeatures.contains(feature);
}
--- /dev/null
+@echo off
+rem
+rem
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+rem
+
+rem
+rem handle specific scripts; the SCRIPT_NAME is exactly the name of the Karaf
+rem script; for example karaf.bat, start.bat, stop.bat, admin.bat, client.bat, ...
+rem
+rem if "%KARAF_SCRIPT%" == "SCRIPT_NAME" (
+rem Actions go here...
+rem )
+
+rem
+rem general settings which should be applied for all scripts go here; please keep
+rem in mind that scripts might be executed more than once, for example the
+rem start script, which is executed first and is followed by the karaf
+rem script afterwards.
+rem
+
+rem
+rem The following section shows the possible configuration options for the default
+rem karaf scripts
+rem
+rem Window name of the windows console
+rem SET KARAF_TITLE
+rem Location of Java installation
+rem SET JAVA_HOME
+rem Minimum memory for the JVM
+rem SET JAVA_MIN_MEM
+rem Maximum memory for the JVM
+rem SET JAVA_MAX_MEM
+rem Minimum perm memory for the JVM
+rem SET JAVA_PERM_MEM
+rem Maximum perm memory for the JVM
+rem SET JAVA_MAX_PERM_MEM
+rem Karaf home folder
+rem SET KARAF_HOME
+rem Karaf data folder
+rem SET KARAF_DATA
+rem Karaf base folder
+rem SET KARAF_BASE
+rem Karaf etc folder
+rem SET KARAF_ETC
+rem Additional available Karaf options
+rem SET KARAF_OPTS
+rem Enable debug mode
+rem SET KARAF_DEBUG
+IF "%JAVA_MAX_PERM_MEM%"=="" SET JAVA_MAX_PERM_MEM=512m
+IF "%JAVA_MAX_MEM%"=="" SET JAVA_MAX_MEM=2048m
# default Openflow version = 1.0, we also support 1.3.
# ovsdb.of.version=1.3
+# ovsdb can be configured with ml2 to perform l3 forwarding. The config below enables that functionality, which is
+# disabled by default.
+# ovsdb.l3.fwd.enabled=yes
+
# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default
# gateway --on the external subnet-- is expected to be resolved from its inet address. The config below overrides that
# specific arp/neighDiscovery lookup.
# default Openflow version = 1.3, we also support 1.0.
ovsdb.of.version=1.3
+# ovsdb can be configured with ml2 to perform l3 forwarding. The config below enables that functionality, which is
+# disabled by default.
+# ovsdb.l3.fwd.enabled=yes
+
# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default
# gateway --on the external subnet-- is expected to be resolved from its inet address. The config below overrides that
# specific arp/neighDiscovery lookup.
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
-
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>releasepom</artifactId>
+ <version>0.1.2-SNAPSHOT</version>
+ <relativePath>../..</relativePath>
+ </parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>karaf.branding</artifactId>
<version>1.0.0-SNAPSHOT</version>
protected Object[] getImplementations() {
return new Object[] {
dataPacketService,
+ inventory,
};
}
_instanceConfigure((ComponentActivator)imp, c, containerName);
} else if (imp instanceof DataPacketServiceAdapter) {
_instanceConfigure((DataPacketServiceAdapter)imp, c, containerName);
+ } else if (imp instanceof InventoryAndReadAdapter) {
+ _instanceConfigure((InventoryAndReadAdapter)imp, c, containerName);
} else {
throw new IllegalArgumentException(String.format("Unhandled implementation class %s", imp.getClass()));
}
.setRequired(false));
}
+ private void _instanceConfigure(final InventoryAndReadAdapter imp, final Component it, String containerName) {
+ it.setInterface(new String[] {
+ IPluginInInventoryService.class.getName(),
+ IPluginInReadService.class.getName(),
+ }, properties());
+
+ it.add(createServiceDependency()
+ .setService(IPluginOutReadService.class)
+ .setCallbacks("setReadPublisher", "unsetReadPublisher")
+ .setRequired(false));
+ it.add(createServiceDependency()
+ .setService(IPluginOutInventoryService.class)
+ .setCallbacks("setInventoryPublisher", "unsetInventoryPublisher")
+ .setRequired(false));
+ }
+
private void _configure(final TopologyAdapter imp, final Component it) {
it.setInterface(IPluginInTopologyService.class.getName(), properties());
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdate;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdate;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
* @param id Table id
* @return Table contents, or null if not present
*/
- private Table readConfigTable(final Node node, final short id) {
+ private Table readOperationalTable(final Node node, final short id) {
final InstanceIdentifier<Table> tableRef = InstanceIdentifier.builder(Nodes.class)
- .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, InventoryMapping.toNodeKey(node))
+ .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, NodeMapping.toNodeKey(node))
.augmentation(FlowCapableNode.class)
.child(Table.class, new TableKey(id))
.build();
- return (Table) startChange().readConfigurationData(tableRef);
+ return (Table) startChange().readOperationalData(tableRef);
}
@Override
public List<FlowOnNode> readAllFlow(final Node node, final boolean cached) {
final ArrayList<FlowOnNode> output = new ArrayList<>();
- final Table table = readConfigTable(node, OPENFLOWV10_TABLE_ID);
+ final Table table = readOperationalTable(node, OPENFLOWV10_TABLE_ID);
if (table != null) {
final List<Flow> flows = table.getFlow();
LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
}
}
- // TODO (main): Shall we send request to the switch? It will make async request to the switch.
- // Once the plugin receives a response, it will let the adaptor know through onFlowStatisticsUpdate()
- // If we assume that md-sal statistics manager will always be running, then it is not required
- // But if not, then sending request will collect the latest data for adaptor at least.
- getFlowStatisticsService().getAllFlowsStatisticsFromAllFlowTables(
- new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder().setNode(NodeMapping.toNodeRef(node)).build());
return output;
}
@Override
public FlowOnNode readFlow(final Node node, final org.opendaylight.controller.sal.flowprogrammer.Flow targetFlow, final boolean cached) {
FlowOnNode ret = null;
- final Table table = readConfigTable(node, OPENFLOWV10_TABLE_ID);
+ final Table table = readOperationalTable(node, OPENFLOWV10_TABLE_ID);
if (table != null) {
final List<Flow> flows = table.getFlow();
InventoryAndReadAdapter.LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
@Override
public NodeTableStatistics readNodeTable(final NodeTable nodeTable, final boolean cached) {
NodeTableStatistics nodeStats = null;
- final Table table = readConfigTable(nodeTable.getNode(), (short) nodeTable.getID());
+ final Table table = readOperationalTable(nodeTable.getNode(), (short) nodeTable.getID());
if (table != null) {
final FlowTableStatisticsData tableStats = table.getAugmentation(FlowTableStatisticsData.class);
if (tableStats != null) {
* @return
*/
private static NodeId toNodeId(org.opendaylight.controller.sal.core.Node aDNode) {
- return new NodeId(aDNode.getType() + ":" + String.valueOf(aDNode.getID()));
+ String targetPrefix = null;
+ if (NodeIDType.OPENFLOW.equals(aDNode.getType())) {
+ targetPrefix = OPENFLOW_ID_PREFIX;
+ } else {
+ targetPrefix = aDNode.getType() + ":";
+ }
+
+ return new NodeId(targetPrefix + String.valueOf(aDNode.getID()));
+ }
+
+ /**
+ * @param aDNode the AD-SAL node to convert
+ * @return md-sal {@link NodeKey} built from the converted node id
+ */
+ public static NodeKey toNodeKey(org.opendaylight.controller.sal.core.Node aDNode) {
+ return new NodeKey(toNodeId(aDNode));
}
public static String toNodeConnectorType(final NodeConnectorId ncId, final NodeId nodeId) {
*/
package org.opendaylight.controller.sal.compatibility.topology;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CopyOnWriteArrayList;
-
+import com.google.common.base.Function;
+import com.google.common.collect.FluentIterable;
import org.opendaylight.controller.md.sal.binding.util.TypeSafeDataReader;
import org.opendaylight.controller.sal.compatibility.NodeMapping;
import org.opendaylight.controller.sal.core.ConstructionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Function;
-import com.google.common.collect.FluentIterable;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.regex.Pattern;
+
+import static com.google.common.base.Preconditions.checkNotNull;
public final class TopologyMapping {
private static final Logger LOG = LoggerFactory.getLogger(TopologyMapping.class);
+ private final static Pattern NUMBERS_ONLY = Pattern.compile("[0-9]+");
private TopologyMapping() {
throw new UnsupportedOperationException("Utility class. Instantiation is not allowed.");
public static NodeConnector toADNodeConnector(final TpId source, final NodeId nodeId) throws ConstructionException {
checkNotNull(source);
- return new NodeConnector(NodeConnectorIDType.OPENFLOW, Short.valueOf(toADNodeConnectorId(source)), toADNode(nodeId));
+ String nodeConnectorIdStripped = toADNodeConnectorId(source);
+ if (NUMBERS_ONLY.matcher(nodeConnectorIdStripped).matches()) {
+ return new NodeConnector(NodeConnectorIDType.OPENFLOW, Short.valueOf(nodeConnectorIdStripped), toADNode(nodeId));
+ }
+ LOG.debug("NodeConnectorId does not match openflow id type, using " + NodeMapping.MD_SAL_TYPE + "instead");
+ NodeConnectorIDType.registerIDType(NodeMapping.MD_SAL_TYPE, String.class, NodeMapping.MD_SAL_TYPE);
+ return new NodeConnector(NodeMapping.MD_SAL_TYPE, nodeConnectorIdStripped, toADNode(nodeId));
}
public static String toADNodeConnectorId(final TpId nodeConnectorId) {
public static Node toADNode(final NodeId nodeId) throws ConstructionException {
checkNotNull(nodeId);
- return new Node(NodeIDType.OPENFLOW, Long.valueOf(toADNodeId(nodeId)));
+ String nodeIdStripped = toADNodeId(nodeId);
+ if (NUMBERS_ONLY.matcher(nodeIdStripped).matches()) {
+ return new Node(NodeIDType.OPENFLOW, Long.valueOf(nodeIdStripped));
+ }
+ LOG.debug("NodeId does not match openflow id type, using " + NodeMapping.MD_SAL_TYPE + "instead");
+ NodeIDType.registerIDType(NodeMapping.MD_SAL_TYPE, String.class);
+ return new Node(NodeMapping.MD_SAL_TYPE, nodeId.getValue());
}
}
Assert.assertEquals(0xCC4E241C4A000000L, NodeMapping.openflowFullNodeIdToLong("14721743935839928320").longValue());
}
+ /**
+ * Test method for
+ * {@link org.opendaylight.controller.sal.compatibility.NodeMapping#toNodeKey(org.opendaylight.controller.sal.core.Node)}
+ * .
+ * @throws ConstructionException
+ */
+ @Test
+ public void testToNodeKey() throws ConstructionException {
+ org.opendaylight.controller.sal.core.Node aDNode = new org.opendaylight.controller.sal.core.Node(NodeIDType.OPENFLOW, 42L);
+ NodeKey nodeKey = NodeMapping.toNodeKey(aDNode);
+ Assert.assertEquals("openflow:42", nodeKey.getId().getValue());
+ }
+
/**
* @param nodeId
* @param portId
Assert.assertEquals("OF|00:00:00:00:00:00:00:01", observedNode.toString());
}
+ /**
+ * Test method for {@link org.opendaylight.controller.sal.compatibility.topology.TopologyMapping#toADNodeConnector(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId, org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId)}.
+ * @throws ConstructionException
+ */
+ @Test
+ public void bug1309ToADNodeConnector() throws ConstructionException {
+ NodeId nodeId = new NodeId("some_unknown_node");
+ TpId source = new TpId("192.168.0.1");
+ NodeConnector observedNodeConnector = TopologyMapping.toADNodeConnector(source, nodeId);
+
+ Assert.assertEquals("MD_SAL_DEPRECATED|192.168.0.1@MD_SAL_DEPRECATED|some_unknown_node", observedNodeConnector.toString());
+ }
+
}
*/
package org.opendaylight.controller.frm.impl;
-import com.google.common.base.Preconditions;
import org.opendaylight.controller.frm.ForwardingRulesManager;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Preconditions;
+
/**
* GroupForwarder
* It implements {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener}}
public FlowForwarder (final ForwardingRulesManager manager, final DataBroker db) {
super(manager, Flow.class);
Preconditions.checkNotNull(db, "DataBroker can not be null!");
- this.listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
- getWildCardPath(), FlowForwarder.this, DataChangeScope.SUBTREE);
+ registrationListener(db, 5);
+ }
+
+ private void registrationListener(final DataBroker db, int i) {
+ try {
+ listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath(), FlowForwarder.this, DataChangeScope.SUBTREE);
+ } catch (final Exception e) {
+ if (i >= 1) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e1) {
+ LOG.error("Thread interrupted '{}'", e1);
+ Thread.currentThread().interrupt();
+ }
+ registrationListener(db, --i);
+ } else {
+ LOG.error("FRM Flow DataChange listener registration fail!", e);
+ throw new IllegalStateException("FlowForwarder registration Listener fail! System needs restart.", e);
+ }
+ }
}
@Override
if (listenerRegistration != null) {
try {
listenerRegistration.close();
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.error("Error by stop FRM FlowChangeListener.", e);
}
listenerRegistration = null;
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
- this.provider.getSalFlowService().removeFlow(builder.build());
+ provider.getSalFlowService().removeFlow(builder.build());
}
}
builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
builder.setOriginalFlow((new OriginalFlowBuilder(original)).build());
- this.provider.getSalFlowService().updateFlow(builder.build());
+ provider.getSalFlowService().updateFlow(builder.build());
}
}
builder.setFlowRef(new FlowRef(identifier));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
- this.provider.getSalFlowService().addFlow(builder.build());
+ provider.getSalFlowService().addFlow(builder.build());
}
}
<!-- XSQL -->
<module>sal-dom-xsql</module>
+ <module>sal-karaf-xsql</module>
<module>sal-dom-xsql-config</module>
<!-- Yang Test Models for MD-SAL -->
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.japi.Creator;
+
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
+
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
import org.opendaylight.controller.cluster.example.messages.PrintRole;
@Override public String persistenceId() {
return getId();
}
+
+ @Override
+ protected void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ protected void appendRecoveredLogEntry(Payload data) {
+ }
+
+ @Override
+ protected void applyCurrentLogRecoveryBatch() {
+ }
+
+ @Override
+ protected void onRecoveryComplete() {
+ }
+
+ @Override
+ protected void applyRecoverySnapshot(ByteString snapshot) {
+ }
}
*/
public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
- protected List<ReplicatedLogEntry> journal;
+ // We define this as ArrayList so we can use ensureCapacity.
+ protected ArrayList<ReplicatedLogEntry> journal;
protected ByteString snapshot;
protected long snapshotIndex = -1;
protected long snapshotTerm = -1;
// to be used for rollback during save snapshot failure
- protected List<ReplicatedLogEntry> snapshottedJournal;
+ protected ArrayList<ReplicatedLogEntry> snapshottedJournal;
protected ByteString previousSnapshot;
protected long previousSnapshotIndex = -1;
protected long previousSnapshotTerm = -1;
journal.add(replicatedLogEntry);
}
+ @Override
+ public void increaseJournalLogCapacity(int amount) {
+ journal.ensureCapacity(journal.size() + amount);
+ }
+
@Override
public List<ReplicatedLogEntry> getFrom(long logEntryIndex) {
return getFrom(logEntryIndex, journal.size());
@Override
public void snapshotCommit() {
- snapshottedJournal.clear();
snapshottedJournal = null;
previousSnapshotIndex = -1;
previousSnapshotTerm = -1;
@Override
public void snapshotRollback() {
snapshottedJournal.addAll(journal);
- journal.clear();
journal = snapshottedJournal;
snapshottedJournal = null;
*
* @return long
*/
- public long getSnapshotBatchCount();
+ long getSnapshotBatchCount();
/**
* The interval at which a heart beat message will be sent to the remote
*
* @return FiniteDuration
*/
- public FiniteDuration getHeartBeatInterval();
+ FiniteDuration getHeartBeatInterval();
/**
* The interval in which a new election would get triggered if no leader is found
*
* @return FiniteDuration
*/
- public FiniteDuration getElectionTimeOutInterval();
+ FiniteDuration getElectionTimeOutInterval();
/**
* The maximum election time variance. The election is scheduled using both
*
* @return int
*/
- public int getElectionTimeVariance();
+ int getElectionTimeVariance();
/**
* The size (in bytes) of the snapshot chunk sent from Leader
*/
- public int getSnapshotChunkSize();
+ int getSnapshotChunkSize();
+
+ /**
+ * The number of journal log entries to batch on recovery before applying.
+ */
+ int getJournalRecoveryLogBatchSize();
}
private static final int SNAPSHOT_BATCH_COUNT = 20000;
+ private static final int JOURNAL_RECOVERY_LOG_BATCH_SIZE = 1000;
+
/**
* The maximum election time variance
*/
private static final int ELECTION_TIME_MAX_VARIANCE = 100;
- private final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
+ private static final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
/**
new FiniteDuration(100, TimeUnit.MILLISECONDS);
+ private FiniteDuration heartBeatInterval = HEART_BEAT_INTERVAL;
+ private long snapshotBatchCount = SNAPSHOT_BATCH_COUNT;
+ private int journalRecoveryLogBatchSize = JOURNAL_RECOVERY_LOG_BATCH_SIZE;
+
+ public void setHeartBeatInterval(FiniteDuration heartBeatInterval) {
+ this.heartBeatInterval = heartBeatInterval;
+ }
+
+ public void setSnapshotBatchCount(long snapshotBatchCount) {
+ this.snapshotBatchCount = snapshotBatchCount;
+ }
+
+ public void setJournalRecoveryLogBatchSize(int journalRecoveryLogBatchSize) {
+ this.journalRecoveryLogBatchSize = journalRecoveryLogBatchSize;
+ }
+
@Override
public long getSnapshotBatchCount() {
- return SNAPSHOT_BATCH_COUNT;
+ return snapshotBatchCount;
}
@Override
public FiniteDuration getHeartBeatInterval() {
- return HEART_BEAT_INTERVAL;
+ return heartBeatInterval;
}
-
@Override
public FiniteDuration getElectionTimeOutInterval() {
// returns 2 times the heart beat interval
public int getSnapshotChunkSize() {
return SNAPSHOT_CHUNK_SIZE;
}
+
+ @Override
+ public int getJournalRecoveryLogBatchSize() {
+ return journalRecoveryLogBatchSize;
+ }
}
import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.UntypedPersistentActor;
import com.google.common.base.Optional;
+import com.google.common.base.Stopwatch;
import com.google.protobuf.ByteString;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
-
import java.io.Serializable;
import java.util.Map;
* This context should NOT be passed directly to any other actor it is
* only to be consumed by the RaftActorBehaviors
*/
- protected RaftActorContext context;
+ private final RaftActorContext context;
/**
* The in-memory journal
private volatile boolean hasSnapshotCaptureInitiated = false;
+ private Stopwatch recoveryTimer;
+
+ private int currentRecoveryBatchCount;
+
public RaftActor(String id, Map<String, String> peerAddresses) {
this(id, peerAddresses, Optional.<ConfigParams>absent());
}
LOG);
}
- @Override public void onReceiveRecover(Object message) {
+ private void initRecoveryTimer() {
+ if(recoveryTimer == null) {
+ recoveryTimer = new Stopwatch();
+ recoveryTimer.start();
+ }
+ }
+
+ @Override
+ public void preStart() throws Exception {
+ LOG.info("Starting recovery for {} with journal batch size {}", persistenceId(),
+ context.getConfigParams().getJournalRecoveryLogBatchSize());
+ super.preStart();
+ }
+
+ @Override
+ public void onReceiveRecover(Object message) {
if (message instanceof SnapshotOffer) {
- LOG.info("SnapshotOffer called..");
- SnapshotOffer offer = (SnapshotOffer) message;
- Snapshot snapshot = (Snapshot) offer.snapshot();
+ onRecoveredSnapshot((SnapshotOffer)message);
+ } else if (message instanceof ReplicatedLogEntry) {
+ onRecoveredJournalLogEntry((ReplicatedLogEntry)message);
+ } else if (message instanceof ApplyLogEntries) {
+ onRecoveredApplyLogEntries((ApplyLogEntries)message);
+ } else if (message instanceof DeleteEntries) {
+ replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+ } else if (message instanceof UpdateElectionTerm) {
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+ } else if (message instanceof RecoveryCompleted) {
+ onRecoveryCompletedMessage();
+ }
+ }
- // Create a replicated log with the snapshot information
- // The replicated log can be used later on to retrieve this snapshot
- // when we need to install it on a peer
- replicatedLog = new ReplicatedLogImpl(snapshot);
+ private void onRecoveredSnapshot(SnapshotOffer offer) {
+ LOG.debug("SnapshotOffer called..");
- context.setReplicatedLog(replicatedLog);
- context.setLastApplied(snapshot.getLastAppliedIndex());
- context.setCommitIndex(snapshot.getLastAppliedIndex());
+ initRecoveryTimer();
- LOG.info("Applied snapshot to replicatedLog. " +
- "snapshotIndex={}, snapshotTerm={}, journal-size={}",
- replicatedLog.snapshotIndex, replicatedLog.snapshotTerm,
- replicatedLog.size()
- );
+ Snapshot snapshot = (Snapshot) offer.snapshot();
- // Apply the snapshot to the actors state
- applySnapshot(ByteString.copyFrom(snapshot.getState()));
+ // Create a replicated log with the snapshot information
+ // The replicated log can be used later on to retrieve this snapshot
+ // when we need to install it on a peer
+ replicatedLog = new ReplicatedLogImpl(snapshot);
- } else if (message instanceof ReplicatedLogEntry) {
- ReplicatedLogEntry logEntry = (ReplicatedLogEntry) message;
+ context.setReplicatedLog(replicatedLog);
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
- // Apply State immediately
- replicatedLog.append(logEntry);
- applyState(null, "recovery", logEntry.getData());
- context.setLastApplied(logEntry.getIndex());
- context.setCommitIndex(logEntry.getIndex());
+ Stopwatch timer = new Stopwatch();
+ timer.start();
- } else if (message instanceof DeleteEntries) {
- replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+ // Apply the snapshot to the actors state
+ applyRecoverySnapshot(ByteString.copyFrom(snapshot.getState()));
- } else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
- ((UpdateElectionTerm) message).getVotedFor());
+ timer.stop();
+ LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
+ replicatedLog.size(), persistenceId(), timer.toString(),
+ replicatedLog.snapshotIndex, replicatedLog.snapshotTerm);
+ }
- } else if (message instanceof RecoveryCompleted) {
- LOG.info(
- "RecoveryCompleted - Switching actor to Follower - " +
- "Persistence Id = " + persistenceId() +
- " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
- "journal-size={}",
- replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
- replicatedLog.snapshotTerm, replicatedLog.size());
- currentBehavior = switchBehavior(RaftState.Follower);
- onStateChanged();
+ private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received ReplicatedLogEntry for recovery: {}", logEntry.getIndex());
+ }
+
+ replicatedLog.append(logEntry);
+ }
+
+ private void onRecoveredApplyLogEntries(ApplyLogEntries ale) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received ApplyLogEntries for recovery, applying to state: {} to {}",
+ context.getLastApplied() + 1, ale.getToIndex());
+ }
+
+ for (long i = context.getLastApplied() + 1; i <= ale.getToIndex(); i++) {
+ batchRecoveredLogEntry(replicatedLog.get(i));
+ }
+
+ context.setLastApplied(ale.getToIndex());
+ context.setCommitIndex(ale.getToIndex());
+ }
+
+ private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+ initRecoveryTimer();
+
+ int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
+ if(currentRecoveryBatchCount == 0) {
+ startLogRecoveryBatch(batchSize);
+ }
+
+ appendRecoveredLogEntry(logEntry.getData());
+
+ if(++currentRecoveryBatchCount >= batchSize) {
+ endCurrentLogRecoveryBatch();
}
}
+ private void endCurrentLogRecoveryBatch() {
+ applyCurrentLogRecoveryBatch();
+ currentRecoveryBatchCount = 0;
+ }
+
+ private void onRecoveryCompletedMessage() {
+ if(currentRecoveryBatchCount > 0) {
+ endCurrentLogRecoveryBatch();
+ }
+
+ onRecoveryComplete();
+
+ String recoveryTime = "";
+ if(recoveryTimer != null) {
+ recoveryTimer.stop();
+ recoveryTime = " in " + recoveryTimer.toString();
+ recoveryTimer = null;
+ }
+
+ LOG.info(
+ "Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
+ "Persistence Id = " + persistenceId() +
+ " Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
+ "journal-size={}",
+ replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
+ replicatedLog.snapshotTerm, replicatedLog.size());
+
+ currentBehavior = switchBehavior(RaftState.Follower);
+ onStateChanged();
+ }
+
@Override public void onReceiveCommand(Object message) {
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
applyState(applyState.getClientActor(), applyState.getIdentifier(),
applyState.getReplicatedLogEntry().getData());
+ } else if (message instanceof ApplyLogEntries){
+ ApplyLogEntries ale = (ApplyLogEntries) message;
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Persisting ApplyLogEntries with index={}", ale.getToIndex());
+ }
+ persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
+ @Override
+ public void apply(ApplyLogEntries param) throws Exception {
+ }
+ });
+
} else if(message instanceof ApplySnapshot ) {
Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
return context.getLastApplied();
}
+ protected RaftActorContext getRaftActorContext() {
+ return context;
+ }
+
/**
* setPeerAddress sets the address of a known peer at a later time.
* <p>
protected abstract void applyState(ActorRef clientActor, String identifier,
Object data);
+ /**
+ * This method is called during recovery at the start of a batch of state entries. Derived
+ * classes should perform any initialization needed to start a batch.
+ */
+ protected abstract void startLogRecoveryBatch(int maxBatchSize);
+
+ /**
+ * This method is called during recovery to append state data to the current batch. This method
+ * is called 1 or more times after {@link #startLogRecoveryBatch}.
+ *
+ * @param data the state data
+ */
+ protected abstract void appendRecoveredLogEntry(Payload data);
+
+ /**
+ * This method is called during recovery to reconstruct the state of the actor.
+ *
+ * @param snapshot A snapshot of the state of the actor
+ */
+ protected abstract void applyRecoverySnapshot(ByteString snapshot);
+
+ /**
+ * This method is called during recovery at the end of a batch to apply the current batched
+ * log entries. This method is called after {@link #appendRecoveredLogEntry}.
+ */
+ protected abstract void applyCurrentLogRecoveryBatch();
+
+ /**
+ * This method is called when recovery is complete.
+ */
+ protected abstract void onRecoveryComplete();
+
/**
* This method will be called by the RaftActor when a snapshot needs to be
* created. The derived actor should respond with its current state.
protected abstract void createSnapshot();
/**
- * This method will be called by the RaftActor during recovery to
- * reconstruct the state of the actor.
- * <p/>
- * This method may also be called at any other point during normal
+ * This method can be called at any point during normal
* operations when the derived actor is out of sync with it's peers
* and the only way to bring it in sync is by applying a snapshot
*
// of a single command.
persist(replicatedLogEntry,
new Procedure<ReplicatedLogEntry>() {
+ @Override
public void apply(ReplicatedLogEntry evt) throws Exception {
// when a snaphsot is being taken, captureSnapshot != null
if (hasSnapshotCaptureInitiated == false &&
private long currentTerm = 0;
private String votedFor = null;
+ @Override
public long getCurrentTerm() {
return currentTerm;
}
+ @Override
public String getVotedFor() {
return votedFor;
}
this.LOG = logger;
}
+ @Override
public ActorRef actorOf(Props props){
return context.actorOf(props);
}
+ @Override
public ActorSelection actorSelection(String path){
return context.actorSelection(path);
}
+ @Override
public String getId() {
return id;
}
+ @Override
public ActorRef getActor() {
return actor;
}
+ @Override
public ElectionTerm getTermInformation() {
return termInformation;
}
+ @Override
public long getCommitIndex() {
return commitIndex;
}
this.commitIndex = commitIndex;
}
+ @Override
public long getLastApplied() {
return lastApplied;
}
*/
void append(ReplicatedLogEntry replicatedLogEntry);
+ /**
+ * Optimization method to increase the capacity of the journal log prior to appending entries.
+ *
+ * @param amount the amount to increase by
+ */
+ void increaseJournalLogCapacity(int amount);
+
/**
*
* @param replicatedLogEntry
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+import java.io.Serializable;
+
+/**
+ * ApplyLogEntries serves as a marker message which is stored in akka's persistent
+ * journal.
+ * During recovery, if this message is found, all in-memory journal entries from
+ * context.lastApplied to ApplyLogEntries.toIndex are applied to the state.
+ *
+ * This class is also used as an internal message sent from a Behavior to the
+ * RaftActor to persist the ApplyLogEntries marker.
+ *
+ */
+public class ApplyLogEntries implements Serializable {
+ // NOTE(review): consider declaring an explicit serialVersionUID since this class
+ // is Serializable and persisted to the journal — TODO confirm compatibility needs.
+ // Highest log index (inclusive) up to which entries should be applied to state.
+ private final int toIndex;
+
+ public ApplyLogEntries(int toIndex) {
+ this.toIndex = toIndex;
+ }
+
+ public int getToIndex() {
+ return toIndex;
+ }
+}
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
}
context.getLogger().debug("Setting last applied to {}", newLastApplied);
context.setLastApplied(newLastApplied);
+
+ // Send a message to persist an ApplyLogEntries marker message into akka's persistent
+ // journal; it will be used during recovery.
+ // If the above code throws an error and this message is not sent, that is fine:
+ // append entries received later would add this message to the journal.
+ actor().tell(new ApplyLogEntries((int) context.getLastApplied()), actor());
}
protected Object fromSerializableMessage(Object serializable){
LOG = context.getLogger();
- if (lastIndex() >= 0) {
- context.setCommitIndex(lastIndex());
- }
-
followers = context.getPeerAddresses().keySet();
for (String followerId : followers) {
FollowerLogInformation followerLogInformation =
new FollowerLogInformationImpl(followerId,
- new AtomicLong(lastIndex()),
+ new AtomicLong(context.getCommitIndex()),
new AtomicLong(-1));
followerToLog.put(followerId, followerLogInformation);
public static class MockPayload extends Payload implements Serializable {
private String value = "";
+ public MockPayload(){
+
+ }
+
public MockPayload(String s) {
this.value = s;
}
return index;
}
}
+
+ /**
+ * Fluent test builder that populates a SimpleReplicatedLog with mock entries.
+ */
+ public static class MockReplicatedLogBuilder {
+ private ReplicatedLog mockLog = new SimpleReplicatedLog();
+
+ // Appends entries for indices [start, end) — note 'end' is exclusive — all with the given term.
+ public MockReplicatedLogBuilder createEntries(int start, int end, int term) {
+ for (int i=start; i<end; i++) {
+ this.mockLog.append(new ReplicatedLogImplEntry(i, term, new MockRaftActorContext.MockPayload("foo" + i)));
+ }
+ return this;
+ }
+
+ // Appends a single entry with the given index, term and payload.
+ public MockReplicatedLogBuilder addEntry(int index, int term, MockPayload payload) {
+ this.mockLog.append(new ReplicatedLogImplEntry(index, term, payload));
+ return this;
+ }
+
+ // Returns the populated log.
+ public ReplicatedLog build() {
+ return this.mockLog;
+ }
+ }
}
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
+import akka.actor.Terminated;
import akka.event.Logging;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
+import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
+import org.junit.After;
import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal;
import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
-
+import scala.concurrent.duration.FiniteDuration;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
-
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.TestCase.assertEquals;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import static org.junit.Assert.assertEquals;
public class RaftActorTest extends AbstractActorTest {
+ @After
+ public void tearDown() {
+ MockAkkaJournal.clearJournal();
+ MockSnapshotStore.setMockSnapshot(null);
+ }
+
public static class MockRaftActor extends RaftActor {
- boolean applySnapshotCalled = false;
+ public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
+ private final Map<String, String> peerAddresses;
+ private final String id;
+ private final Optional<ConfigParams> config;
+
+ private MockRaftActorCreator(Map<String, String> peerAddresses, String id,
+ Optional<ConfigParams> config) {
+ this.peerAddresses = peerAddresses;
+ this.id = id;
+ this.config = config;
+ }
+
+ @Override
+ public MockRaftActor create() throws Exception {
+ return new MockRaftActor(id, peerAddresses, config);
+ }
+ }
+
+ private final CountDownLatch recoveryComplete = new CountDownLatch(1);
+ private final List<Object> state;
- public MockRaftActor(String id,
- Map<String, String> peerAddresses) {
- super(id, peerAddresses);
+ public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config) {
+ super(id, peerAddresses, config);
+ state = new ArrayList<>();
}
- public RaftActorContext getRaftActorContext() {
- return context;
+ public void waitForRecoveryComplete() {
+ try {
+ assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
}
- public boolean isApplySnapshotCalled() {
- return applySnapshotCalled;
+ public List<Object> getState() {
+ return state;
}
- public static Props props(final String id, final Map<String, String> peerAddresses){
- return Props.create(new Creator<MockRaftActor>(){
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config));
+ }
- @Override public MockRaftActor create() throws Exception {
- return new MockRaftActor(id, peerAddresses);
- }
- });
+ @Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
+ }
+
+ @Override
+ protected void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ protected void appendRecoveredLogEntry(Payload data) {
+ state.add(data);
}
- @Override protected void applyState(ActorRef clientActor,
- String identifier,
- Object data) {
+ @Override
+ protected void applyCurrentLogRecoveryBatch() {
+ }
+
+ @Override
+ protected void onRecoveryComplete() {
+ recoveryComplete.countDown();
+ }
+
+ @Override
+ protected void applyRecoverySnapshot(ByteString snapshot) {
+ try {
+ Object data = toObject(snapshot);
+ System.out.println("!!!!!applyRecoverySnapshot: "+data);
+ if (data instanceof List) {
+ state.addAll((List) data);
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
}
@Override protected void createSnapshot() {
}
@Override protected void applySnapshot(ByteString snapshot) {
- applySnapshotCalled = true;
}
@Override protected void onStateChanged() {
return this.getId();
}
+ // Deserializes a Java object from the given ByteString via standard Java serialization.
+ // NOTE(review): try-with-resources would be simpler here, and closing 'ois' also
+ // closes the underlying 'bis' — the separate close of 'bis' is redundant but harmless.
+ private Object toObject(ByteString bs) throws ClassNotFoundException, IOException {
+ Object obj = null;
+ ByteArrayInputStream bis = null;
+ ObjectInputStream ois = null;
+ try {
+ bis = new ByteArrayInputStream(bs.toByteArray());
+ ois = new ObjectInputStream(bis);
+ obj = ois.readObject();
+ } finally {
+ if (bis != null) {
+ bis.close();
+ }
+ if (ois != null) {
+ ois.close();
+ }
+ }
+ return obj;
+ }
+
+
}
public RaftActorTestKit(ActorSystem actorSystem, String actorName) {
super(actorSystem);
- raftActor = this.getSystem()
- .actorOf(MockRaftActor.props(actorName,
- Collections.EMPTY_MAP), actorName);
+ raftActor = this.getSystem().actorOf(MockRaftActor.props(actorName,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>absent()), actorName);
}
return
new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
) {
+ @Override
protected Boolean run() {
return true;
}
}
public void findLeader(final String expectedLeader){
+ raftActor.tell(new FindLeader(), getRef());
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- raftActor.tell(new FindLeader(), getRef());
-
- String s = new ExpectMsg<String>(duration("1 seconds"),
- "findLeader") {
- // do not put code outside this method, will run afterwards
- protected String match(Object in) {
- if (in instanceof FindLeaderReply) {
- return ((FindLeaderReply) in).getLeaderActor();
- } else {
- throw noMatch();
- }
- }
- }.get();// this extracts the received message
-
- assertEquals(expectedLeader, s);
-
- }
-
-
- };
+ FindLeaderReply reply = expectMsgClass(duration("5 seconds"), FindLeaderReply.class);
+ assertEquals("getLeaderActor", expectedLeader, reply.getLeaderActor());
}
public ActorRef getRaftActor() {
return raftActor;
}
-
}
}
@Test
- public void testActorRecovery() {
+ public void testRaftActorRecovery() throws Exception {
new JavaTestKit(getSystem()) {{
- new Within(duration("1 seconds")) {
- protected void run() {
-
- String persistenceId = "follower10";
-
- ActorRef followerActor = getSystem().actorOf(
- MockRaftActor.props(persistenceId, Collections.EMPTY_MAP), persistenceId);
-
-
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("E"));
- ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5, new MockRaftActorContext.MockPayload("F"));
- entries.add(entry1);
- entries.add(entry2);
-
- int lastApplied = 3;
- int lastIndex = 5;
- Snapshot snapshot = Snapshot.create("A B C D".getBytes(), entries, lastIndex, 1 , lastApplied, 1);
- MockSnapshotStore.setMockSnapshot(snapshot);
- MockSnapshotStore.setPersistenceId(persistenceId);
-
- followerActor.tell(PoisonPill.getInstance(), null);
- try {
- // give some time for actor to die
- Thread.sleep(200);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
-
- TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId, Collections.EMPTY_MAP));
- try {
- //give some time for snapshot offer to get called.
- Thread.sleep(200);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- RaftActorContext context = ref.underlyingActor().getRaftActorContext();
- assertEquals(entries.size(), context.getReplicatedLog().size());
- assertEquals(lastApplied, context.getLastApplied());
- assertEquals(lastApplied, context.getCommitIndex());
- assertTrue(ref.underlyingActor().isApplySnapshotCalled());
- }
-
- };
+ String persistenceId = "follower10";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ // Set the heartbeat interval high to essentially disable election otherwise the test
+ // may fail if the actor is switched to Leader and the commitIndex is set to the last
+ // log entry.
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ ActorRef followerActor = getSystem().actorOf(MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config)), persistenceId);
+
+ watch(followerActor);
+
+ List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
+ ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4,
+ new MockRaftActorContext.MockPayload("E"));
+ snapshotUnappliedEntries.add(entry1);
+
+ int lastAppliedDuringSnapshotCapture = 3;
+ int lastIndexDuringSnapshotCapture = 4;
+
+ // 4 messages as part of snapshot, which are applied to state
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
+ snapshotUnappliedEntries, lastIndexDuringSnapshotCapture, 1 ,
+ lastAppliedDuringSnapshotCapture, 1);
+ MockSnapshotStore.setMockSnapshot(snapshot);
+ MockSnapshotStore.setPersistenceId(persistenceId);
+
+ // add more entries after snapshot is taken
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
+ new MockRaftActorContext.MockPayload("F"));
+ ReplicatedLogEntry entry3 = new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
+ new MockRaftActorContext.MockPayload("G"));
+ ReplicatedLogEntry entry4 = new MockRaftActorContext.MockReplicatedLogEntry(1, 7,
+ new MockRaftActorContext.MockPayload("H"));
+ entries.add(entry2);
+ entries.add(entry3);
+ entries.add(entry4);
+
+ int lastAppliedToState = 5;
+ int lastIndex = 7;
+
+ MockAkkaJournal.addToJournal(5, entry2);
+ // 2 entries are applied to state besides the 4 entries in snapshot
+ MockAkkaJournal.addToJournal(6, new ApplyLogEntries(lastAppliedToState));
+ MockAkkaJournal.addToJournal(7, entry3);
+ MockAkkaJournal.addToJournal(8, entry4);
+
+ // kill the actor
+ followerActor.tell(PoisonPill.getInstance(), null);
+ expectMsgClass(duration("5 seconds"), Terminated.class);
+
+ unwatch(followerActor);
+
+ //reinstate the actor
+ TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(),
+ MockRaftActor.props(persistenceId, Collections.EMPTY_MAP,
+ Optional.<ConfigParams>of(config)));
+
+ ref.underlyingActor().waitForRecoveryComplete();
+
+ RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+ assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
+ context.getReplicatedLog().size());
+ assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", lastAppliedToState, context.getLastApplied());
+ assertEquals("Commit index", lastAppliedToState, context.getCommitIndex());
+ assertEquals("Recovered state size", 6, ref.underlyingActor().getState().size());
}};
-
}
-
+ // Serializes the given object with standard Java serialization and wraps the bytes
+ // in a protobuf ByteString, mirroring toObject() in MockRaftActor.
+ // NOTE(review): try-with-resources would make the cleanup simpler.
+ private ByteString fromObject(Object snapshot) throws Exception {
+ ByteArrayOutputStream b = null;
+ ObjectOutputStream o = null;
+ try {
+ b = new ByteArrayOutputStream();
+ o = new ObjectOutputStream(b);
+ o.writeObject(snapshot);
+ byte[] snapshotBytes = b.toByteArray();
+ return ByteString.copyFrom(snapshotBytes);
+ } finally {
+ if (o != null) {
+ o.flush();
+ o.close();
+ }
+ if (b != null) {
+ b.close();
+ }
+ }
+ }
}
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
-import akka.util.Timeout;
import com.google.protobuf.ByteString;
import junit.framework.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.Duration;
-import scala.concurrent.duration.FiniteDuration;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import static akka.pattern.Patterns.ask;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
}
public Object executeLocalOperation(ActorRef actor, Object message) throws Exception {
- FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
- Timeout operationTimeout = new Timeout(operationDuration);
- Future<Object> future = ask(actor, message, operationTimeout);
-
- try {
- return Await.result(future, operationDuration);
- } catch (Exception e) {
- throw e;
- }
+ return MessageCollectorActor.getAllMessages(actor);
}
public ByteString getNextChunk (ByteString bs, int offset){
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
import java.io.ByteArrayOutputStream;
actorContext.getReplicatedLog().removeFrom(0);
- actorContext.getReplicatedLog().append(new ReplicatedLogImplEntry(0, 1,
- new MockRaftActorContext.MockPayload("foo")));
-
- ReplicatedLogImplEntry entry =
- new ReplicatedLogImplEntry(1, 1,
- new MockRaftActorContext.MockPayload("foo"));
-
- actorContext.getReplicatedLog().append(entry);
+ actorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1)
+ .build());
Leader leader = new Leader(actorContext);
RaftState raftState = leader
- .handleMessage(senderActor, new Replicate(null, "state-id",entry));
+ .handleMessage(senderActor, new Replicate(null, "state-id",actorContext.getReplicatedLog().get(1)));
// State should not change
assertEquals(RaftState.Leader, raftState);
new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
new MockRaftActorContext.MockPayload("D"));
-
RaftState raftState = leader.handleMessage(senderActor, new SendInstallSnapshot());
assertEquals(RaftState.Leader, raftState);
return null;
}
+ public static class ForwardMessageToBehaviorActor extends MessageCollectorActor {
+ private static AbstractRaftActorBehavior behavior;
+
+ public ForwardMessageToBehaviorActor(){
+
+ }
+
+ @Override public void onReceive(Object message) throws Exception {
+ super.onReceive(message);
+ behavior.handleMessage(sender(), message);
+ }
+
+ public static void setBehavior(AbstractRaftActorBehavior behavior){
+ ForwardMessageToBehaviorActor.behavior = behavior;
+ }
+ }
+
+ @Test
+ public void testLeaderCreatedWithCommitIndexLessThanLastIndex() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ ActorRef followerActor = getSystem().actorOf(Props.create(ForwardMessageToBehaviorActor.class));
+
+ MockRaftActorContext followerActorContext =
+ new MockRaftActorContext("follower", getSystem(), followerActor);
+
+ Follower follower = new Follower(followerActorContext);
+
+ ForwardMessageToBehaviorActor.setBehavior(follower);
+
+ Map<String, String> peerAddresses = new HashMap();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ leaderActorContext.getReplicatedLog().removeFrom(0);
+
+ //create 3 entries
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ leaderActorContext.setCommitIndex(1);
+
+ followerActorContext.getReplicatedLog().removeFrom(0);
+
+ // follower too has the exact same log entries and has the same commit index
+ followerActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ followerActorContext.setCommitIndex(1);
+
+ Leader leader = new Leader(leaderActorContext);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
+ AppendEntriesMessages.AppendEntries appendEntries =
+ (AppendEntriesMessages.AppendEntries) MessageCollectorActor
+ .getFirstMatching(followerActor, AppendEntriesMessages.AppendEntries.class);
+
+ assertNotNull(appendEntries);
+
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(1, appendEntries.getLogEntries(0).getIndex());
+ assertEquals(0, appendEntries.getPrevLogIndex());
+
+ AppendEntriesReply appendEntriesReply =
+ (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
+ leaderActor, AppendEntriesReply.class);
+
+ assertNotNull(appendEntriesReply);
+
+ // follower returns its next index
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
+
+ }};
+ }
+
+
+ @Test
+ public void testLeaderCreatedWithCommitIndexLessThanFollowersCommitIndex() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ ActorRef followerActor = getSystem().actorOf(
+ Props.create(ForwardMessageToBehaviorActor.class));
+
+ MockRaftActorContext followerActorContext =
+ new MockRaftActorContext("follower", getSystem(), followerActor);
+
+ Follower follower = new Follower(followerActorContext);
+
+ ForwardMessageToBehaviorActor.setBehavior(follower);
+
+ Map<String, String> peerAddresses = new HashMap();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ leaderActorContext.getReplicatedLog().removeFrom(0);
+
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ leaderActorContext.setCommitIndex(1);
+
+ followerActorContext.getReplicatedLog().removeFrom(0);
+
+ followerActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ // follower has the same log entries but its commit index > leaders commit index
+ followerActorContext.setCommitIndex(2);
+
+ Leader leader = new Leader(leaderActorContext);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
+ AppendEntriesMessages.AppendEntries appendEntries =
+ (AppendEntriesMessages.AppendEntries) MessageCollectorActor
+ .getFirstMatching(followerActor, AppendEntriesMessages.AppendEntries.class);
+
+ assertNotNull(appendEntries);
+
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(1, appendEntries.getLogEntries(0).getIndex());
+ assertEquals(0, appendEntries.getPrevLogIndex());
+
+ AppendEntriesReply appendEntriesReply =
+ (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
+ leaderActor, AppendEntriesReply.class);
+
+ assertNotNull(appendEntriesReply);
+
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
+
+ }};
+ }
+
private static class LeaderTestKit extends JavaTestKit {
private LeaderTestKit(ActorSystem actorSystem) {
package org.opendaylight.controller.cluster.raft.utils;
+import akka.actor.ActorRef;
import akka.actor.UntypedActor;
+import akka.pattern.Patterns;
+import akka.util.Timeout;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.TimeUnit;
public class MessageCollectorActor extends UntypedActor {
messages.add(message);
}
}
+
+ /**
+ * Asks the given MessageCollectorActor for every message it has received so far,
+ * blocking up to 5 seconds for the reply.
+ * NOTE(review): the catch-and-rethrow below is a no-op and could be removed, and the
+ * cast to List&lt;Object&gt; is unchecked — TODO confirm the actor always replies with a List.
+ */
+ public static List<Object> getAllMessages(ActorRef actor) throws Exception {
+ FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
+ Timeout operationTimeout = new Timeout(operationDuration);
+ Future<Object> future = Patterns.ask(actor, "get-all-messages", operationTimeout);
+
+ try {
+ return (List<Object>) Await.result(future, operationDuration);
+ } catch (Exception e) {
+ throw e;
+ }
+ }
+
+ /**
+ * Get the first message that matches the specified class
+ * @param actor the MessageCollectorActor to query
+ * @param clazz the exact class to match (compared with equals, not isInstance)
+ * @return the first matching message, or null if none was received
+ */
+ public static Object getFirstMatching(ActorRef actor, Class clazz) throws Exception {
+ List<Object> allMessages = getAllMessages(actor);
+
+ for(Object message : allMessages){
+ if(message.getClass().equals(clazz)){
+ return message;
+ }
+ }
+
+ return null;
+ }
+
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
+import com.google.common.collect.Maps;
+import scala.concurrent.Future;
+
+import java.util.Map;
+import java.util.concurrent.Callable;
+
+/**
+ * In-memory akka persistence journal plugin for tests. Entries are staged statically
+ * via addToJournal() before an actor is (re)started, and replayed during recovery.
+ * Writes and deletes are no-ops. The static map is shared by all journal instances
+ * in the JVM, so tests must call clearJournal() between runs.
+ */
+public class MockAkkaJournal extends AsyncWriteJournal {
+
+ // Maps sequenceNr -> persisted message payload.
+ private static Map<Long, Object> journal = Maps.newHashMap();
+
+ public static void addToJournal(long sequenceNr, Object message) {
+ journal.put(sequenceNr, message);
+ }
+
+ public static void clearJournal() {
+ journal.clear();
+ }
+
+ @Override
+ public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr,
+ long toSequenceNr, long max, final Procedure<PersistentRepr> replayCallback) {
+
+ // NOTE(review): iteration is in HashMap order, not ascending sequenceNr order, and
+ // the fromSequenceNr/toSequenceNr/max bounds are ignored — fine only while tests
+ // stage entries in insertion order; consider a sorted map if replay order matters.
+ return Futures.future(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ for (Map.Entry<Long,Object> entry : journal.entrySet()) {
+ PersistentRepr persistentMessage =
+ new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId, false, null, null);
+ replayCallback.apply(persistentMessage);
+ }
+ return null;
+ }
+ }, context().dispatcher());
+ }
+
+ @Override
+ public Future<Long> doAsyncReadHighestSequenceNr(String s, long l) {
+ // NOTE(review): always reports 0 regardless of staged entries — TODO confirm this
+ // is sufficient for the recovery paths the tests exercise.
+ return Futures.successful(new Long(0));
+ }
+
+ // Writes are intentionally dropped: tests stage journal content via addToJournal().
+ @Override
+ public Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> persistentReprs) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> persistentConfirmations) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> persistentIds, boolean b) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncDeleteMessagesTo(String s, long l, boolean b) {
+ return Futures.successful(null);
+ }
+}
akka {
persistence.snapshot-store.plugin = "mock-snapshot-store"
+ persistence.journal.plugin = "mock-journal"
loglevel = "DEBUG"
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
# Dispatcher for the plugin actor.
plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
}
+
+mock-journal {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
private final DOMTransactionChain delegate;
private final BindingToNormalizedNodeCodec codec;
- private final DelegateChainListener delegatingListener;
- private final TransactionChainListener listener;
+ private final DelegateChainListener domListener;
+ private final TransactionChainListener bindingListener;
public BindingTranslatedTransactionChain(final DOMDataBroker chainFactory,
final BindingToNormalizedNodeCodec codec, final TransactionChainListener listener) {
Preconditions.checkNotNull(chainFactory, "DOM Transaction chain factory must not be null");
- this.delegatingListener = new DelegateChainListener();
- this.listener = listener;
- this.delegate = chainFactory.createTransactionChain(listener);
+ this.domListener = new DelegateChainListener();
+ this.bindingListener = listener;
+ this.delegate = chainFactory.createTransactionChain(domListener);
this.codec = codec;
}
* chain, so we are not changing any of our internal state
* to mark that we failed.
*/
- this.delegatingListener.onTransactionChainFailed(this, tx, t);
+ this.bindingListener.onTransactionChainFailed(this, tx, t);
}
@Override
public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
Preconditions.checkState(delegate.equals(chain),
"Illegal state - listener for %s was invoked for incorrect chain %s.", delegate, chain);
- listener.onTransactionChainSuccessful(BindingTranslatedTransactionChain.this);
+ bindingListener.onTransactionChainSuccessful(BindingTranslatedTransactionChain.this);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.persistence.UntypedPersistentActor;
+
+/**
+ * Base class for untyped persistent actors. On construction it sends a
+ * {@code Monitor} message to the "user/termination-monitor" actor selection so
+ * the actor can be watched, and it wraps command/recovery message handling
+ * with debug logging. Subclasses implement {@link #handleCommand(Object)} and
+ * {@link #handleRecover(Object)}.
+ */
+public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor {
+
+ // Per-actor Akka logging adapter (uppercase name by local convention; not a constant).
+ protected final LoggingAdapter LOG =
+ Logging.getLogger(getContext().system(), this);
+
+ public AbstractUntypedPersistentActor() {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor created {}", getSelf());
+ }
+ // Register with the termination monitor so actor lifecycle can be tracked.
+ // actorSelection is used, so a missing monitor actor is silently tolerated.
+ getContext().
+ system().
+ actorSelection("user/termination-monitor").
+ tell(new Monitor(getSelf()), getSelf());
+
+ }
+
+
+ // Logs receipt and completion of every command, delegating to handleCommand.
+ @Override public void onReceiveCommand(Object message) throws Exception {
+ final String messageType = message.getClass().getSimpleName();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received message {}", messageType);
+ }
+ handleCommand(message);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Done handling message {}", messageType);
+ }
+
+ }
+
+ // Logs receipt and completion of every recovery message, delegating to handleRecover.
+ @Override public void onReceiveRecover(Object message) throws Exception {
+ final String messageType = message.getClass().getSimpleName();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received message {}", messageType);
+ }
+ handleRecover(message);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Done handling message {}", messageType);
+ }
+
+ }
+
+ // Invoked for each message replayed from the persistence journal during recovery.
+ protected abstract void handleRecover(Object message) throws Exception;
+
+ // Invoked for each live (non-recovery) command message.
+ protected abstract void handleCommand(Object message) throws Exception;
+
+ // Deliberately drops a message, logging it at debug level.
+ protected void ignoreMessage(Object message) {
+ LOG.debug("Unhandled message {} ", message);
+ }
+
+ // Logs and passes an unrecognized message to Akka's unhandled() mechanism.
+ protected void unknownMessage(Object message) throws Exception {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received unhandled message {}", message);
+ }
+ unhandled(message);
+ }
+}
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
import com.google.common.base.Preconditions;
+
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil;
import org.opendaylight.yangtools.yang.data.api.Node;
import java.net.URI;
import java.util.ArrayList;
import java.util.Date;
+import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
private static class DeSerializer implements NormalizedNodeDeSerializationContext {
private static Map<NormalizedNodeType, DeSerializationFunction>
- deSerializationFunctions = new HashMap<>();
+ deSerializationFunctions = new EnumMap<>(NormalizedNodeType.class);
static {
deSerializationFunctions.put(CONTAINER_NODE_TYPE,
private NormalizedNode deSerialize(NormalizedNodeMessages.Node node){
Preconditions.checkNotNull(node, "node should not be null");
- DeSerializationFunction deSerializationFunction =
- Preconditions.checkNotNull(deSerializationFunctions.get(NormalizedNodeType.values()[node.getIntType()]), "Unknown type " + node);
+
+ DeSerializationFunction deSerializationFunction = deSerializationFunctions.get(
+ NormalizedNodeType.values()[node.getIntType()]);
return deSerializationFunction.apply(this, node);
}
NormalizedNode apply(DeSerializer deserializer, NormalizedNodeMessages.Node node);
}
}
-
-
-
-
}
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
import com.google.common.base.Preconditions;
+
import org.opendaylight.controller.cluster.datastore.node.utils.NodeIdentifierFactory;
import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import static org.opendaylight.controller.cluster.datastore.node.utils.serialization.PathArgumentType.getSerializablePathArgumentType;
public class PathArgumentSerializer {
+ private static final String REVISION_ARG = "?revision=";
private static final Map<Class, PathArgumentAttributesGetter> pathArgumentAttributesGetters = new HashMap<>();
public static NormalizedNodeMessages.PathArgument serialize(NormalizedNodeSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument){
// If this serializer is used qName cannot be null (see encodeQName)
// adding null check only in case someone tried to deSerialize a protocol buffer node
// that was not serialized using the PathArgumentSerializer
- Preconditions.checkNotNull(qName, "qName should not be null");
- Preconditions.checkArgument(!"".equals(qName.getLocalName()),
- "qName.localName cannot be empty qName = " + qName.toString());
- Preconditions.checkArgument(qName.getNamespace() != -1, "qName.namespace should be valid");
+// Preconditions.checkNotNull(qName, "qName should not be null");
+// Preconditions.checkArgument(qName.getNamespace() != -1, "qName.namespace should be valid");
- StringBuilder sb = new StringBuilder();
String namespace = context.getNamespace(qName.getNamespace());
- String revision = "";
String localName = context.getLocalName(qName.getLocalName());
+ StringBuilder sb;
if(qName.getRevision() != -1){
- revision = context.getRevision(qName.getRevision());
- sb.append("(").append(namespace).append("?revision=").append(
- revision).append(")").append(
- localName);
+ String revision = context.getRevision(qName.getRevision());
+ sb = new StringBuilder(namespace.length() + REVISION_ARG.length() + revision.length() +
+ localName.length() + 2);
+ sb.append('(').append(namespace).append(REVISION_ARG).append(
+ revision).append(')').append(localName);
} else {
- sb.append("(").append(namespace).append(")").append(
- localName);
+ sb = new StringBuilder(namespace.length() + localName.length() + 2);
+ sb.append('(').append(namespace).append(')').append(localName);
}
return sb.toString();
-
}
/**
NormalizedNodeDeSerializationContext context,
NormalizedNodeMessages.PathArgument pathArgument) {
- Preconditions.checkArgument(pathArgument.getIntType() >= 0
- && pathArgument.getIntType() < PathArgumentType.values().length,
- "Illegal PathArgumentType " + pathArgument.getIntType());
-
switch(PathArgumentType.values()[pathArgument.getIntType()]){
case NODE_IDENTIFIER_WITH_VALUE : {
NormalizedNodeDeSerializationContext context,
List<NormalizedNodeMessages.PathArgumentAttribute> attributesList) {
- Map<QName, Object> map = new HashMap<>();
-
- for(NormalizedNodeMessages.PathArgumentAttribute attribute : attributesList){
+ Map<QName, Object> map;
+ if(attributesList.size() == 1) {
+ NormalizedNodeMessages.PathArgumentAttribute attribute = attributesList.get(0);
NormalizedNodeMessages.QName name = attribute.getName();
Object value = parseAttribute(context, attribute);
+ map = Collections.singletonMap(QNameFactory.create(qNameToString(context, name)), value);
+ } else {
+ map = new HashMap<>();
+
+ for(NormalizedNodeMessages.PathArgumentAttribute attribute : attributesList){
+ NormalizedNodeMessages.QName name = attribute.getName();
+ Object value = parseAttribute(context, attribute);
- map.put(QNameFactory.create(qNameToString(context, name)), value);
+ map.put(QNameFactory.create(qNameToString(context, name)), value);
+ }
}
return map;
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
-import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
private static Object deSerializeBasicTypes(int valueType, String value) {
- Preconditions.checkArgument(valueType >= 0 && valueType < ValueType.values().length,
- "Illegal value type " + valueType );
-
switch(ValueType.values()[valueType]){
case SHORT_TYPE: {
return Short.valueOf(value);
public static final ValueType getSerializableType(Object node){
Preconditions.checkNotNull(node, "node should not be null");
- if(types.containsKey(node.getClass())) {
- return types.get(node.getClass());
+ ValueType type = types.get(node.getClass());
+ if(type != null) {
+ return type;
} else if(node instanceof Set){
return BITS_TYPE;
}
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.base.Preconditions;
-
+import org.opendaylight.controller.cluster.raft.ConfigParams;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
import java.util.concurrent.TimeUnit;
private final Duration shardTransactionIdleTimeout;
private final int operationTimeoutInSeconds;
private final String dataStoreMXBeanType;
+ private final ConfigParams shardRaftConfig;
public DatastoreContext() {
- this.dataStoreProperties = null;
- this.dataStoreMXBeanType = "DistributedDatastore";
- this.shardTransactionIdleTimeout = Duration.create(10, TimeUnit.MINUTES);
- this.operationTimeoutInSeconds = 5;
+ this("DistributedDatastore", null, Duration.create(10, TimeUnit.MINUTES), 5, 1000, 20000, 500);
}
public DatastoreContext(String dataStoreMXBeanType,
InMemoryDOMDataStoreConfigProperties dataStoreProperties,
Duration shardTransactionIdleTimeout,
- int operationTimeoutInSeconds) {
+ int operationTimeoutInSeconds,
+ int shardJournalRecoveryLogBatchSize,
+ int shardSnapshotBatchCount,
+ int shardHeartbeatIntervalInMillis) {
this.dataStoreMXBeanType = dataStoreMXBeanType;
- this.dataStoreProperties = Preconditions.checkNotNull(dataStoreProperties);
+ this.dataStoreProperties = dataStoreProperties;
this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
this.operationTimeoutInSeconds = operationTimeoutInSeconds;
+
+ DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
+ raftConfig.setHeartBeatInterval(new FiniteDuration(shardHeartbeatIntervalInMillis,
+ TimeUnit.MILLISECONDS));
+ raftConfig.setJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
+ raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
+ shardRaftConfig = raftConfig;
}
public InMemoryDOMDataStoreConfigProperties getDataStoreProperties() {
public int getOperationTimeoutInSeconds() {
return operationTimeoutInSeconds;
}
+
+ public ConfigParams getShardRaftConfig() {
+ return shardRaftConfig;
+ }
}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.raft.ConfigParams;
-import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.duration.FiniteDuration;
-
import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
/**
* A Shard represents a portion of the logical data tree <br/>
*/
public class Shard extends RaftActor {
- private static final ConfigParams configParams = new ShardConfigParams();
-
public static final String DEFAULT_NAME = "default";
// The state of this Shard
private ActorRef createSnapshotTransaction;
+ /**
+ * Coordinates persistence recovery on startup.
+ */
+ private ShardRecoveryCoordinator recoveryCoordinator;
+ private List<Object> currentLogRecoveryBatch;
+
private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
- private Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
+ protected Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
DatastoreContext datastoreContext, SchemaContext schemaContext) {
- super(name.toString(), mapPeerAddresses(peerAddresses), Optional.of(configParams));
+ super(name.toString(), mapPeerAddresses(peerAddresses),
+ Optional.of(datastoreContext.getShardRaftConfig()));
this.name = name;
this.datastoreContext = datastoreContext;
DOMStoreThreePhaseCommitCohort cohort =
modificationToCohort.remove(serialized);
if (cohort == null) {
-
- if(LOG.isDebugEnabled()) {
- LOG.debug(
- "Could not find cohort for modification : {}. Writing modification using a new transaction",
- modification);
- }
-
- DOMStoreWriteTransaction transaction =
- store.newWriteOnlyTransaction();
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Created new transaction {}", transaction.getIdentifier().toString());
- }
-
- modification.apply(transaction);
- try {
- syncCommitTransaction(transaction);
- } catch (InterruptedException | ExecutionException e) {
- shardMBean.incrementFailedTransactionsCount();
- LOG.error("Failed to commit", e);
- return;
- }
- //we want to just apply the recovery commit and return
- shardMBean.incrementCommittedTransactionCount();
+ // If there's no cached cohort then we must be applying replicated state.
+ commitWithNewTransaction(serialized);
return;
}
-
- if(sender == null){
+ if(sender == null) {
LOG.error("Commit failed. Sender cannot be null");
return;
}
}
+ private void commitWithNewTransaction(Object modification) {
+ // Applies a serialized composite modification in a fresh write-only
+ // transaction and commits it synchronously, updating the MBean counters.
+ // Used when replicated state arrives with no cached cohort.
+ DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
+ MutableCompositeModification.fromSerializable(modification, schemaContext).apply(tx);
+ try {
+ syncCommitTransaction(tx);
+ shardMBean.incrementCommittedTransactionCount();
+ } catch (InterruptedException | ExecutionException e) {
+ shardMBean.incrementFailedTransactionsCount();
+ LOG.error(e, "Failed to commit");
+ }
+ }
+
private void handleForwardedCommit(ForwardedCommitTransaction message) {
Object serializedModification =
message.getModification().toSerializable();
return config.isMetricCaptureEnabled();
}
- @Override protected void applyState(ActorRef clientActor, String identifier,
- Object data) {
+ @Override
+ protected void startLogRecoveryBatch(int maxBatchSize) {
+ // Begins a new batch of recovered journal entries, pre-sized to the
+ // configured maximum so the list never needs to grow during recovery.
+ currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} : starting log recovery batch with max size {}", persistenceId(), maxBatchSize);
+ }
+ }
+
+ @Override
+ protected void appendRecoveredLogEntry(Payload data) {
+ // Only CompositeModificationPayload entries are expected in the journal;
+ // anything else indicates a serialization/classloader problem.
+ if (data instanceof CompositeModificationPayload) {
+ currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
+ } else {
+ LOG.error("Unknown state received {} during recovery", data);
+ }
+ }
+
+ @Override
+ protected void applyRecoverySnapshot(ByteString snapshot) {
+ // Lazily create the coordinator on the first recovery event.
+ if(recoveryCoordinator == null) {
+ recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
+ }
+
+ // Each snapshot is de-serialized and applied on its own write transaction.
+ recoveryCoordinator.submit(snapshot, store.newWriteOnlyTransaction());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} : submitted recovery snapshot", persistenceId());
+ }
+ }
+
+ @Override
+ protected void applyCurrentLogRecoveryBatch() {
+ // Lazily create the coordinator in case no snapshot preceded the log entries.
+ if(recoveryCoordinator == null) {
+ recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
+ }
+
+ // Each batch is applied on its own write transaction by the coordinator.
+ recoveryCoordinator.submit(currentLogRecoveryBatch, store.newWriteOnlyTransaction());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} : submitted log recovery batch with size {}", persistenceId(),
+ currentLogRecoveryBatch.size());
+ }
+ }
+
+ @Override
+ protected void onRecoveryComplete() {
+ // Commit all transactions built up during recovery, then release the
+ // recovery state so it can be garbage-collected.
+ if(recoveryCoordinator != null) {
+ Collection<DOMStoreWriteTransaction> txList = recoveryCoordinator.getTransactions();
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} : recovery complete - committing {} Tx's", persistenceId(), txList.size());
+ }
+
+ for(DOMStoreWriteTransaction tx: txList) {
+ try {
+ syncCommitTransaction(tx);
+ shardMBean.incrementCommittedTransactionCount();
+ } catch (InterruptedException | ExecutionException e) {
+ // A failed commit is counted but does not abort the remaining Tx's.
+ shardMBean.incrementFailedTransactionsCount();
+ LOG.error(e, "Failed to commit");
+ }
+ }
+ }
+
+ recoveryCoordinator = null;
+ currentLogRecoveryBatch = null;
+ updateJournalStats();
+ }
+
+ @Override
+ protected void applyState(ActorRef clientActor, String identifier, Object data) {
if (data instanceof CompositeModificationPayload) {
- Object modification =
- ((CompositeModificationPayload) data).getModification();
+ Object modification = ((CompositeModificationPayload) data).getModification();
if (modification != null) {
commit(clientActor, modification);
} else {
LOG.error(
"modification is null - this is very unexpected, clientActor = {}, identifier = {}",
- identifier, clientActor.path().toString());
+ identifier, clientActor != null ? clientActor.path().toString() : null);
}
} else {
- LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}", data, data.getClass().getClassLoader(), CompositeModificationPayload.class.getClassLoader());
+ LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
+ data, data.getClass().getClassLoader(),
+ CompositeModificationPayload.class.getClassLoader());
}
- // Update stats
+ updateJournalStats();
+
+ }
+
+ private void updateJournalStats() {
ReplicatedLogEntry lastLogEntry = getLastLogEntry();
if (lastLogEntry != null) {
shardMBean.setCommitIndex(getCommitIndex());
shardMBean.setLastApplied(getLastApplied());
-
}
- @Override protected void createSnapshot() {
+ @Override
+ protected void createSnapshot() {
if (createSnapshotTransaction == null) {
// Create a transaction. We are really going to treat the transaction as a worker
}
}
- @VisibleForTesting @Override protected void applySnapshot(ByteString snapshot) {
+ @VisibleForTesting
+ @Override
+ protected void applySnapshot(ByteString snapshot) {
// Since this will be done only on Recovery or when this actor is a Follower
// we can safely commit everything in here. We not need to worry about event notifications
// as they would have already been disabled on the follower
return this.name.toString();
}
-
- private static class ShardConfigParams extends DefaultConfigParamsImpl {
- public static final FiniteDuration HEART_BEAT_INTERVAL =
- new FiniteDuration(500, TimeUnit.MILLISECONDS);
-
- @Override public FiniteDuration getHeartBeatInterval() {
- return HEART_BEAT_INTERVAL;
- }
- }
-
private static class ShardCreator implements Creator<Shard> {
private static final long serialVersionUID = 1L;
}
}
- @VisibleForTesting NormalizedNode readStore() throws ExecutionException, InterruptedException {
+ @VisibleForTesting
+ NormalizedNode<?,?> readStore(YangInstanceIdentifier id)
+ throws ExecutionException, InterruptedException {
DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
- transaction.read(YangInstanceIdentifier.builder().build());
+ transaction.read(id);
- NormalizedNode<?, ?> node = future.get().get();
+ Optional<NormalizedNode<?, ?>> optional = future.get();
+ NormalizedNode<?, ?> node = optional.isPresent()? optional.get() : null;
transaction.close();
return node;
}
- @VisibleForTesting void writeToStore(YangInstanceIdentifier id, NormalizedNode node)
+ @VisibleForTesting
+ void writeToStore(YangInstanceIdentifier id, NormalizedNode<?,?> node)
throws ExecutionException, InterruptedException {
DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
syncCommitTransaction(transaction);
}
+ @VisibleForTesting
+ ShardStats getShardMBean() {
+ return shardMBean;
+ }
}
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.cluster.ClusterEvent;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.japi.Function;
+import akka.japi.Procedure;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.RecoveryFailure;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
+import java.io.Serializable;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
/**
* The ShardManager has the following jobs,
* <li> Monitor the cluster members and store their addresses
* <ul>
*/
-public class ShardManager extends AbstractUntypedActorWithMetering {
+public class ShardManager extends AbstractUntypedPersistentActor {
+
+ protected final LoggingAdapter LOG =
+ Logging.getLogger(getContext().system(), this);
// Stores a mapping between a member name and the address of the member
// Member names look like "member-1", "member-2" etc and are as specified
private final DatastoreContext datastoreContext;
+ private final Collection<String> knownModules = new HashSet<>(128);
+
/**
* @param type defines the kind of data that goes into shards created by this shard manager. Examples of type would be
* configuration or operational
}
@Override
- public void handleReceive(Object message) throws Exception {
+ public void handleCommand(Object message) throws Exception {
if (message.getClass().equals(FindPrimary.SERIALIZABLE_CLASS)) {
findPrimary(
FindPrimary.fromSerializable(message));
}
+ @Override protected void handleRecover(Object message) throws Exception {
+
+ if(message instanceof SchemaContextModules){
+ SchemaContextModules msg = (SchemaContextModules) message;
+ knownModules.clear();
+ knownModules.addAll(msg.getModules());
+ } else if(message instanceof RecoveryFailure){
+ RecoveryFailure failure = (RecoveryFailure) message;
+ LOG.error(failure.cause(), "Recovery failed");
+ } else if(message instanceof RecoveryCompleted){
+ LOG.info("Recovery complete : {}", persistenceId());
+
+ // Delete all the messages from the akka journal except the last one
+ deleteMessages(lastSequenceNr() - 1);
+ }
+ }
+
private void findLocalShard(FindLocalShard message) {
ShardInformation shardInformation =
localShards.get(message.getShardName());
*
* @param message
*/
- private void updateSchemaContext(Object message) {
- SchemaContext schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+ private void updateSchemaContext(final Object message) {
+ final SchemaContext schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+
+ Set<ModuleIdentifier> allModuleIdentifiers = schemaContext.getAllModuleIdentifiers();
+ Set<String> newModules = new HashSet<>(128);
+
+ for(ModuleIdentifier moduleIdentifier : allModuleIdentifiers){
+ String s = moduleIdentifier.getNamespace().toString();
+ newModules.add(s);
+ }
+
+ if(newModules.containsAll(knownModules)) {
+
+ LOG.info("New SchemaContext has a super set of current knownModules - persisting info");
+
+ knownModules.clear();
+ knownModules.addAll(newModules);
+
+ persist(new SchemaContextModules(newModules), new Procedure<SchemaContextModules>() {
- if(localShards.size() == 0){
- createLocalShards(schemaContext);
+ @Override public void apply(SchemaContextModules param) throws Exception {
+ LOG.info("Sending new SchemaContext to Shards");
+ if (localShards.size() == 0) {
+ createLocalShards(schemaContext);
+ } else {
+ for (ShardInformation info : localShards.values()) {
+ info.getActor().tell(message, getSelf());
+ }
+ }
+ }
+
+ });
} else {
- for (ShardInformation info : localShards.values()) {
- info.getActor().tell(message, getSelf());
- }
+ LOG.info("Rejecting schema context update because it is not a super set of previously known modules");
}
+
}
private void findPrimary(FindPrimary message) {
ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
Map<ShardIdentifier, String> peerAddresses = getPeerAddresses(shardName);
ActorRef actor = getContext()
- .actorOf(Shard.props(shardId, peerAddresses, datastoreContext, schemaContext).
- withMailbox(ActorContext.MAILBOX), shardId.toString());
+ .actorOf(Shard.props(shardId, peerAddresses, datastoreContext, schemaContext),
+ shardId.toString());
localShardActorNames.add(shardId.toString());
localShards.put(shardName, new ShardInformation(shardName, actor, peerAddresses));
}
}
+ @Override public String persistenceId() {
+ // Unique journal id per datastore type, e.g. "shard-manager-config".
+ return "shard-manager-" + type;
+ }
+
+ @VisibleForTesting public Collection<String> getKnownModules() {
+ // Returns the live (mutable) collection; intended for test inspection only.
+ return knownModules;
+ }
+
private class ShardInformation {
private final String shardName;
private final ActorRef actor;
return new ShardManager(type, cluster, configuration, datastoreContext);
}
}
+
+ static class SchemaContextModules implements Serializable {
+ private final Set<String> modules;
+
+ SchemaContextModules(Set<String> modules){
+ this.modules = modules;
+ }
+
+ public Set<String> getModules() {
+ return modules;
+ }
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
+ * and journal log entry batch are de-serialized and applied to their own write transaction
+ * instance in parallel on a thread pool for faster recovery time. However, the transactions are
+ * committed to the data store in the order the corresponding snapshot or log batch are received
+ * to preserve data store integrity.
+ *
+ * @author Thomas Pantelis
+ */
+class ShardRecoveryCoordinator {
+
+ private static final int TIME_OUT = 10;
+
+ private static final Logger LOG = LoggerFactory.getLogger(ShardRecoveryCoordinator.class);
+
+ private final List<DOMStoreWriteTransaction> resultingTxList = Lists.newArrayList();
+ private final SchemaContext schemaContext;
+ private final String shardName;
+ private final ExecutorService executor;
+
+ ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext) {
+ this.schemaContext = schemaContext;
+ this.shardName = shardName;
+
+ executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(),
+ new ThreadFactoryBuilder().setDaemon(true)
+ .setNameFormat("ShardRecovery-" + shardName + "-%d").build());
+ }
+
+ /**
+ * Submits a batch of journal log entries.
+ *
+ * @param logEntries the serialized journal log entries
+ * @param resultingTx the write Tx to which to apply the entries
+ */
+ void submit(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
+ LogRecoveryTask task = new LogRecoveryTask(logEntries, resultingTx);
+ resultingTxList.add(resultingTx);
+ executor.execute(task);
+ }
+
+ /**
+ * Submits a snapshot.
+ *
+ * @param snapshot the serialized snapshot
+ * @param resultingTx the write Tx to which to apply the entries
+ */
+ void submit(ByteString snapshot, DOMStoreWriteTransaction resultingTx) {
+ SnapshotRecoveryTask task = new SnapshotRecoveryTask(snapshot, resultingTx);
+ resultingTxList.add(resultingTx);
+ executor.execute(task);
+ }
+
+ Collection<DOMStoreWriteTransaction> getTransactions() {
+ // Shutdown the executor and wait for task completion.
+ executor.shutdown();
+
+ try {
+ if(executor.awaitTermination(TIME_OUT, TimeUnit.MINUTES)) {
+ return resultingTxList;
+ } else {
+ LOG.error("Recovery for shard {} timed out after {} minutes", shardName, TIME_OUT);
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+
+ return Collections.emptyList();
+ }
+
+ private static abstract class ShardRecoveryTask implements Runnable {
+
+ final DOMStoreWriteTransaction resultingTx;
+
+ ShardRecoveryTask(DOMStoreWriteTransaction resultingTx) {
+ this.resultingTx = resultingTx;
+ }
+ }
+
+ private class LogRecoveryTask extends ShardRecoveryTask {
+
+ private final List<Object> logEntries;
+
+ LogRecoveryTask(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
+ super(resultingTx);
+ this.logEntries = logEntries;
+ }
+
+ @Override
+ public void run() {
+ for(int i = 0; i < logEntries.size(); i++) {
+ MutableCompositeModification.fromSerializable(
+ logEntries.get(i), schemaContext).apply(resultingTx);
+ // Null out the entry so it can be garbage-collected sooner.
+ logEntries.set(i, null);
+ }
+ }
+ }
+
+ private class SnapshotRecoveryTask extends ShardRecoveryTask {
+
+ private final ByteString snapshot;
+
+ SnapshotRecoveryTask(ByteString snapshot, DOMStoreWriteTransaction resultingTx) {
+ super(resultingTx);
+ this.snapshot = snapshot;
+ }
+
+ @Override
+ public void run() {
+ try {
+ NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
+ NormalizedNode<?, ?> node = new NormalizedNodeToNodeCodec(schemaContext).decode(
+ YangInstanceIdentifier.builder().build(), serializedNode);
+
+ // delete everything first
+ resultingTx.delete(YangInstanceIdentifier.builder().build());
+
+ // Add everything from the remote node back
+ resultingTx.write(YangInstanceIdentifier.builder().build(), node);
+ } catch (InvalidProtocolBufferException e) {
+ LOG.error("Error deserializing snapshot", e);
+ }
+ }
+ }
+}
DatastoreContext datastoreContext = new DatastoreContext("DistributedConfigDatastore",
InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue()),
+ props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
+ props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
+ props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
+ props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()),
Duration.create(props.getShardTransactionIdleTimeoutInMinutes().getValue(),
TimeUnit.MINUTES),
- props.getOperationTimeoutInSeconds().getValue());
+ props.getOperationTimeoutInSeconds().getValue(),
+ props.getShardJournalRecoveryLogBatchSize().getValue().intValue(),
+ props.getShardSnapshotBatchCount().getValue().intValue(),
+ props.getShardHearbeatIntervalInMillis().getValue());
return DistributedDataStoreFactory.createInstance("config", getConfigSchemaServiceDependency(),
datastoreContext, bundleContext);
DatastoreContext datastoreContext = new DatastoreContext("DistributedOperationalDatastore",
InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue()),
+ props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
+ props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
+ props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
+ props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()),
Duration.create(props.getShardTransactionIdleTimeoutInMinutes().getValue(),
TimeUnit.MINUTES),
- props.getOperationTimeoutInSeconds().getValue());
+ props.getOperationTimeoutInSeconds().getValue(),
+ props.getShardJournalRecoveryLogBatchSize().getValue().intValue(),
+ props.getShardSnapshotBatchCount().getValue().intValue(),
+ props.getShardHearbeatIntervalInMillis().getValue());
return DistributedDataStoreFactory.createInstance("operational",
getOperationalSchemaServiceDependency(), datastoreContext, bundleContext);
config:java-name-prefix DistributedOperationalDataStoreProvider;
}
- typedef non-zero-uint16-type {
- type uint16 {
+ typedef non-zero-uint32-type {
+ type uint32 {
range "1..max";
}
}
}
}
+ typedef heartbeat-interval-type {
+ type uint16 {
+ range "100..max";
+ }
+ }
+
grouping data-store-properties {
leaf max-shard-data-change-executor-queue-size {
default 1000;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum queue size for each shard's data store data change notification executor.";
}
leaf max-shard-data-change-executor-pool-size {
default 20;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum thread pool size for each shard's data store data change notification executor.";
}
leaf max-shard-data-change-listener-queue-size {
default 1000;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum queue size for each shard's data store data change listeners.";
}
leaf max-shard-data-store-executor-queue-size {
default 5000;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum queue size for each shard's data store executor.";
}
leaf shard-transaction-idle-timeout-in-minutes {
default 10;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.";
}
+ leaf shard-snapshot-batch-count {
+ default 20000;
+ type non-zero-uint32-type;
+ description "The minimum number of entries to be present in the in-memory journal log before a snapshot is taken.";
+ }
+
+ leaf shard-hearbeat-interval-in-millis {
+ default 500;
+ type heartbeat-interval-type;
+ description "The interval at which a shard will send a heartbeat message to its remote shard.";
+ }
+
leaf operation-timeout-in-seconds {
default 5;
type operation-timeout-type;
description "The maximum amount of time for akka operations (remote or local) to complete before failing.";
}
+ leaf shard-journal-recovery-log-batch-size {
+ default 5000;
+ type non-zero-uint32-type;
+ description "The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.";
+ }
+
leaf enable-metric-capture {
default false;
type boolean;
leaf bounded-mailbox-capacity {
default 1000;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "Max queue size that an actor's mailbox can reach";
}
}
import akka.actor.ActorSystem;
import akka.testkit.JavaTestKit;
-import org.apache.commons.io.FileUtils;
+
import org.junit.AfterClass;
import org.junit.BeforeClass;
-import java.io.File;
import java.io.IOException;
public abstract class AbstractActorTest {
System.setProperty("shard.persistent", "false");
system = ActorSystem.create("test");
-
- deletePersistenceFiles();
}
@AfterClass
public static void tearDownClass() throws IOException {
JavaTestKit.shutdownActorSystem(system);
system = null;
-
- deletePersistenceFiles();
- }
-
- protected static void deletePersistenceFiles() throws IOException {
- File journal = new File("journal");
-
- if(journal.exists()) {
- FileUtils.deleteDirectory(journal);
- }
-
- File snapshots = new File("snapshots");
-
- if(snapshots.exists()){
- FileUtils.deleteDirectory(snapshots);
- }
-
}
protected ActorSystem getSystem() {
return system;
}
-
}
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
-import junit.framework.Assert;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.Uninterruptibles;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import com.typesafe.config.ConfigValueFactory;
import org.junit.AfterClass;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import scala.concurrent.duration.Duration;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.Future;
+
+import java.net.URI;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
public class ShardManagerTest {
private static ActorSystem system;
@BeforeClass
- public static void setUp() {
- system = ActorSystem.create("test");
+ public static void setUpClass() {
+ Map<String, String> myJournal = new HashMap<>();
+ myJournal.put("class", "org.opendaylight.controller.cluster.datastore.ShardManagerTest$MyJournal");
+ myJournal.put("plugin-dispatcher", "akka.actor.default-dispatcher");
+ Config config = ConfigFactory.load()
+ .withValue("akka.persistence.journal.plugin",
+ ConfigValueFactory.fromAnyRef("my-journal"))
+ .withValue("my-journal", ConfigValueFactory.fromMap(myJournal));
+
+ MyJournal.clear();
+
+ system = ActorSystem.create("test", config);
}
@AfterClass
system = null;
}
+ @Before
+ public void setUpTest(){
+ MyJournal.clear();
+ }
+
@Test
public void testOnReceiveFindPrimaryForNonExistentShard() throws Exception {
- new JavaTestKit(system) {{
- final Props props = ShardManager
- .props("config", new MockClusterWrapper(),
- new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
+ new JavaTestKit(system) {
+ {
+ final Props props = ShardManager
+ .props("config", new MockClusterWrapper(),
+ new MockConfiguration(), new DatastoreContext());
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
+ final ActorRef subject = getSystem().actorOf(props);
- subject.tell(new FindPrimary("inventory").toSerializable(), getRef());
+ subject.tell(new FindPrimary("inventory").toSerializable(), getRef());
- expectMsgEquals(Duration.Zero(),
- new PrimaryNotFound("inventory").toSerializable());
-
- expectNoMsg();
- }
- };
- }};
+ expectMsgEquals(duration("2 seconds"),
+ new PrimaryNotFound("inventory").toSerializable());
+ }};
}
@Test
final Props props = ShardManager
.props("config", new MockClusterWrapper(),
new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
-
- subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
+ final ActorRef subject = getSystem().actorOf(props);
- subject.tell(new FindPrimary(Shard.DEFAULT_NAME).toSerializable(), getRef());
+ subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- expectMsgClass(duration("1 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+ subject.tell(new FindPrimary(Shard.DEFAULT_NAME).toSerializable(), getRef());
- expectNoMsg();
- }
- };
+ expectMsgClass(duration("1 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
}};
}
final Props props = ShardManager
.props("config", new MockClusterWrapper(),
new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new FindLocalShard("inventory"), getRef());
-
- final String out = new ExpectMsg<String>(duration("10 seconds"), "find local") {
- @Override
- protected String match(Object in) {
- if (in instanceof LocalShardNotFound) {
- return ((LocalShardNotFound) in).getShardName();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ final ActorRef subject = getSystem().actorOf(props);
- assertEquals("inventory", out);
+ subject.tell(new FindLocalShard("inventory"), getRef());
- expectNoMsg();
+ final String out = new ExpectMsg<String>(duration("3 seconds"), "find local") {
+ @Override
+ protected String match(Object in) {
+ if (in instanceof LocalShardNotFound) {
+ return ((LocalShardNotFound) in).getShardName();
+ } else {
+ throw noMatch();
+ }
}
- };
+ }.get(); // this extracts the received message
+
+ assertEquals("inventory", out);
}};
}
final Props props = ShardManager
.props("config", mockClusterWrapper,
new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
+
+ final ActorRef subject = getSystem().actorOf(props);
subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- new Within(duration("10 seconds")) {
+ subject.tell(new FindLocalShard(Shard.DEFAULT_NAME), getRef());
+
+ final ActorRef out = new ExpectMsg<ActorRef>(duration("3 seconds"), "find local") {
@Override
- protected void run() {
-
- subject.tell(new FindLocalShard(Shard.DEFAULT_NAME), getRef());
-
- final ActorRef out = new ExpectMsg<ActorRef>(duration("10 seconds"), "find local") {
- @Override
- protected ActorRef match(Object in) {
- if (in instanceof LocalShardFound) {
- return ((LocalShardFound) in).getPath();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ protected ActorRef match(Object in) {
+ if (in instanceof LocalShardFound) {
+ return ((LocalShardFound) in).getPath();
+ } else {
+ throw noMatch();
+ }
+ }
+ }.get(); // this extracts the received message
+
+ assertTrue(out.path().toString(),
+ out.path().toString().contains("member-1-shard-default-config"));
+ }};
+ }
+
+ @Test
+ public void testOnReceiveMemberUp() throws Exception {
+
+ new JavaTestKit(system) {{
+ final Props props = ShardManager
+ .props("config", new MockClusterWrapper(),
+ new MockConfiguration(), new DatastoreContext());
- assertTrue(out.path().toString(), out.path().toString().contains("member-1-shard-default-config"));
+ final ActorRef subject = getSystem().actorOf(props);
+ MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
- expectNoMsg();
+ subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+
+ final String out = new ExpectMsg<String>(duration("3 seconds"), "primary found") {
+ // do not put code outside this method, will run afterwards
+ @Override
+ protected String match(Object in) {
+ if (in.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
+ PrimaryFound f = PrimaryFound.fromSerializable(in);
+ return f.getPrimaryPath();
+ } else {
+ throw noMatch();
+ }
}
- };
+ }.get(); // this extracts the received message
+
+ assertTrue(out, out.contains("member-2-shard-astronauts-config"));
}};
}
@Test
- public void testOnReceiveMemberUp() throws Exception {
+ public void testOnReceiveMemberDown() throws Exception {
+ new JavaTestKit(system) {{
+ final Props props = ShardManager
+ .props("config", new MockClusterWrapper(),
+ new MockConfiguration(), new DatastoreContext());
+
+ final ActorRef subject = getSystem().actorOf(props);
+
+ MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
+
+ subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+
+ expectMsgClass(duration("3 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+
+ MockClusterWrapper.sendMemberRemoved(subject, "member-2", getRef().path().toString());
+
+ subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+
+ expectMsgClass(duration("1 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+ }};
+ }
+
+ @Test
+ public void testOnRecoveryJournalIsEmptied(){
+ MyJournal.addToJournal(1L, new ShardManager.SchemaContextModules(
+ ImmutableSet.of("foo")));
+
+ assertEquals(1, MyJournal.get().size());
+
+ new JavaTestKit(system) {{
+ final Props props = ShardManager
+ .props("config", new MockClusterWrapper(),
+ new MockConfiguration(), new DatastoreContext());
+
+ final ActorRef subject = getSystem().actorOf(props);
+
+ // Send message to check that ShardManager is ready
+ subject.tell(new FindPrimary("unknown").toSerializable(), getRef());
+
+ expectMsgClass(duration("3 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+
+ assertEquals(0, MyJournal.get().size());
+ }};
+ }
+
+ @Test
+ public void testOnRecoveryPreviouslyKnownModulesAreDiscovered() throws Exception {
new JavaTestKit(system) {{
final Props props = ShardManager
.props("config", new MockClusterWrapper(),
final TestActorRef<ShardManager> subject =
TestActorRef.create(system, props);
- // the run() method needs to finish within 3 seconds
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
-
- MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
-
- subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "primary found") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
- PrimaryFound f = PrimaryFound.fromSerializable(in);
- return f.getPrimaryPath();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ subject.underlyingActor().onReceiveRecover(new ShardManager.SchemaContextModules(ImmutableSet.of("foo")));
- Assert.assertTrue(out, out.contains("member-2-shard-astronauts-config"));
+ Collection<String> knownModules = subject.underlyingActor().getKnownModules();
- expectNoMsg();
- }
- };
+ assertTrue(knownModules.contains("foo"));
}};
}
@Test
- public void testOnReceiveMemberDown() throws Exception {
+ public void testOnUpdateSchemaContextUpdateKnownModulesIfTheyContainASuperSetOfTheKnownModules()
+ throws Exception {
+ new JavaTestKit(system) {{
+ final Props props = ShardManager
+ .props("config", new MockClusterWrapper(),
+ new MockConfiguration(), new DatastoreContext());
+ final TestActorRef<ShardManager> subject =
+ TestActorRef.create(system, props);
+
+ Collection<String> knownModules = subject.underlyingActor().getKnownModules();
+
+ assertEquals(0, knownModules.size());
+
+ SchemaContext schemaContext = mock(SchemaContext.class);
+ Set<ModuleIdentifier> moduleIdentifierSet = new HashSet<>();
+
+ ModuleIdentifier foo = mock(ModuleIdentifier.class);
+ when(foo.getNamespace()).thenReturn(new URI("foo"));
+
+ moduleIdentifierSet.add(foo);
+
+ when(schemaContext.getAllModuleIdentifiers()).thenReturn(moduleIdentifierSet);
+
+ subject.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertTrue(knownModules.contains("foo"));
+
+ assertEquals(1, knownModules.size());
+
+ ModuleIdentifier bar = mock(ModuleIdentifier.class);
+ when(bar.getNamespace()).thenReturn(new URI("bar"));
+
+ moduleIdentifierSet.add(bar);
+
+ subject.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertTrue(knownModules.contains("bar"));
+ assertEquals(2, knownModules.size());
+
+ }};
+
+ }
+
+
+ @Test
+ public void testOnUpdateSchemaContextDoNotUpdateKnownModulesIfTheyDoNotContainASuperSetOfKnownModules()
+ throws Exception {
new JavaTestKit(system) {{
final Props props = ShardManager
.props("config", new MockClusterWrapper(),
final TestActorRef<ShardManager> subject =
TestActorRef.create(system, props);
- // the run() method needs to finish within 3 seconds
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
+ Collection<String> knownModules = subject.underlyingActor().getKnownModules();
- MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
+ assertEquals(0, knownModules.size());
- subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+ SchemaContext schemaContext = mock(SchemaContext.class);
+ Set<ModuleIdentifier> moduleIdentifierSet = new HashSet<>();
- expectMsgClass(duration("1 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+ ModuleIdentifier foo = mock(ModuleIdentifier.class);
+ when(foo.getNamespace()).thenReturn(new URI("foo"));
- MockClusterWrapper.sendMemberRemoved(subject, "member-2", getRef().path().toString());
+ moduleIdentifierSet.add(foo);
- subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+ when(schemaContext.getAllModuleIdentifiers()).thenReturn(moduleIdentifierSet);
- expectMsgClass(duration("1 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+ subject.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertTrue(knownModules.contains("foo"));
+
+ assertEquals(1, knownModules.size());
+
+ //Create a completely different SchemaContext with only the bar module in it
+ schemaContext = mock(SchemaContext.class);
+ moduleIdentifierSet = new HashSet<>();
+ ModuleIdentifier bar = mock(ModuleIdentifier.class);
+ when(bar.getNamespace()).thenReturn(new URI("bar"));
+
+ moduleIdentifierSet.add(bar);
+
+ subject.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertFalse(knownModules.contains("bar"));
+
+ assertEquals(1, knownModules.size());
- expectNoMsg();
- }
- };
}};
+
+ }
+
+
+ private void sleep(long period){
+ Uninterruptibles.sleepUninterruptibly(period, TimeUnit.MILLISECONDS);
}
+ public static class MyJournal extends AsyncWriteJournal {
+
+ private static Map<Long, Object> journal = Maps.newTreeMap();
+
+ public static void addToJournal(Long sequenceNr, Object value){
+ journal.put(sequenceNr, value);
+ }
+
+ public static Map<Long, Object> get(){
+ return journal;
+ }
+
+ public static void clear(){
+ journal.clear();
+ }
+ @Override public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr, long toSequenceNr, long max,
+ final Procedure<PersistentRepr> replayCallback) {
+ if(journal.size() == 0){
+ return Futures.successful(null);
+ }
+ return Futures.future(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ for (Map.Entry<Long, Object> entry : journal.entrySet()) {
+ PersistentRepr persistentMessage =
+ new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId,
+ false, null, null);
+ replayCallback.apply(persistentMessage);
+ }
+ return null;
+ }
+ }, context().dispatcher());
+ }
+
+ @Override public Future<Long> doAsyncReadHighestSequenceNr(String s, long l) {
+ return Futures.successful(-1L);
+ }
+
+ @Override public Future<Void> doAsyncWriteMessages(
+ final Iterable<PersistentRepr> persistentReprs) {
+ return Futures.future(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ for (PersistentRepr repr : persistentReprs){
+ if(repr.payload() instanceof ShardManager.SchemaContextModules) {
+ journal.put(repr.sequenceNr(), repr.payload());
+ }
+ }
+ return null;
+ }
+ }, context().dispatcher());
+ }
+
+ @Override public Future<Void> doAsyncWriteConfirmations(
+ Iterable<PersistentConfirmation> persistentConfirmations) {
+ return Futures.successful(null);
+ }
+
+ @Override public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> persistentIds,
+ boolean b) {
+ clear();
+ return Futures.successful(null);
+ }
+
+ @Override public Future<Void> doAsyncDeleteMessagesTo(String s, long l, boolean b) {
+ clear();
+ return Futures.successful(null);
+ }
+ }
}
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.event.Logging;
+import akka.japi.Creator;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
+import org.junit.After;
import org.junit.Assert;
+import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.duration.Duration;
import java.io.IOException;
import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
-
+import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
public class ShardTest extends AbstractActorTest {
- private static final DatastoreContext DATA_STORE_CONTEXT = new DatastoreContext();
+ private static final DatastoreContext DATA_STORE_CONTEXT =
+ new DatastoreContext("", null, Duration.create(10, TimeUnit.MINUTES), 5, 3, 5000, 500);
- @Test
- public void testOnReceiveRegisterListener() throws Exception {
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
+ private static final SchemaContext SCHEMA_CONTEXT = TestModel.createTestContext();
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testRegisterChangeListener");
+ private static final ShardIdentifier IDENTIFIER = ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ @Before
+ public void setUp() {
+ System.setProperty("shard.persistent", "false");
- subject.tell(
- new UpdateSchemaContext(SchemaContextHelper.full()),
- getRef());
+ InMemorySnapshotStore.clear();
+ InMemoryJournal.clear();
+ }
- subject.tell(new RegisterChangeListener(TestModel.TEST_PATH,
- getRef().path(), AsyncDataBroker.DataChangeScope.BASE),
- getRef());
+ @After
+ public void tearDown() {
+ InMemorySnapshotStore.clear();
+ InMemoryJournal.clear();
+ }
- final Boolean notificationEnabled = new ExpectMsg<Boolean>(
- duration("3 seconds"), "enable notification") {
- // do not put code outside this method, will run afterwards
- @Override
- protected Boolean match(Object in) {
- if(in instanceof EnableNotification){
- return ((EnableNotification) in).isEnabled();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertFalse(notificationEnabled);
-
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(RegisterChangeListenerReply.class)) {
- RegisterChangeListenerReply reply =
- (RegisterChangeListenerReply) in;
- return reply.getListenerRegistrationPath()
- .toString();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ private Props newShardProps() {
+ return Shard.props(IDENTIFIER, Collections.<ShardIdentifier,String>emptyMap(),
+ DATA_STORE_CONTEXT, SCHEMA_CONTEXT);
+ }
- assertTrue(out.matches(
- "akka:\\/\\/test\\/user\\/testRegisterChangeListener\\/\\$.*"));
- }
+ @Test
+ public void testOnReceiveRegisterListener() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ ActorRef subject = getSystem().actorOf(newShardProps(), "testRegisterChangeListener");
+ subject.tell(new UpdateSchemaContext(SchemaContextHelper.full()), getRef());
- };
+ subject.tell(new RegisterChangeListener(TestModel.TEST_PATH,
+ getRef().path(), AsyncDataBroker.DataChangeScope.BASE), getRef());
+
+ EnableNotification enable = expectMsgClass(duration("3 seconds"), EnableNotification.class);
+ assertEquals("isEnabled", false, enable.isEnabled());
+
+ RegisterChangeListenerReply reply = expectMsgClass(duration("3 seconds"),
+ RegisterChangeListenerReply.class);
+ assertTrue(reply.getListenerRegistrationPath().toString().matches(
+ "akka:\\/\\/test\\/user\\/testRegisterChangeListener\\/\\$.*"));
}};
}
@Test
public void testCreateTransaction(){
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testCreateTransaction");
-
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(subject.path().toString())
- .message("Switching from state Candidate to Leader")
- .occurrences(1).exec();
-
- Assert.assertEquals(true, result);
+ new ShardTestKit(getSystem()) {{
+ ActorRef subject = getSystem().actorOf(newShardProps(), "testCreateTransaction");
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ waitUntilLeader(subject);
- subject.tell(
- new UpdateSchemaContext(TestModel.createTestContext()),
- getRef());
+ subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(),
- getRef());
+ subject.tell(new CreateTransaction("txn-1",
+ TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(), getRef());
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in instanceof CreateTransactionReply) {
- CreateTransactionReply reply =
- (CreateTransactionReply) in;
- return reply.getTransactionActorPath()
- .toString();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ CreateTransactionReply reply = expectMsgClass(duration("3 seconds"),
+ CreateTransactionReply.class);
- assertTrue("Unexpected transaction path " + out,
- out.contains("akka://test/user/testCreateTransaction/shard-txn-1"));
- expectNoMsg();
- }
- };
+ String path = reply.getTransactionActorPath().toString();
+ assertTrue("Unexpected transaction path " + path,
+ path.contains("akka://test/user/testCreateTransaction/shard-txn-1"));
+ expectNoMsg();
}};
}
@Test
public void testCreateTransactionOnChain(){
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testCreateTransactionOnChain");
-
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(subject.path().toString())
- .message("Switching from state Candidate to Leader")
- .occurrences(1).exec();
-
- Assert.assertEquals(true, result);
-
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ new ShardTestKit(getSystem()) {{
+ final ActorRef subject = getSystem().actorOf(newShardProps(), "testCreateTransactionOnChain");
- subject.tell(
- new UpdateSchemaContext(TestModel.createTestContext()),
- getRef());
+ waitUntilLeader(subject);
- subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() , "foobar").toSerializable(),
- getRef());
+ subject.tell(new CreateTransaction("txn-1",
+ TransactionProxy.TransactionType.READ_ONLY.ordinal() , "foobar").toSerializable(),
+ getRef());
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in instanceof CreateTransactionReply) {
- CreateTransactionReply reply =
- (CreateTransactionReply) in;
- return reply.getTransactionActorPath()
- .toString();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ CreateTransactionReply reply = expectMsgClass(duration("3 seconds"),
+ CreateTransactionReply.class);
- assertTrue("Unexpected transaction path " + out,
- out.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
- expectNoMsg();
- }
- };
+ String path = reply.getTransactionActorPath().toString();
+ assertTrue("Unexpected transaction path " + path,
+ path.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
+ expectNoMsg();
}};
}
@Test
public void testPeerAddressResolved(){
new JavaTestKit(getSystem()) {{
- Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
-
final ShardIdentifier identifier =
ShardIdentifier.builder().memberName("member-1")
.shardName("inventory").type("config").build();
- peerAddresses.put(identifier, null);
- final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testPeerAddressResolved");
+ Props props = Shard.props(identifier,
+ Collections.<ShardIdentifier, String>singletonMap(identifier, null),
+ DATA_STORE_CONTEXT, SCHEMA_CONTEXT);
+ final ActorRef subject = getSystem().actorOf(props, "testPeerAddressResolved");
new Within(duration("3 seconds")) {
@Override
@Test
public void testApplySnapshot() throws ExecutionException, InterruptedException {
- Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
+ TestActorRef<Shard> ref = TestActorRef.create(getSystem(), newShardProps());
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
+ NormalizedNodeToNodeCodec codec =
+ new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT);
- peerAddresses.put(identifier, null);
- final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT, TestModel.createTestContext());
+ ref.underlyingActor().writeToStore(TestModel.TEST_PATH, ImmutableNodes.containerNode(
+ TestModel.TEST_QNAME));
- TestActorRef<Shard> ref = TestActorRef.create(getSystem(), props);
+ YangInstanceIdentifier root = YangInstanceIdentifier.builder().build();
+ NormalizedNode<?,?> expected = ref.underlyingActor().readStore(root);
- ref.underlyingActor().updateSchemaContext(TestModel.createTestContext());
+ NormalizedNodeMessages.Container encode = codec.encode(root, expected);
- NormalizedNodeToNodeCodec codec =
- new NormalizedNodeToNodeCodec(TestModel.createTestContext());
+ ApplySnapshot applySnapshot = new ApplySnapshot(Snapshot.create(
+ encode.getNormalizedNode().toByteString().toByteArray(),
+ Collections.<ReplicatedLogEntry>emptyList(), 1, 2, 3, 4));
- ref.underlyingActor().writeToStore(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ ref.underlyingActor().onReceiveCommand(applySnapshot);
- NormalizedNode expected = ref.underlyingActor().readStore();
+ NormalizedNode<?,?> actual = ref.underlyingActor().readStore(root);
- NormalizedNodeMessages.Container encode = codec
- .encode(YangInstanceIdentifier.builder().build(), expected);
+ assertEquals(expected, actual);
+ }
+ @Test
+ public void testApplyState() throws Exception {
- ref.underlyingActor().applySnapshot(encode.getNormalizedNode().toByteString());
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps());
- NormalizedNode actual = ref.underlyingActor().readStore();
+ NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- assertEquals(expected, actual);
- }
+ MutableCompositeModification compMod = new MutableCompositeModification();
+ compMod.addModification(new WriteModification(TestModel.TEST_PATH, node, SCHEMA_CONTEXT));
+ Payload payload = new CompositeModificationPayload(compMod.toSerializable());
+ ApplyState applyState = new ApplyState(null, "test",
+ new ReplicatedLogImplEntry(1, 2, payload));
- private static class ShardTestKit extends JavaTestKit {
+ shard.underlyingActor().onReceiveCommand(applyState);
- private ShardTestKit(ActorSystem actorSystem) {
- super(actorSystem);
+ NormalizedNode<?,?> actual = shard.underlyingActor().readStore(TestModel.TEST_PATH);
+ assertEquals("Applied state", node, actual);
+ }
+
+ @SuppressWarnings("serial")
+ @Test
+ public void testRecovery() throws Exception {
+
+ // Set up the InMemorySnapshotStore.
+
+ InMemoryDOMDataStore testStore = InMemoryDOMDataStoreFactory.create("Test", null, null);
+ testStore.onGlobalContextUpdated(SCHEMA_CONTEXT);
+
+ DOMStoreWriteTransaction writeTx = testStore.newWriteOnlyTransaction();
+ writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ DOMStoreThreePhaseCommitCohort commitCohort = writeTx.ready();
+ commitCohort.preCommit().get();
+ commitCohort.commit().get();
+
+ DOMStoreReadTransaction readTx = testStore.newReadOnlyTransaction();
+ NormalizedNode<?, ?> root = readTx.read(YangInstanceIdentifier.builder().build()).get().get();
+
+ InMemorySnapshotStore.addSnapshot(IDENTIFIER.toString(), Snapshot.create(
+ new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT).encode(
+ YangInstanceIdentifier.builder().build(), root).
+ getNormalizedNode().toByteString().toByteArray(),
+ Collections.<ReplicatedLogEntry>emptyList(), 0, 1, -1, -1));
+
+ // Set up the InMemoryJournal.
+
+ InMemoryJournal.addEntry(IDENTIFIER.toString(), 0, new ReplicatedLogImplEntry(0, 1, newPayload(
+ new WriteModification(TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+ SCHEMA_CONTEXT))));
+
+ int nListEntries = 11;
+ Set<Integer> listEntryKeys = new HashSet<>();
+ for(int i = 1; i <= nListEntries; i++) {
+ listEntryKeys.add(Integer.valueOf(i));
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i).build();
+ Modification mod = new MergeModification(path,
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i),
+ SCHEMA_CONTEXT);
+ InMemoryJournal.addEntry(IDENTIFIER.toString(), i, new ReplicatedLogImplEntry(i, 1,
+ newPayload(mod)));
}
- protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(logLevel
- ) {
+ InMemoryJournal.addEntry(IDENTIFIER.toString(), nListEntries + 1,
+ new ApplyLogEntries(nListEntries));
+
+ // Create the actor and wait for recovery complete.
+
+ final CountDownLatch recoveryComplete = new CountDownLatch(1);
+
+ Creator<Shard> creator = new Creator<Shard>() {
+ @Override
+ public Shard create() throws Exception {
+ return new Shard(IDENTIFIER, Collections.<ShardIdentifier,String>emptyMap(),
+ DATA_STORE_CONTEXT, SCHEMA_CONTEXT) {
@Override
- protected Boolean run() {
- return true;
+ protected void onRecoveryComplete() {
+ try {
+ super.onRecoveryComplete();
+ } finally {
+ recoveryComplete.countDown();
+ }
}
- }.from(subject.path().toString())
- .message(logMessage)
- .occurrences(1).exec();
+ };
+ }
+ };
- Assert.assertEquals(true, result);
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(creator)), "testRecovery");
+
+ assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
+
+ // Verify data in the data store.
+
+ NormalizedNode<?, ?> outerList = shard.underlyingActor().readStore(TestModel.OUTER_LIST_PATH);
+ assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
+ outerList.getValue() instanceof Iterable);
+ for(Object entry: (Iterable<?>) outerList.getValue()) {
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
+ entry instanceof MapEntryNode);
+ MapEntryNode mapEntry = (MapEntryNode)entry;
+ Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
+ mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+ assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
+ Object value = idLeaf.get().getValue();
+ assertTrue("Unexpected value for leaf "+ TestModel.ID_QNAME.getLocalName() + ": " + value,
+ listEntryKeys.remove(value));
+ }
+ if(!listEntryKeys.isEmpty()) {
+ fail("Missing " + TestModel.OUTER_LIST_QNAME.getLocalName() + " entries with keys: " +
+ listEntryKeys);
}
+ assertEquals("Last log index", nListEntries,
+ shard.underlyingActor().getShardMBean().getLastLogIndex());
+ assertEquals("Commit index", nListEntries,
+ shard.underlyingActor().getShardMBean().getCommitIndex());
+ assertEquals("Last applied", nListEntries,
+ shard.underlyingActor().getShardMBean().getLastApplied());
}
+ private CompositeModificationPayload newPayload(Modification... mods) {
+ MutableCompositeModification compMod = new MutableCompositeModification();
+ for(Modification mod: mods) {
+ compMod.addModification(mod);
+ }
+
+ return new CompositeModificationPayload(compMod.toSerializable());
+ }
+
+ @SuppressWarnings("unchecked")
@Test
- public void testCreateSnapshot() throws IOException, InterruptedException {
+ public void testForwardedCommitTransactionWithPersistence() throws IOException {
+ System.setProperty("shard.persistent", "true");
+
new ShardTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps());
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testCreateSnapshot");
+ waitUntilLeader(shard);
- // Wait for a specific log message to show up
- this.waitForLogMessage(Logging.Info.class, subject, "Switching from state Candidate to Leader");
+ NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class);
+ doReturn(Futures.immediateFuture(null)).when(cohort).commit();
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ MutableCompositeModification modification = new MutableCompositeModification();
+ modification.addModification(new WriteModification(TestModel.TEST_PATH, node,
+ SCHEMA_CONTEXT));
- subject.tell(
- new UpdateSchemaContext(TestModel.createTestContext()),
- getRef());
+ shard.tell(new ForwardedCommitTransaction(cohort, modification), getRef());
- subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
- getRef());
+ expectMsgClass(duration("5 seconds"), CommitTransactionReply.SERIALIZABLE_CLASS);
- waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+ verify(cohort).commit();
- subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
- getRef());
+ assertEquals("Last log index", 0, shard.underlyingActor().getShardMBean().getLastLogIndex());
+ }};
+ }
- waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+ @Test
+ public void testCreateSnapshot() throws IOException, InterruptedException {
+ new ShardTestKit(getSystem()) {{
+ final ActorRef subject = getSystem().actorOf(newShardProps(), "testCreateSnapshot");
- }
- };
+ waitUntilLeader(subject);
+
+ subject.tell(new CaptureSnapshot(-1,-1,-1,-1), getRef());
+
+ waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
+ subject.tell(new CaptureSnapshot(-1,-1,-1,-1), getRef());
- deletePersistenceFiles();
+ waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
}};
}
InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", MoreExecutors.listeningDecorator(
MoreExecutors.sameThreadExecutor()), MoreExecutors.sameThreadExecutor());
- store.onGlobalContextUpdated(TestModel.createTestContext());
+ store.onGlobalContextUpdated(SCHEMA_CONTEXT);
DOMStoreWriteTransaction putTransaction = store.newWriteOnlyTransaction();
putTransaction.write(TestModel.TEST_PATH,
}
};
}
+
+ private static final class DelegatingShardCreator implements Creator<Shard> {
+ private final Creator<Shard> delegate;
+
+ DelegatingShardCreator(Creator<Shard> delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public Shard create() throws Exception {
+ return delegate.create();
+ }
+ }
+
+ private static class ShardTestKit extends JavaTestKit {
+
+ private ShardTestKit(ActorSystem actorSystem) {
+ super(actorSystem);
+ }
+
+ protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
+ // Wait for a specific log message to show up
+ final boolean result =
+ new JavaTestKit.EventFilter<Boolean>(logLevel
+ ) {
+ @Override
+ protected Boolean run() {
+ return true;
+ }
+ }.from(subject.path().toString())
+ .message(logMessage)
+ .occurrences(1).exec();
+
+ Assert.assertEquals(true, result);
+
+ }
+
+ protected void waitUntilLeader(ActorRef subject) {
+ waitForLogMessage(Logging.Info.class, subject,
+ "Switching from state Candidate to Leader");
+ }
+ }
}
datastoreContext = new DatastoreContext("Test",
InMemoryDOMDataStoreConfigProperties.getDefault(),
- Duration.create(500, TimeUnit.MILLISECONDS), 5);
+ Duration.create(500, TimeUnit.MILLISECONDS), 5, 1000, 1000, 500);
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import com.google.common.collect.Maps;
+import scala.concurrent.Future;
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
+
+public class InMemoryJournal extends AsyncWriteJournal {
+
+ private static Map<String, Map<Long, Object>> journals = new ConcurrentHashMap<>();
+
+ public static void addEntry(String persistenceId, long sequenceNr, Object data) {
+ Map<Long, Object> journal = journals.get(persistenceId);
+ if(journal == null) {
+ journal = Maps.newLinkedHashMap();
+ journals.put(persistenceId, journal);
+ }
+
+ journal.put(sequenceNr, data);
+ }
+
+ public static void clear() {
+ journals.clear();
+ }
+
+ @Override
+ public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr,
+ long toSequenceNr, long max, final Procedure<PersistentRepr> replayCallback) {
+ return Futures.future(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ Map<Long, Object> journal = journals.get(persistenceId);
+ if(journal == null) {
+ return null;
+ }
+
+ for (Map.Entry<Long,Object> entry : journal.entrySet()) {
+ PersistentRepr persistentMessage =
+ new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId, false, null, null);
+ replayCallback.apply(persistentMessage);
+ }
+
+ return null;
+ }
+ }, context().dispatcher());
+ }
+
+ @Override
+ public Future<Long> doAsyncReadHighestSequenceNr(String persistenceId, long fromSequenceNr) {
+ return Futures.successful(new Long(0));
+ }
+
+ @Override
+ public Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> messages) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> confirmations) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> messageIds, boolean permanent) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncDeleteMessagesTo(String persistenceId, long toSequenceNr, boolean permanent) {
+ return Futures.successful(null);
+ }
+}
import akka.persistence.snapshot.japi.SnapshotStore;
import com.google.common.collect.Iterables;
import scala.concurrent.Future;
-
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.opendaylight.controller.cluster.raft.Snapshot;
public class InMemorySnapshotStore extends SnapshotStore {
- Map<String, List<Snapshot>> snapshots = new HashMap<>();
+ private static Map<String, List<StoredSnapshot>> snapshots = new ConcurrentHashMap<>();
+
+ public static void addSnapshot(String persistentId, Snapshot snapshot) {
+ List<StoredSnapshot> snapshotList = snapshots.get(persistentId);
+
+ if(snapshotList == null) {
+ snapshotList = new ArrayList<>();
+ snapshots.put(persistentId, snapshotList);
+ }
+
+ snapshotList.add(new StoredSnapshot(new SnapshotMetadata(persistentId, snapshotList.size(),
+ System.currentTimeMillis()), snapshot));
+ }
+
+ public static void clear() {
+ snapshots.clear();
+ }
- @Override public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
+ @Override
+ public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
SnapshotSelectionCriteria snapshotSelectionCriteria) {
- List<Snapshot> snapshotList = snapshots.get(s);
+ List<StoredSnapshot> snapshotList = snapshots.get(s);
if(snapshotList == null){
return Futures.successful(Option.<SelectedSnapshot>none());
}
- Snapshot snapshot = Iterables.getLast(snapshotList);
+ StoredSnapshot snapshot = Iterables.getLast(snapshotList);
SelectedSnapshot selectedSnapshot =
new SelectedSnapshot(snapshot.getMetadata(), snapshot.getData());
return Futures.successful(Option.some(selectedSnapshot));
}
- @Override public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
- List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+ @Override
+ public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+ List<StoredSnapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
if(snapshotList == null){
snapshotList = new ArrayList<>();
snapshots.put(snapshotMetadata.persistenceId(), snapshotList);
}
- snapshotList.add(new Snapshot(snapshotMetadata, o));
+ snapshotList.add(new StoredSnapshot(snapshotMetadata, o));
return Futures.successful(null);
}
- @Override public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+ @Override
+ public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
}
- @Override public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
- List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+ @Override
+ public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+ List<StoredSnapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
if(snapshotList == null){
return;
int deleteIndex = -1;
for(int i=0;i<snapshotList.size(); i++){
- Snapshot snapshot = snapshotList.get(i);
+ StoredSnapshot snapshot = snapshotList.get(i);
if(snapshotMetadata.equals(snapshot.getMetadata())){
deleteIndex = i;
break;
}
- @Override public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
+ @Override
+ public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
throws Exception {
- List<Snapshot> snapshotList = snapshots.get(s);
+ List<StoredSnapshot> snapshotList = snapshots.get(s);
if(snapshotList == null){
return;
snapshots.remove(s);
}
- private static class Snapshot {
+ private static class StoredSnapshot {
private final SnapshotMetadata metadata;
private final Object data;
- private Snapshot(SnapshotMetadata metadata, Object data) {
+ private StoredSnapshot(SnapshotMetadata metadata, Object data) {
this.metadata = metadata;
this.data = data;
}
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+ persistence.journal.plugin = "in-memory-journal"
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
}
}
+in-memory-journal {
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal"
+}
+
in-memory-snapshot-store {
# Class name of the plugin.
class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore;
-
-/**
-*
-*/
-public final class HashMapDataStoreModule extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModule
-{
-
- public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
- super(identifier, dependencyResolver);
- }
-
- public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, HashMapDataStoreModule oldModule, java.lang.AutoCloseable oldInstance) {
- super(identifier, dependencyResolver, oldModule, oldInstance);
- }
-
- @Override
- public void validate(){
- super.validate();
- // Add custom validation for module attributes here.
- }
-
- @Override
- public java.lang.AutoCloseable createInstance() {
- HashMapDataStore store = new HashMapDataStore();
- return store;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-/**
-*
-*/
-public class HashMapDataStoreModuleFactory extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModuleFactory
-{
-
-
-}
package org.opendaylight.controller.md.sal.dom.broker.impl;
import static com.google.common.base.Preconditions.checkState;
-import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.EnumMap;
public CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
LOG.debug("Transaction: {} submitted with cohorts {}.", transaction.getIdentifier(), cohorts);
- return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> absent());
+ return coordinator.submit(transaction, cohorts);
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
* {@link org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType} type.
*
*/
-public class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTransactionFactory<DOMStoreTransactionChain>
- implements DOMTransactionChain, DOMDataCommitErrorListener {
+final class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTransactionFactory<DOMStoreTransactionChain>
+ implements DOMTransactionChain {
+ private static enum State {
+ RUNNING,
+ CLOSING,
+ CLOSED,
+ FAILED,
+ }
+ private static final AtomicIntegerFieldUpdater<DOMDataBrokerTransactionChainImpl> COUNTER_UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(DOMDataBrokerTransactionChainImpl.class, "counter");
+ private static final AtomicReferenceFieldUpdater<DOMDataBrokerTransactionChainImpl, State> STATE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMDataBrokerTransactionChainImpl.class, State.class, "state");
private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class);
private final AtomicLong txNum = new AtomicLong();
private final DOMDataCommitExecutor coordinator;
private final TransactionChainListener listener;
private final long chainId;
- private volatile boolean failed = false;
+ private volatile State state = State.RUNNING;
+ private volatile int counter = 0;
/**
*
this.listener = Preconditions.checkNotNull(listener);
}
+ private void checkNotFailed() {
+ Preconditions.checkState(state != State.FAILED, "Transaction chain has failed");
+ }
+
@Override
protected Object newTransactionIdentifier() {
return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
}
@Override
- public CheckedFuture<Void,TransactionCommitFailedException> submit(
+ public CheckedFuture<Void, TransactionCommitFailedException> submit(
final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ checkNotFailed();
checkNotClosed();
- return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> of(this));
+ final CheckedFuture<Void, TransactionCommitFailedException> ret = coordinator.submit(transaction, cohorts);
+
+ COUNTER_UPDATER.incrementAndGet(this);
+ Futures.addCallback(ret, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ transactionCompleted();
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ transactionFailed(transaction, t);
+ }
+ });
+
+ return ret;
}
@Override
public void close() {
- super.close();
+ final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING);
+ if (!success) {
+ LOG.debug("Chain {} is no longer running", this);
+ return;
+ }
+ super.close();
for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
subChain.close();
}
- if (!failed) {
- LOG.debug("Transaction chain {}Â successfully finished.", this);
- // FIXME: this event should be emitted once all operations complete
- listener.onTransactionChainSuccessful(this);
+ if (counter == 0) {
+ finishClose();
}
}
- @Override
- public void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
- failed = true;
+ private void finishClose() {
+ state = State.CLOSED;
+ listener.onTransactionChainSuccessful(this);
+ }
+
+ private void transactionCompleted() {
+ if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) {
+ finishClose();
+ }
+ }
+
+ private void transactionFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+ state = State.FAILED;
LOG.debug("Transaction chain {}Â failed.", this, cause);
listener.onTransactionChainFailed(this, tx, cause);
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Iterables;
@Override
public CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final Optional<DOMDataCommitErrorListener> listener) {
+ final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
- Preconditions.checkArgument(listener != null, "Listener must not be null");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
ListenableFuture<Void> commitFuture = null;
try {
commitFuture = executor.submit(new CommitCoordinationTask(transaction, cohorts,
- listener, commitStatsTracker));
+ commitStatsTracker));
} catch(RejectedExecutionException e) {
LOG.error("The commit executor's queue is full - submit task was rejected. \n" +
executor, e);
"Could not submit the commit task - the commit queue capacity has been exceeded.", e));
}
- if (listener.isPresent()) {
- Futures.addCallback(commitFuture, new DOMDataCommitErrorInvoker(transaction, listener.get()));
- }
-
return MappingCheckedFuture.create(commitFuture,
TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
}
public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
- final Optional<DOMDataCommitErrorListener> listener,
final DurationStatsTracker commitStatTracker) {
this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-
-/**
- *
- * Utility implemetation of {@link FutureCallback} which is responsible
- * for invoking {@link DOMDataCommitErrorListener} on TransactionCommit failed.
- *
- * When {@link #onFailure(Throwable)} is invoked, supplied {@link DOMDataCommitErrorListener}
- * callback is invoked with associated transaction and throwable is invoked on listener.
- *
- */
-class DOMDataCommitErrorInvoker implements FutureCallback<Void> {
-
- private final DOMDataWriteTransaction tx;
- private final DOMDataCommitErrorListener listener;
-
-
- /**
- *
- * Construct new DOMDataCommitErrorInvoker.
- *
- * @param transaction Transaction which should be passed as argument to {@link DOMDataCommitErrorListener#onCommitFailed(DOMDataWriteTransaction, Throwable)}
- * @param listener Listener which should be invoked on error.
- */
- public DOMDataCommitErrorInvoker(DOMDataWriteTransaction transaction, DOMDataCommitErrorListener listener) {
- this.tx = Preconditions.checkNotNull(transaction, "Transaction must not be null");
- this.listener = Preconditions.checkNotNull(listener, "Listener must not be null");
- }
-
- @Override
- public void onFailure(Throwable t) {
- listener.onCommitFailed(tx, t);
- }
-
- @Override
- public void onSuccess(Void result) {
- // NOOP
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import java.util.EventListener;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-
-/**
- *
- * Listener on transaction failure which may be passed to
- * {@link DOMDataCommitExecutor}. This listener is notified during transaction
- * processing, before result is delivered to other client code outside MD-SAL.
- * This allows implementors to update their internal state before transaction
- * failure is visible to client code.
- *
- * This is internal API for MD-SAL implementations, for consumer facing error
- * listeners see {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener}.
- *
- */
-interface DOMDataCommitErrorListener extends EventListener {
-
- /**
- *
- * Callback which is invoked on transaction failure during three phase
- * commit in {@link DOMDataCommitExecutor}.
- *
- *
- * Implementation of this callback MUST NOT do any blocking calls or any
- * calls to MD-SAL, since this callback is invoked synchronously on MD-SAL
- * Broker coordination thread.
- *
- * @param tx
- * Transaction which failed
- * @param cause
- * Failure reason
- */
- void onCommitFailed(DOMDataWriteTransaction tx, Throwable cause);
-
-}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.util.concurrent.CheckedFuture;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
/**
* Executor of Three Phase Commit coordination for
* Transaction to be used as context for reporting
* @param cohort
* DOM Store cohorts representing provided transaction, its
- * subtransactoins.
- * @param listener
- * Error listener which should be notified if transaction failed.
+ * subtransactions.
* @return a CheckedFuture. if commit coordination on cohorts finished successfully,
* nothing is returned from the Future, On failure,
* the Future fails with a {@link TransactionCommitFailedException}.
*
*/
CheckedFuture<Void,TransactionCommitFailedException> submit(DOMDataWriteTransaction tx,
- Iterable<DOMStoreThreePhaseCommitCohort> cohort, Optional<DOMDataCommitErrorListener> listener);
+ Iterable<DOMStoreThreePhaseCommitCohort> cohort);
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.sal.core.api.data.DataStore;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class HashMapDataStore implements DataStore, AutoCloseable {
- private static final Logger LOG = LoggerFactory
- .getLogger(HashMapDataStore.class);
-
- private final Map<YangInstanceIdentifier, CompositeNode> configuration = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
- private final Map<YangInstanceIdentifier, CompositeNode> operational = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
-
- @Override
- public boolean containsConfigurationPath(final YangInstanceIdentifier path) {
- return configuration.containsKey(path);
- }
-
- @Override
- public boolean containsOperationalPath(final YangInstanceIdentifier path) {
- return operational.containsKey(path);
- }
-
- @Override
- public Iterable<YangInstanceIdentifier> getStoredConfigurationPaths() {
- return configuration.keySet();
- }
-
- @Override
- public Iterable<YangInstanceIdentifier> getStoredOperationalPaths() {
- return operational.keySet();
- }
-
- @Override
- public CompositeNode readConfigurationData(final YangInstanceIdentifier path) {
- LOG.trace("Reading configuration path {}", path);
- return configuration.get(path);
- }
-
- @Override
- public CompositeNode readOperationalData(YangInstanceIdentifier path) {
- LOG.trace("Reading operational path {}", path);
- return operational.get(path);
- }
-
- @Override
- public DataCommitHandler.DataCommitTransaction<YangInstanceIdentifier, CompositeNode> requestCommit(
- final DataModification<YangInstanceIdentifier, CompositeNode> modification) {
- return new HashMapDataStoreTransaction(modification, this);
- }
-
- public RpcResult<Void> rollback(HashMapDataStoreTransaction transaction) {
- return RpcResultBuilder.<Void> success().build();
- }
-
- public RpcResult<Void> finish(HashMapDataStoreTransaction transaction) {
- final DataModification<YangInstanceIdentifier, CompositeNode> modification = transaction
- .getModification();
- for (final YangInstanceIdentifier removal : modification
- .getRemovedConfigurationData()) {
- LOG.trace("Removing configuration path {}", removal);
- remove(configuration, removal);
- }
- for (final YangInstanceIdentifier removal : modification
- .getRemovedOperationalData()) {
- LOG.trace("Removing operational path {}", removal);
- remove(operational, removal);
- }
- if (LOG.isTraceEnabled()) {
- for (final YangInstanceIdentifier a : modification
- .getUpdatedConfigurationData().keySet()) {
- LOG.trace("Adding configuration path {}", a);
- }
- for (final YangInstanceIdentifier a : modification
- .getUpdatedOperationalData().keySet()) {
- LOG.trace("Adding operational path {}", a);
- }
- }
- configuration.putAll(modification.getUpdatedConfigurationData());
- operational.putAll(modification.getUpdatedOperationalData());
-
- return RpcResultBuilder.<Void> success().build();
- }
-
- public void remove(final Map<YangInstanceIdentifier, CompositeNode> map,
- final YangInstanceIdentifier identifier) {
- Set<YangInstanceIdentifier> affected = new HashSet<YangInstanceIdentifier>();
- for (final YangInstanceIdentifier path : map.keySet()) {
- if (identifier.contains(path)) {
- affected.add(path);
- }
- }
- for (final YangInstanceIdentifier pathToRemove : affected) {
- LOG.trace("Removed path {}", pathToRemove);
- map.remove(pathToRemove);
- }
- }
-
- @Override
- public void close() {
- // NOOP
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public class HashMapDataStoreTransaction implements
- DataCommitTransaction<YangInstanceIdentifier, CompositeNode> {
- private final DataModification<YangInstanceIdentifier, CompositeNode> modification;
- private final HashMapDataStore datastore;
-
- HashMapDataStoreTransaction(
- final DataModification<YangInstanceIdentifier, CompositeNode> modify,
- final HashMapDataStore store) {
- modification = modify;
- datastore = store;
- }
-
- @Override
- public RpcResult<Void> finish() throws IllegalStateException {
- return datastore.finish(this);
- }
-
- @Override
- public DataModification<YangInstanceIdentifier, CompositeNode> getModification() {
- return this.modification;
- }
-
- @Override
- public RpcResult<Void> rollback() throws IllegalStateException {
- return datastore.rollback(this);
- }
-}
\ No newline at end of file
config:provided-service sal:dom-async-data-broker;
}
- identity hash-map-data-store {
- base config:module-type;
- config:provided-service sal:dom-data-store;
- config:java-name-prefix HashMapDataStore;
- }
-
identity schema-service-singleton {
base config:module-type;
config:provided-service sal:schema-service;
}
}
- augment "/config:modules/config:module/config:state" {
- case hash-map-data-store {
- when "/config:modules/config:module/config:type = 'hash-map-data-store'";
- }
- }
-
augment "/config:modules/config:module/config:state" {
case schema-service-singleton {
when "/config:modules/config:module/config:type = 'schema-service-singleton'";
}
}
}
-}
\ No newline at end of file
+}
package org.opendaylight.xsql;
-import java.util.concurrent.ExecutionException;
-
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQL;
XSQLBuilder builder = new XSQLBuilder();
builder.setPort("34343");
XSQL xsql = builder.build();
- if (dps != null) {
- final DataModificationTransaction t = dps.beginTransaction();
- t.removeOperationalData(ID);
- t.putOperationalData(ID,xsql);
-
- try {
+ try {
+ if (dps != null) {
+ final DataModificationTransaction t = dps.beginTransaction();
+ t.removeOperationalData(ID);
+ t.putOperationalData(ID,xsql);
t.commit().get();
- } catch (InterruptedException | ExecutionException e) {
- LOG.warn("Failed to update toaster status, operational otherwise", e);
}
+ } catch (Exception e) {
+ LOG.warn("Failed to update XSQL port status", e);
}
return xsql;
}
<?xml version="1.0" encoding="UTF-8"?>\r
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">\r
\r
- <!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ <!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
-->\r
\r
<modelVersion>4.0.0</modelVersion>\r
<version>1.1-SNAPSHOT</version>\r
</parent>\r
\r
- <groupId>xsqlcommand</groupId>\r
- <artifactId>xsqlcommand</artifactId>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>sal-karaf-xsql</artifactId>\r
<packaging>bundle</packaging>\r
- <version>1.0.0-SNAPSHOT</version>\r
<name>Apache Karaf :: Shell odl/xsql Commands</name>\r
\r
<description>Provides the OSGi odl commands</description>\r
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal-dom-xsql</artifactId>\r
- <type>bundle</type>\r
<version>1.1-SNAPSHOT</version>\r
</dependency>\r
</dependencies>\r
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+
+<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
+ xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.0.0">
+
+ <command-bundle xmlns="http://karaf.apache.org/xmlns/shell/v1.1.0">
+ <command>
+ <action class="org.opendaylight.controller.xsql.xsql">
+ </action>
+ </command>
+ </command-bundle>
+</blueprint>
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>sal-samples</artifactId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
<artifactId>l2switch.aggregator</artifactId>
<groupId>org.opendaylight.controller.samples.l2switch</groupId>
*/
package org.opendaylight.md.controller.topology.manager;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeConnectorKey;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeKey;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPoint;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPointId;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyLink;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
+import java.util.Collections;
+import java.util.List;
+
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeConnectorKey;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeKey;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPoint;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPointId;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyLink;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, OpendaylightInventoryListener {
final NodeId nodeId = toTopologyNodeId(getNodeKey(notification.getNodeRef()).getId());
final InstanceIdentifier<Node> nodeInstance = toNodeIdentifier(notification.getNodeRef());
+
processor.enqueueOperation(new TopologyOperation() {
@Override
public void applyOperation(ReadWriteTransaction transaction) {
- removeAffectedLinks(nodeId, transaction);
- transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
+ Optional<Node> nodeOptional = Optional.absent();
+ try {
+ nodeOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, nodeInstance).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error occurred when trying to read Node ", e);
+ }
+ if (nodeOptional.isPresent()) {
+ removeAffectedLinks(nodeId, transaction);
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
+ }
}
@Override
processor.enqueueOperation(new TopologyOperation() {
@Override
public void applyOperation(ReadWriteTransaction transaction) {
- removeAffectedLinks(tpId, transaction);
- transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
+ Optional<TerminationPoint> terminationPointOptional = Optional.absent();
+ try {
+ terminationPointOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, tpInstance).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error occurred when trying to read NodeConnector ", e);
+ }
+ if (terminationPointOptional.isPresent()) {
+ removeAffectedLinks(tpId, transaction);
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
+ }
}
@Override
public void applyOperation(final ReadWriteTransaction transaction) {
final Link link = toTopologyLink(notification);
final InstanceIdentifier<Link> path = linkPath(link);
- transaction.merge(LogicalDatastoreType.OPERATIONAL, path, link, true);
+ transaction.put(LogicalDatastoreType.OPERATIONAL, path, link, true);
}
@Override
processor.enqueueOperation(new TopologyOperation() {
@Override
public void applyOperation(final ReadWriteTransaction transaction) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
+ Optional<Link> linkOptional = Optional.absent();
+ try {
+ // read that checks if link exists (if we do not do this we might get an exception on delete)
+ linkOptional = transaction.read(LogicalDatastoreType.OPERATIONAL,
+ linkPath(toTopologyLink(notification))).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error occurred when trying to read Link ", e);
+ }
+ if (linkOptional.isPresent()) {
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
+ }
}
@Override
});
}
+
@Override
public void onLinkUtilizationNormal(final LinkUtilizationNormal notification) {
// NOOP
}
private void removeAffectedLinks(final NodeId id, final ReadWriteTransaction transaction) {
- CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
- transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
- Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(Optional<Topology> topologyOptional) {
- removeAffectedLinks(id, topologyOptional);
- }
-
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Error reading topology data for topology {}", topology, throwable);
- }
- });
+ Optional<Topology> topologyOptional = Optional.absent();
+ try {
+ topologyOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, topology).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error reading topology data for topology {}", topology, e);
+ }
+ if (topologyOptional.isPresent()) {
+ removeAffectedLinks(id, topologyOptional, transaction);
+ }
}
- private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional) {
+ private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional, ReadWriteTransaction transaction) {
if (!topologyOptional.isPresent()) {
return;
}
List<Link> linkList = topologyOptional.get().getLink() != null ?
topologyOptional.get().getLink() : Collections.<Link> emptyList();
- final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
for (Link link : linkList) {
if (id.equals(link.getSource().getSourceNode()) ||
id.equals(link.getDestination().getDestNode())) {
- linkIDsToDelete.add(linkPath(link));
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
}
}
-
- enqueueLinkDeletes(linkIDsToDelete);
- }
-
- private void enqueueLinkDeletes(final Collection<InstanceIdentifier<Link>> linkIDsToDelete) {
- if(!linkIDsToDelete.isEmpty()) {
- processor.enqueueOperation(new TopologyOperation() {
- @Override
- public void applyOperation(ReadWriteTransaction transaction) {
- for(InstanceIdentifier<Link> linkID: linkIDsToDelete) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, linkID);
- }
- }
-
- @Override
- public String toString() {
- return "Delete Links " + linkIDsToDelete.size();
- }
- });
- }
}
private void removeAffectedLinks(final TpId id, final ReadWriteTransaction transaction) {
- CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
- transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
- Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(Optional<Topology> topologyOptional) {
- removeAffectedLinks(id, topologyOptional);
- }
-
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Error reading topology data for topology {}", topology, throwable);
- }
- });
+ Optional<Topology> topologyOptional = Optional.absent();
+ try {
+ topologyOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, topology).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error reading topology data for topology {}", topology, e);
+ }
+ if (topologyOptional.isPresent()) {
+ removeAffectedLinks(id, topologyOptional, transaction);
+ }
}
- private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional) {
+ private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional, ReadWriteTransaction transaction) {
if (!topologyOptional.isPresent()) {
return;
}
List<Link> linkList = topologyOptional.get().getLink() != null
? topologyOptional.get().getLink() : Collections.<Link> emptyList();
- final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
for (Link link : linkList) {
if (id.equals(link.getSource().getSourceTp()) ||
id.equals(link.getDestination().getDestTp())) {
- linkIDsToDelete.add(linkPath(link));
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
}
}
-
- enqueueLinkDeletes(linkIDsToDelete);
}
private InstanceIdentifier<Node> getNodePath(final NodeId nodeId) {
package org.opendaylight.md.controller.topology.manager;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
final class OperationProcessor implements AutoCloseable, Runnable, TransactionChainListener {
private static final Logger LOG = LoggerFactory.getLogger(OperationProcessor.class);
private static final int MAX_TRANSACTION_OPERATIONS = 100;
private final BlockingQueue<TopologyOperation> queue = new LinkedBlockingQueue<>(OPERATION_QUEUE_DEPTH);
private final DataBroker dataBroker;
- private final BindingTransactionChain transactionChain;
+ private BindingTransactionChain transactionChain;
OperationProcessor(final DataBroker dataBroker) {
this.dataBroker = Preconditions.checkNotNull(dataBroker);
LOG.debug("Processed {} operations, submitting transaction", ops);
- CheckedFuture<Void, TransactionCommitFailedException> txResultFuture = tx.submit();
- Futures.addCallback(txResultFuture, new FutureCallback<Void>() {
- @Override
- public void onSuccess(Void notUsed) {
- LOG.debug("Topology export successful for tx :{}", tx.getIdentifier());
- }
-
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Topology export transaction {} failed", tx.getIdentifier(), throwable.getCause());
- }
- });
+ try {
+ tx.submit().checkedGet();
+ } catch (final TransactionCommitFailedException e) {
+ LOG.warn("Topology export transaction submit failed, resetting transaction chain", e);
+ transactionChain.close();
+ transactionChain = dataBroker.createTransactionChain(this);
+ cleanDataStoreOperQueue();
+ }
}
- } catch (InterruptedException e) {
- LOG.info("Interrupted processing, terminating", e);
+ } catch (final IllegalStateException e) {
+ LOG.warn("Topology transaction chain in unexpected state, resetting", e);
+ transactionChain.close();
+ transactionChain = dataBroker.createTransactionChain(this);
+ cleanDataStoreOperQueue();
+ } catch (final InterruptedException e) {
+ LOG.warn("Topology operation processing thread interrupted, terminating", e);
+ } catch (final Exception e) {
+ LOG.warn("Unexpected failure while executing topology datastore operation", e);
}
+ // Always drain on exit so producers blocked on a full queue are released
+ cleanDataStoreOperQueue();
+
+ }
+
+ private void cleanDataStoreOperQueue() {
// Drain all events, making sure any blocked threads are unblocked
while (!queue.isEmpty()) {
queue.poll();
package org.opendaylight.md.controller.topology.manager;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkKey;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointBuilder;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
public class FlowCapableTopologyExporterTest {
@Test
public void testOnNodeRemoved() {
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
- nodeKey = newInvNodeKey("node1");
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
nodeKey);
};
SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ readFuture.set(Optional.of(topology));
ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
.read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
int expDeleteCalls = expDeletedIIDs.length;
ArgumentCaptor.forClass(InstanceIdentifier.class);
setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
- ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
- setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
- CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
-
- doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+ doReturn(mockTx1).when(mockTxChain).newReadWriteTransaction();
exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
waitForDeletes(expDeleteCalls, deleteLatch);
- waitForSubmit(submitLatch2);
-
assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
verifyMockTx(mockTx1);
- verifyMockTx(mockTx2);
}
@SuppressWarnings({ "rawtypes" })
@Test
public void testOnNodeRemovedWithNoTopology() {
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
- nodeKey = newInvNodeKey("node1");
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
nodeKey);
.read(LogicalDatastoreType.OPERATIONAL, topologyIID);
CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
CountDownLatch deleteLatch = new CountDownLatch(1);
ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
ArgumentCaptor.forClass(InstanceIdentifier.class);
@Test
public void testOnNodeConnectorRemoved() {
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ TerminationPointKey terminationPointKey = new TerminationPointKey(new TpId("tp1"));
+
+ InstanceIdentifier<TerminationPoint> topoTermPointII = topologyIID.child(Node.class, topoNodeKey)
+ .child(TerminationPoint.class, terminationPointKey);
+ TerminationPoint topoTermPoint = new TerminationPointBuilder().setKey(terminationPointKey).build();
+
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
- nodeKey = newInvNodeKey("node1");
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
- newInvNodeConnKey("tp1");
+ newInvNodeConnKey(terminationPointKey.getTpId().getValue());
InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
};
final SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ readFuture.set(Optional.of(topology));
ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
.read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ SettableFuture<Optional<TerminationPoint>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoTermPoint));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topoTermPointII);
+
CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
int expDeleteCalls = expDeletedIIDs.length;
ArgumentCaptor.forClass(InstanceIdentifier.class);
setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
- ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
- setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
- CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
- doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+ doReturn(mockTx1).when(mockTxChain).newReadWriteTransaction();
exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
new NodeConnectorRef(invNodeConnID)).build());
waitForDeletes(expDeleteCalls, deleteLatch);
- waitForSubmit(submitLatch2);
-
assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
verifyMockTx(mockTx1);
- verifyMockTx(mockTx2);
}
@SuppressWarnings("rawtypes")
@Test
public void testOnNodeConnectorRemovedWithNoTopology() {
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ TerminationPointKey terminationPointKey = new TerminationPointKey(new TpId("tp1"));
+
+ InstanceIdentifier<TerminationPoint> topoTermPointII = topologyIID.child(Node.class, topoNodeKey)
+ .child(TerminationPoint.class, terminationPointKey);
+ TerminationPoint topoTermPoint = new TerminationPointBuilder().setKey(terminationPointKey).build();
+
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
- nodeKey = newInvNodeKey("node1");
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
- newInvNodeConnKey("tp1");
+ newInvNodeConnKey(terminationPointKey.getTpId().getValue());
InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
.read(LogicalDatastoreType.OPERATIONAL, topologyIID);
CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ SettableFuture<Optional<TerminationPoint>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoTermPoint));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topoTermPointII);
+
CountDownLatch deleteLatch = new CountDownLatch(1);
ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
ArgumentCaptor.forClass(InstanceIdentifier.class);
waitForSubmit(submitLatch);
ArgumentCaptor<Link> mergedNode = ArgumentCaptor.forClass(Link.class);
- verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
- Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
+ verify(mockTx).put(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
mergedNode.capture(), eq(true));
assertEquals("Source node ID", "sourceNode",
mergedNode.getValue().getSource().getSourceNode().getValue());
}
@Test
- public void testOnLinkRemoved() {
+ public void testOnLinkRemovedLinkExists() {
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
sourceNodeKey = newInvNodeKey("sourceNode");
destNodeConnKey = newInvNodeConnKey("destTP");
InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+ Link link = newLink(sourceNodeConnKey.getId().getValue(), newSourceTp(sourceNodeConnKey.getId().getValue()),
+ newDestTp(destNodeConnKey.getId().getValue()));
+
ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+ doReturn(Futures.immediateCheckedFuture(Optional.of(link))).when(mockTx).read(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
new NodeConnectorRef(sourceConnID)).setDestination(
- new NodeConnectorRef(destConnID)).build());
+ new NodeConnectorRef(destConnID)).build());
waitForSubmit(submitLatch);
Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
}
+ @Test
+ public void testOnLinkRemovedLinkDoesNotExist() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+ doReturn(Futures.immediateCheckedFuture(Optional.<Link>absent())).when(mockTx).read(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+
+ exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ verify(mockTx, never()).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+ }
+
private void verifyMockTx(ReadWriteTransaction mockTx) {
InOrder inOrder = inOrder(mockTx);
inOrder.verify(mockTx, atLeast(0)).submit();
import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import java.util.List;
import java.util.Map;
public static String checkPrefixAndExtractServiceName(XmlElement typeElement, Map.Entry<String, String> prefixNamespace) throws NetconfDocumentedException {
String serviceName = typeElement.getTextContent();
// FIXME: comparing Entry with String:
- Preconditions.checkState(!prefixNamespace.equals(""), "Service %s value not prefixed with namespace",
+ Preconditions.checkState(!Strings.isNullOrEmpty(prefixNamespace.getKey()), "Service %s value not prefixed with namespace",
XmlNetconfConstants.TYPE_KEY);
String prefix = prefixNamespace.getKey() + PREFIX_SEPARATOR;
Preconditions.checkState(serviceName.startsWith(prefix),
Date revision = null;
Map<Date, EditConfig.IdentityMapping> revisions = identityMap.get(namespace);
if(revisions.keySet().size() > 1) {
- for (Date date : revisions.keySet()) {
- if(revisions.get(date).containsIdName(localName)) {
+ for (Map.Entry<Date, EditConfig.IdentityMapping> revisionToIdentityEntry : revisions.entrySet()) {
+ if(revisionToIdentityEntry.getValue().containsIdName(localName)) {
Preconditions.checkState(revision == null, "Duplicate identity %s, in namespace %s, with revisions: %s, %s detected. Cannot map attribute",
- localName, namespace, revision, date);
- revision = date;
+ localName, namespace, revision, revisionToIdentityEntry.getKey());
+ revision = revisionToIdentityEntry.getKey();
}
}
} else {
Map<String, Map<String, Collection<ObjectName>>> retVal = Maps.newLinkedHashMap();
- for (String namespace : configs.keySet()) {
+ for (Entry<String, Map<String, ModuleConfig>> namespaceToModuleToConfigEntry : configs.entrySet()) {
Map<String, Collection<ObjectName>> innerRetVal = Maps.newHashMap();
- for (Entry<String, ModuleConfig> mbeEntry : configs.get(namespace).entrySet()) {
+ for (Entry<String, ModuleConfig> mbeEntry : namespaceToModuleToConfigEntry.getValue().entrySet()) {
String moduleName = mbeEntry.getKey();
Collection<ObjectName> instances = moduleToInstances.get(moduleName);
}
- retVal.put(namespace, innerRetVal);
+ retVal.put(namespaceToModuleToConfigEntry.getKey(), innerRetVal);
}
return retVal;
}
Element modulesElement = XmlUtil.createElement(document, XmlNetconfConstants.MODULES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
dataElement.appendChild(modulesElement);
- for (String moduleNamespace : moduleToInstances.keySet()) {
- for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstances.get(moduleNamespace)
+ for (Entry<String, Map<String, Collection<ObjectName>>> moduleToInstanceEntry : moduleToInstances.entrySet()) {
+ for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstanceEntry.getValue()
.entrySet()) {
- ModuleConfig mapping = moduleConfigs.get(moduleNamespace).get(moduleMappingEntry.getKey());
+ ModuleConfig mapping = moduleConfigs.get(moduleToInstanceEntry.getKey()).get(moduleMappingEntry.getKey());
if (moduleMappingEntry.getValue().isEmpty()) {
continue;
}
for (ObjectName objectName : moduleMappingEntry.getValue()) {
- modulesElement.appendChild(mapping.toXml(objectName, document, moduleNamespace));
+ modulesElement.appendChild(mapping.toXml(objectName, document, moduleToInstanceEntry.getKey()));
}
}
this.configServiceRefRegistry = configServiceRefRegistry;
}
-
public ObjectName getByServiceAndRefName(String namespace, String serviceName, String refName) {
Map<String, Map<String, String>> serviceNameToRefNameToInstance = getMappedServices().get(namespace);
Map<String, Map<String, Map<String, String>>> retVal = Maps.newHashMap();
Map<String, Map<String, ObjectName>> serviceMapping = configServiceRefRegistry.getServiceMapping();
- for (String serviceQName : serviceMapping.keySet()){
- for (String refName : serviceMapping.get(serviceQName).keySet()) {
+ for (Map.Entry<String, Map<String, ObjectName>> qNameToRefNameEntry : serviceMapping.entrySet()){
+ for (String refName : qNameToRefNameEntry.getValue().keySet()) {
- ObjectName on = serviceMapping.get(serviceQName).get(refName);
+ ObjectName on = qNameToRefNameEntry.getValue().get(refName);
Services.ServiceInstance si = Services.ServiceInstance.fromObjectName(on);
- QName qname = QName.create(serviceQName);
+ QName qname = QName.create(qNameToRefNameEntry.getKey());
String namespace = qname.getNamespace().toString();
Map<String, Map<String, String>> serviceToRefs = retVal.get(namespace);
if(serviceToRefs==null) {
Element root = XmlUtil.createElement(document, XmlNetconfConstants.SERVICES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
Map<String, Map<String, Map<String, String>>> mappedServices = serviceRegistryWrapper.getMappedServices();
- for (String namespace : mappedServices.keySet()) {
+ for (Entry<String, Map<String, Map<String, String>>> namespaceToRefEntry : mappedServices.entrySet()) {
- for (Entry<String, Map<String, String>> serviceEntry : mappedServices.get(namespace).entrySet()) {
+ for (Entry<String, Map<String, String>> serviceEntry : namespaceToRefEntry.getValue().entrySet()) {
// service belongs to config.yang namespace
Element serviceElement = XmlUtil.createElement(document, SERVICE_KEY, Optional.<String>absent());
root.appendChild(serviceElement);
// type belongs to config.yang namespace
String serviceType = serviceEntry.getKey();
Element typeElement = XmlUtil.createTextElementWithNamespacedContent(document, XmlNetconfConstants.TYPE_KEY,
- XmlNetconfConstants.PREFIX, namespace, serviceType);
+ XmlNetconfConstants.PREFIX, namespaceToRefEntry.getKey(), serviceType);
serviceElement.appendChild(typeElement);
Map<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceNameToRefNameToInstance = services
.getNamespaceToServiceNameToRefNameToInstance();
- for (String serviceNamespace : namespaceToServiceNameToRefNameToInstance.keySet()) {
- for (String serviceName : namespaceToServiceNameToRefNameToInstance.get(serviceNamespace).keySet()) {
+ for (Map.Entry<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceToRefEntry : namespaceToServiceNameToRefNameToInstance.entrySet()) {
+ for (Map.Entry<String, Map<String, Services.ServiceInstance>> serviceToRefEntry : namespaceToServiceToRefEntry.getValue().entrySet()) {
- String qnameOfService = getQname(ta, serviceNamespace, serviceName);
- Map<String, Services.ServiceInstance> refNameToInstance = namespaceToServiceNameToRefNameToInstance
- .get(serviceNamespace).get(serviceName);
+ String qnameOfService = getQname(ta, namespaceToServiceToRefEntry.getKey(), serviceToRefEntry.getKey());
+ Map<String, Services.ServiceInstance> refNameToInstance = serviceToRefEntry.getValue();
- for (String refName : refNameToInstance.keySet()) {
- ObjectName on = refNameToInstance.get(refName).getObjectName(ta.getTransactionName());
+ for (Map.Entry<String, Services.ServiceInstance> refNameToServiceEntry : refNameToInstance.entrySet()) {
+ ObjectName on = refNameToServiceEntry.getValue().getObjectName(ta.getTransactionName());
try {
- ObjectName saved = ta.saveServiceReference(qnameOfService, refName, on);
+ ObjectName saved = ta.saveServiceReference(qnameOfService, refNameToServiceEntry.getKey(), on);
logger.debug("Saving service {} with on {} under name {} with service on {}", qnameOfService,
- on, refName, saved);
+ on, refNameToServiceEntry.getKey(), saved);
} catch (InstanceNotFoundException e) {
- throw new NetconfDocumentedException(String.format("Unable to save ref name " + refName + " for instance " + on, e),
+ throw new NetconfDocumentedException(String.format("Unable to save ref name " + refNameToServiceEntry.getKey() + " for instance " + on, e),
ErrorType.application,
ErrorTag.operation_failed,
ErrorSeverity.error);
Map<String, Map<String, ModuleConfig>> namespaceToModuleNameToModuleConfig = Maps.newHashMap();
- for (String namespace : mBeanEntries.keySet()) {
- for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : mBeanEntries.get(namespace).entrySet()) {
+ for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleToMbe : mBeanEntries.entrySet()) {
+ for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : namespaceToModuleToMbe.getValue().entrySet()) {
String moduleName = moduleNameToMbe.getKey();
ModuleMXBeanEntry moduleMXBeanEntry = moduleNameToMbe.getValue();
ModuleConfig moduleConfig = new ModuleConfig(moduleName,
new InstanceConfig(configRegistryClient,moduleMXBeanEntry.getAttributes(), moduleMXBeanEntry.getNullableDummyContainerName()));
- Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespace);
+ Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespaceToModuleToMbe.getKey());
if(moduleNameToModuleConfig == null) {
moduleNameToModuleConfig = Maps.newHashMap();
- namespaceToModuleNameToModuleConfig.put(namespace, moduleNameToModuleConfig);
+ namespaceToModuleNameToModuleConfig.put(namespaceToModuleToMbe.getKey(), moduleNameToModuleConfig);
}
moduleNameToModuleConfig.put(moduleName, moduleConfig);
Map<String, Map<String, ModuleMXBeanEntry>> mBeanEntries) {
Map<String, Map<String, ModuleRuntime>> retVal = Maps.newHashMap();
- for (String namespace : mBeanEntries.keySet()) {
+ for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
Map<String, ModuleRuntime> innerMap = Maps.newHashMap();
- Map<String, ModuleMXBeanEntry> entriesFromNamespace = mBeanEntries.get(namespace);
- for (String module : entriesFromNamespace.keySet()) {
+ Map<String, ModuleMXBeanEntry> entriesFromNamespace = namespaceToModuleEntry.getValue();
+ for (Map.Entry<String, ModuleMXBeanEntry> moduleToMXEntry : entriesFromNamespace.entrySet()) {
- ModuleMXBeanEntry mbe = entriesFromNamespace.get(module);
+ ModuleMXBeanEntry mbe = moduleToMXEntry.getValue();
Map<RuntimeBeanEntry, InstanceConfig> cache = Maps.newHashMap();
RuntimeBeanEntry root = null;
InstanceRuntime rootInstanceRuntime = createInstanceRuntime(root, cache);
ModuleRuntime moduleRuntime = new ModuleRuntime(rootInstanceRuntime);
- innerMap.put(module, moduleRuntime);
+ innerMap.put(moduleToMXEntry.getKey(), moduleRuntime);
}
- retVal.put(namespace, innerMap);
+ retVal.put(namespaceToModuleEntry.getKey(), innerMap);
}
return retVal;
}
final String[] signature = new String[attributes.size()];
int i = 0;
- for (final String attrName : attributes.keySet()) {
- final AttributeConfigElement attribute = attributes.get(attrName);
+ for (final AttributeConfigElement attribute : attributes.values()) {
final Optional<?> resolvedValueOpt = attribute.getResolvedValue();
params[i] = resolvedValueOpt.isPresent() ? resolvedValueOpt.get() : attribute.getResolvedDefaultValue();
final Map<String, Map<String, ModuleRpcs>> map = Maps.newHashMap();
- for (final String namespace : mBeanEntries.keySet()) {
+ for (final Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
- Map<String, ModuleRpcs> namespaceToModules = map.get(namespace);
+ Map<String, ModuleRpcs> namespaceToModules = map.get(namespaceToModuleEntry.getKey());
if (namespaceToModules == null) {
namespaceToModules = Maps.newHashMap();
- map.put(namespace, namespaceToModules);
+ map.put(namespaceToModuleEntry.getKey(), namespaceToModules);
}
- for (final String moduleName : mBeanEntries.get(namespace).keySet()) {
+ for (final Map.Entry<String, ModuleMXBeanEntry> moduleEntry : namespaceToModuleEntry.getValue().entrySet()) {
- ModuleRpcs rpcMapping = namespaceToModules.get(moduleName);
+ ModuleRpcs rpcMapping = namespaceToModules.get(moduleEntry.getKey());
if (rpcMapping == null) {
rpcMapping = new ModuleRpcs();
- namespaceToModules.put(moduleName, rpcMapping);
+ namespaceToModules.put(moduleEntry.getKey(), rpcMapping);
}
- final ModuleMXBeanEntry entry = mBeanEntries.get(namespace).get(moduleName);
+ final ModuleMXBeanEntry entry = moduleEntry.getValue();
for (final RuntimeBeanEntry runtimeEntry : entry.getRuntimeBeans()) {
rpcMapping.addNameMapping(runtimeEntry);
allOpenedTransactions.clear();
}
- public Optional<ObjectName> getTransaction() {
+ public synchronized Optional<ObjectName> getTransaction() {
if (transaction == null){
return Optional.absent();
import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.base.Function;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Collections2;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
-
+import javax.annotation.Nonnull;
import javax.annotation.concurrent.Immutable;
import javax.management.MBeanServerConnection;
-
import org.opendaylight.controller.config.api.ConflictingVersionException;
import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.w3c.dom.Element;
import org.xml.sax.SAXException;
-import com.google.common.base.Function;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.Collections2;
-
@Immutable
public class ConfigPusherImpl implements ConfigPusher {
private static final Logger logger = LoggerFactory.getLogger(ConfigPusherImpl.class);
private static Set<String> computeNotFoundCapabilities(Set<String> expectedCapabilities, NetconfOperationService serviceCandidate) {
Collection<String> actual = Collections2.transform(serviceCandidate.getCapabilities(), new Function<Capability, String>() {
@Override
- public String apply(Capability input) {
+ public String apply(@Nonnull final Capability input) {
return input.getCapabilityUri();
}
});
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import io.netty.util.internal.ConcurrentSet;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, SessionMonitoringService {
private static final Logger logger = LoggerFactory.getLogger(NetconfMonitoringServiceImpl.class);
private List<Session> transformSessions(Set<NetconfManagementSession> sessions) {
return Lists.newArrayList(Collections2.transform(sessions, new Function<NetconfManagementSession, Session>() {
- @Nullable
@Override
- public Session apply(@Nullable NetconfManagementSession input) {
+ public Session apply(@Nonnull NetconfManagementSession input) {
return input.toManagementSession();
}
}));
this.netconfOperationServiceSnapshot = netconfOperationServiceSnapshot;
}
- private void initNetconfOperations(Set<NetconfOperation> allOperations) {
+ private synchronized void initNetconfOperations(Set<NetconfOperation> allOperations) {
allNetconfOperations = allOperations;
}
/**
* Test all requests are handled properly and no mismatch occurs in listener
*/
- @Test(timeout = 5*60*1000)
+ @Test(timeout = 6*60*1000)
public void testSecureStress() throws Exception {
- final int requests = 10000;
+ final int requests = 4000;
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
final NetconfDeviceCommunicator sessionListener = getSessionListener();
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Collections2;
+import javax.annotation.Nonnull;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.Yang;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.schemas.Schema;
return Collections2.transform(schema.getLocation(), new Function<Schema.Location, String>() {
@Nullable
@Override
- public String apply(@Nullable Schema.Location input) {
+ public String apply(@Nonnull Schema.Location input) {
return input.getEnumeration().toString();
}
});
"<session-id>1</session-id>" +
"<in-bad-rpcs>0</in-bad-rpcs>" +
"<in-rpcs>0</in-rpcs>" +
- "<login-time>loginTime</login-time>" +
+ "<login-time>2010-10-10T12:32:32Z</login-time>" +
"<out-notifications>0</out-notifications>" +
"<out-rpc-errors>0</out-rpc-errors>" +
"<ncme:session-identifier>client</ncme:session-identifier>" +
- "<source-host>address/port</source-host>" +
+ "<source-host>192.168.1.1</source-host>" +
"<transport>ncme:netconf-tcp</transport>" +
"<username>username</username>" +
"</session>"));
final Session1 mockedSession1 = mock(Session1.class);
doReturn("client").when(mockedSession1).getSessionIdentifier();
doReturn(1L).when(mocked).getSessionId();
- doReturn(new DateAndTime("loginTime")).when(mocked).getLoginTime();
- doReturn(new Host(new DomainName("address/port"))).when(mocked).getSourceHost();
+ doReturn(new DateAndTime("2010-10-10T12:32:32Z")).when(mocked).getLoginTime();
+ doReturn(new Host(new DomainName("192.168.1.1"))).when(mocked).getSourceHost();
doReturn(new ZeroBasedCounter32(0L)).when(mocked).getInBadRpcs();
doReturn(new ZeroBasedCounter32(0L)).when(mocked).getInRpcs();
doReturn(new ZeroBasedCounter32(0L)).when(mocked).getOutNotifications();
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandler;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoReadFuture;
+import org.apache.sshd.common.util.Buffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Listener on async input stream from SSH session.
+ * This listener schedules reads in a loop until the session is closed or read fails.
+ *
+ * NOTE(review): class name is missing an 'l' ("Hander" -> "Handler"); renaming also
+ * requires updating the usages in AsyncSshHandler, so it is only flagged here.
+ */
+final class AsyncSshHanderReader implements SshFutureListener<IoReadFuture>, AutoCloseable {
+
+ // Logs under AsyncSshHandler.class so reader events appear in the owning handler's
+ // logger category -- presumably intentional (kept from the former inner class); confirm.
+ private static final Logger logger = LoggerFactory.getLogger(AsyncSshHandler.class);
+
+ // Size of the buffer allocated for each scheduled read
+ private static final int BUFFER_SIZE = 8192;
+
+ // Handler used to disconnect the channel when the read loop terminates with an error
+ private final ChannelOutboundHandler asyncSshHandler;
+ private final ChannelHandlerContext ctx;
+
+ // Nulled in close() to mark this reader as shut down
+ private IoInputStream asyncOut;
+ // Buffer for the currently scheduled read; replaced before each new read
+ private Buffer buf;
+ // Future of the currently scheduled read; kept so close() can detach this listener
+ private IoReadFuture currentReadFuture;
+
+ // Schedules the first read immediately; subsequent reads are re-scheduled from operationComplete
+ public AsyncSshHanderReader(final ChannelOutboundHandler asyncSshHandler, final ChannelHandlerContext ctx, final IoInputStream asyncOut) {
+ this.asyncSshHandler = asyncSshHandler;
+ this.ctx = ctx;
+ this.asyncOut = asyncOut;
+ buf = new Buffer(BUFFER_SIZE);
+ asyncOut.read(buf).addListener(this);
+ }
+
+ // Invoked by sshd when a scheduled read finishes, successfully or not
+ @Override
+ public synchronized void operationComplete(final IoReadFuture future) {
+ if(future.getException() != null) {
+ // NOTE(review): if this callback fires after close() nulled asyncOut, the calls
+ // below would NPE -- close() detaches the listener first, but verify no race exists.
+ if(asyncOut.isClosed() || asyncOut.isClosing()) {
+ // Ssh dropped
+ logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
+ } else {
+ logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
+ }
+ invokeDisconnect();
+ return;
+ }
+
+ if (future.getRead() > 0) {
+ // Hand received bytes to the netty pipeline; wrappedBuffer avoids a copy
+ ctx.fireChannelRead(Unpooled.wrappedBuffer(buf.array(), 0, future.getRead()));
+
+ // Schedule next read
+ buf = new Buffer(BUFFER_SIZE);
+ currentReadFuture = asyncOut.read(buf);
+ currentReadFuture.addListener(this);
+ }
+ }
+
+ // Tears the channel down via the owning handler after a failed read
+ private void invokeDisconnect() {
+ try {
+ asyncSshHandler.disconnect(ctx, ctx.newPromise());
+ } catch (final Exception e) {
+ // This should not happen
+ throw new IllegalStateException(e);
+ }
+ }
+
+ @Override
+ public synchronized void close() {
+ // Remove self as listener on close to prevent reading from closed input
+ if(currentReadFuture != null) {
+ currentReadFuture.removeListener(this);
+ }
+
+ asyncOut = null;
+ }
+}
package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
import com.google.common.base.Preconditions;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelOutboundHandler;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import java.io.IOException;
import org.apache.sshd.client.future.OpenFuture;
import org.apache.sshd.common.future.CloseFuture;
import org.apache.sshd.common.future.SshFutureListener;
-import org.apache.sshd.common.io.IoInputStream;
-import org.apache.sshd.common.io.IoOutputStream;
-import org.apache.sshd.common.io.IoReadFuture;
-import org.apache.sshd.common.io.IoWriteFuture;
-import org.apache.sshd.common.io.WritePendingException;
-import org.apache.sshd.common.util.Buffer;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private final AuthenticationHandler authenticationHandler;
private final SshClient sshClient;
- private SshReadAsyncListener sshReadAsyncListener;
- private SshWriteAsyncHandler sshWriteAsyncHandler;
+ private AsyncSshHanderReader sshReadAsyncListener;
+ private AsyncSshHandlerWriter sshWriteAsyncHandler;
private ClientChannel channel;
private ClientSession session;
connectPromise.setSuccess();
connectPromise = null;
- sshReadAsyncListener = new SshReadAsyncListener(this, ctx, channel.getAsyncOut());
+ sshReadAsyncListener = new AsyncSshHanderReader(this, ctx, channel.getAsyncOut());
// if readAsyncListener receives immediate close, it will close this handler and closing this handler sets channel variable to null
if(channel != null) {
- sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn());
+ sshWriteAsyncHandler = new AsyncSshHandlerWriter(channel.getAsyncIn());
ctx.fireChannelActive();
}
}
ctx.fireChannelInactive();
}
- /**
- * Listener over async input stream from SSH session.
- * This listeners schedules reads in a loop until the session is closed or read fails.
- */
- private static class SshReadAsyncListener implements SshFutureListener<IoReadFuture>, AutoCloseable {
- private static final int BUFFER_SIZE = 8192;
-
- private final ChannelOutboundHandler asyncSshHandler;
- private final ChannelHandlerContext ctx;
-
- private IoInputStream asyncOut;
- private Buffer buf;
- private IoReadFuture currentReadFuture;
-
- public SshReadAsyncListener(final ChannelOutboundHandler asyncSshHandler, final ChannelHandlerContext ctx, final IoInputStream asyncOut) {
- this.asyncSshHandler = asyncSshHandler;
- this.ctx = ctx;
- this.asyncOut = asyncOut;
- buf = new Buffer(BUFFER_SIZE);
- asyncOut.read(buf).addListener(this);
- }
-
- @Override
- public synchronized void operationComplete(final IoReadFuture future) {
- if(future.getException() != null) {
- if(asyncOut.isClosed() || asyncOut.isClosing()) {
- // Ssh dropped
- logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
- } else {
- logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
- }
- invokeDisconnect();
- return;
- }
-
- if (future.getRead() > 0) {
- ctx.fireChannelRead(Unpooled.wrappedBuffer(buf.array(), 0, future.getRead()));
-
- // Schedule next read
- buf = new Buffer(BUFFER_SIZE);
- currentReadFuture = asyncOut.read(buf);
- currentReadFuture.addListener(this);
- }
- }
-
- private void invokeDisconnect() {
- try {
- asyncSshHandler.disconnect(ctx, ctx.newPromise());
- } catch (final Exception e) {
- // This should not happen
- throw new IllegalStateException(e);
- }
- }
-
- @Override
- public synchronized void close() {
- // Remove self as listener on close to prevent reading from closed input
- if(currentReadFuture != null) {
- currentReadFuture.removeListener(this);
- }
-
- asyncOut = null;
- }
- }
-
- private static final class SshWriteAsyncHandler implements AutoCloseable {
- public static final int MAX_PENDING_WRITES = 100;
-
- private final ChannelOutboundHandler channelHandler;
- private IoOutputStream asyncIn;
-
- // Counter that holds the amount of pending write messages
- // Pending write can occur in case remote window is full
- // In such case, we need to wait for the pending write to finish
- private int pendingWriteCounter;
- // Last write future, that can be pending
- private IoWriteFuture lastWriteFuture;
-
- public SshWriteAsyncHandler(final ChannelOutboundHandler channelHandler, final IoOutputStream asyncIn) {
- this.channelHandler = channelHandler;
- this.asyncIn = asyncIn;
- }
-
- int c = 0;
-
- public synchronized void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
- try {
- if(asyncIn == null || asyncIn.isClosed() || asyncIn.isClosing()) {
- // If we are closed/closing, set immediate fail
- promise.setFailure(new IllegalStateException("Channel closed"));
- } else {
- lastWriteFuture = asyncIn.write(toBuffer(msg));
- lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
-
- @Override
- public void operationComplete(final IoWriteFuture future) {
- ((ByteBuf) msg).release();
-
- // Notify success or failure
- if (future.isWritten()) {
- promise.setSuccess();
- } else {
- promise.setFailure(future.getException());
- }
-
- // Reset last pending future
- synchronized (SshWriteAsyncHandler.this) {
- lastWriteFuture = null;
- }
- }
- });
- }
- } catch (final WritePendingException e) {
- // Check limit for pending writes
- pendingWriteCounter++;
- if(pendingWriteCounter > MAX_PENDING_WRITES) {
- promise.setFailure(e);
- handlePendingFailed(ctx, new IllegalStateException("Too much pending writes(" + MAX_PENDING_WRITES + ") on channel: " + ctx.channel() +
- ", remote window is not getting read or is too small"));
- }
-
- // We need to reset buffer read index, since we've already read it when we tried to write it the first time
- ((ByteBuf) msg).resetReaderIndex();
- logger.debug("Write pending to SSH remote on channel: {}, current pending count: {}", ctx.channel(), pendingWriteCounter);
-
- // In case of pending, re-invoke write after pending is finished
- Preconditions.checkNotNull(lastWriteFuture, "Write is pending, but there was no previous write attempt", e);
- lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
- @Override
- public void operationComplete(final IoWriteFuture future) {
- // FIXME possible minor race condition, we cannot guarantee that this callback when pending is finished will be executed first
- // External thread could trigger write on this instance while we are on this line
- // Verify
- if (future.isWritten()) {
- synchronized (SshWriteAsyncHandler.this) {
- // Pending done, decrease counter
- pendingWriteCounter--;
- write(ctx, msg, promise);
- }
- } else {
- // Cannot reschedule pending, fail
- handlePendingFailed(ctx, e);
- }
- }
-
- });
- }
- }
-
- private void handlePendingFailed(final ChannelHandlerContext ctx, final Exception e) {
- logger.warn("Exception while writing to SSH remote on channel {}", ctx.channel(), e);
- try {
- channelHandler.disconnect(ctx, ctx.newPromise());
- } catch (final Exception ex) {
- // This should not happen
- throw new IllegalStateException(ex);
- }
- }
-
- @Override
- public void close() {
- asyncIn = null;
- }
-
- private Buffer toBuffer(final Object msg) {
- // TODO Buffer vs ByteBuf translate, Can we handle that better ?
- Preconditions.checkState(msg instanceof ByteBuf);
- final ByteBuf byteBuf = (ByteBuf) msg;
- final byte[] temp = new byte[byteBuf.readableBytes()];
- byteBuf.readBytes(temp, 0, byteBuf.readableBytes());
- return new Buffer(temp);
- }
-
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.Queue;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.common.io.IoWriteFuture;
+import org.apache.sshd.common.io.WritePendingException;
+import org.apache.sshd.common.util.Buffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Async Ssh writer. Takes messages(byte arrays) and sends them encrypted to remote server.
+ * Also handles pending writes by caching requests until pending state is over.
+ */
+final class AsyncSshHandlerWriter implements AutoCloseable {
+
+    private static final Logger logger = LoggerFactory
+            .getLogger(AsyncSshHandlerWriter.class);
+
+    // TODO implement limiting mechanism for pending writes
+    // But there is a possible issue with limiting:
+    // 1. What to do when queue is full ? Immediate Fail for every request ?
+    // 2. At this level we might be dealing with Chunks of messages(not whole messages) and unexpected behavior might occur
+    // when we send/queue 1 chunk and fail the other chunks
+
+    // Nulled in close() so subsequent writes fail immediately
+    private IoOutputStream asyncIn;
+
+    // Order has to be preserved for queued writes
+    private final Deque<PendingWriteRequest> pending = new LinkedList<>();
+
+    public AsyncSshHandlerWriter(final IoOutputStream asyncIn) {
+        this.asyncIn = asyncIn;
+    }
+
+    /**
+     * Write a message to the remote, or queue it while a previous write is still pending.
+     *
+     * @param ctx netty channel context the message originates from
+     * @param msg message to send, must be a {@link ByteBuf}
+     * @param promise completed with success or failure once the write finishes
+     */
+    public synchronized void write(final ChannelHandlerContext ctx,
+            final Object msg, final ChannelPromise promise) {
+        // TODO check for isClosed, isClosing might be performed by mina SSH internally and is not required here
+        // If we are closed/closing, set immediate fail
+        if (asyncIn == null || asyncIn.isClosed() || asyncIn.isClosing()) {
+            promise.setFailure(new IllegalStateException("Channel closed"));
+        } else {
+            final ByteBuf byteBufMsg = (ByteBuf) msg;
+            // Preserve ordering: while older writes are queued, new writes must queue too
+            if (pending.isEmpty() == false) {
+                queueRequest(ctx, byteBufMsg, promise);
+                return;
+            }
+
+            writeWithPendingDetection(ctx, promise, byteBufMsg);
+        }
+    }
+
+    // Attempt the actual sshd write; on WritePendingException fall back to queueing
+    private void writeWithPendingDetection(final ChannelHandlerContext ctx, final ChannelPromise promise, final ByteBuf byteBufMsg) {
+        try {
+            if (logger.isTraceEnabled()) {
+                logger.trace("Writing request on channel: {}, message: {}", ctx.channel(), byteBufToString(byteBufMsg));
+            }
+            asyncIn.write(toBuffer(byteBufMsg)).addListener(new SshFutureListener<IoWriteFuture>() {
+
+                @Override
+                public void operationComplete(final IoWriteFuture future) {
+                    if (logger.isTraceEnabled()) {
+                        logger.trace("Ssh write request finished on channel: {} with result: {}: and ex:{}, message: {}",
+                                ctx.channel(), future.isWritten(), future.getException(), byteBufToString(byteBufMsg));
+                    }
+
+                    // Notify success or failure
+                    if (future.isWritten()) {
+                        promise.setSuccess();
+                    } else {
+                        logger.warn("Ssh write request failed on channel: {} for message: {}", ctx.channel(), byteBufToString(byteBufMsg), future.getException());
+                        promise.setFailure(future.getException());
+                    }
+
+                    // Not needed anymore, release
+                    byteBufMsg.release();
+
+                    // Check pending queue and schedule next
+                    // At this time we are guaranteed that we are not in pending state anymore so the next request should succeed
+                    writePendingIfAny();
+                }
+            });
+        } catch (final WritePendingException e) {
+            queueRequest(ctx, byteBufMsg, promise);
+        }
+    }
+
+    // Drain one queued request, if any; re-invoked after every completed write
+    private synchronized void writePendingIfAny() {
+        if (pending.peek() == null) {
+            return;
+        }
+
+        // In case of pending, reschedule next message from queue
+        final PendingWriteRequest pendingWrite = pending.poll();
+        final ByteBuf msg = pendingWrite.msg;
+        if (logger.isTraceEnabled()) {
+            logger.trace("Writing pending request on channel: {}, message: {}", pendingWrite.ctx.channel(), byteBufToString(msg));
+        }
+
+        writeWithPendingDetection(pendingWrite.ctx, pendingWrite.promise, msg);
+    }
+
+    // Render buffer content for trace/debug logging without consuming the buffer
+    private static String byteBufToString(final ByteBuf msg) {
+        msg.resetReaderIndex();
+        final String s = msg.toString(Charsets.UTF_8);
+        msg.resetReaderIndex();
+        return s;
+    }
+
+    // Enqueue a request that could not be written because a previous write is pending
+    private void queueRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
+        logger.debug("Write pending on channel: {}, queueing, current queue size: {}", ctx.channel(), pending.size());
+        if (logger.isTraceEnabled()) {
+            logger.trace("Queueing request due to pending: {}", byteBufToString(msg));
+        }
+        new PendingWriteRequest(ctx, msg, promise).pend(pending);
+    }
+
+    @Override
+    public synchronized void close() {
+        asyncIn = null;
+    }
+
+    // TODO Buffer vs ByteBuf translate, Can we handle that better ?
+    private static Buffer toBuffer(final ByteBuf msg) {
+        final byte[] temp = new byte[msg.readableBytes()];
+        msg.readBytes(temp, 0, msg.readableBytes());
+        return new Buffer(temp);
+    }
+
+    // A write request parked until the current pending write finishes
+    private static final class PendingWriteRequest {
+        private final ChannelHandlerContext ctx;
+        private final ByteBuf msg;
+        private final ChannelPromise promise;
+
+        public PendingWriteRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
+            this.ctx = ctx;
+            // Reset reader index, last write (failed) attempt moved index to the end
+            msg.resetReaderIndex();
+            this.msg = msg;
+            this.promise = promise;
+        }
+
+        // Append to the (currently unbounded) pending queue
+        public void pend(final Queue<PendingWriteRequest> pending) {
+            Preconditions.checkState(pending.offer(this), "Cannot pend another request write (pending count: %s) on channel: %s",
+                    pending.size(), ctx.channel());
+        }
+    }
+}
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
import java.io.IOException;
import java.net.SocketAddress;
-import java.nio.channels.WritePendingException;
import org.apache.sshd.ClientChannel;
import org.apache.sshd.ClientSession;
import org.apache.sshd.SshClient;
import org.apache.sshd.common.util.Buffer;
import org.junit.After;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mock;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
// make first write stop pending
firstWriteListener.operationComplete(ioWriteFuture);
- // intercept third listener, this is regular listener for second write to determine success or failure
- final ListenableFuture<SshFutureListener<IoWriteFuture>> afterPendingListener = stubAddListener(ioWriteFuture);
// notify listener for second write that pending has ended
pendingListener.get().operationComplete(ioWriteFuture);
- // Notify third listener (regular listener for second write) that second write succeeded
- afterPendingListener.get().operationComplete(ioWriteFuture);
// verify both write promises successful
verify(firstWritePromise).setSuccess();
verify(secondWritePromise).setSuccess();
}
+ @Ignore("Pending queue is not limited")
@Test
public void testWritePendingMax() throws Exception {
asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
final ChannelPromise secondWritePromise = getMockedPromise();
// now make write throw pending exception
doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class));
- for (int i = 0; i < 1000; i++) {
+ for (int i = 0; i < 1001; i++) {
asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise);
}
- verify(ctx).fireChannelInactive();
+ verify(secondWritePromise, times(1)).setFailure(any(Throwable.class));
}
@Test
final AuthProvider authService = bundleContext.getService(reference);
final Integer newServicePreference = getPreference(reference);
if(isBetter(newServicePreference)) {
+ maxPreference = newServicePreference;
server.setAuthProvider(authService);
if(sshThread == null) {
sshThread = runNetconfSshThread(server);
}
@VisibleForTesting
- void setNullableUserManager(final IUserManager nullableUserManager) {
+ synchronized void setNullableUserManager(final IUserManager nullableUserManager) {
this.nullableUserManager = nullableUserManager;
}
}
rpcReply.appendChild(responseNS);
}
- for (String attrName : attributes.keySet()) {
- rpcReply.setAttributeNode((Attr) document.importNode(attributes.get(attrName), true));
+ for (Attr attribute : attributes.values()) {
+ rpcReply.setAttributeNode((Attr) document.importNode(attribute, true));
}
document.appendChild(rpcReply);
return document;
import com.google.common.base.Function;
import com.google.common.collect.Collections2;
-
+import java.util.Collection;
+import java.util.List;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
-import javax.annotation.Nullable;
-
-import java.util.Collection;
-import java.util.List;
-
public final class NetconfMessageUtil {
private static final Logger logger = LoggerFactory.getLogger(NetconfMessageUtil.class);
List<XmlElement> caps = capabilitiesElement.getChildElements(XmlNetconfConstants.CAPABILITY);
return Collections2.transform(caps, new Function<XmlElement, String>() {
- @Nullable
@Override
- public String apply(@Nullable XmlElement input) {
+ public String apply(@Nonnull XmlElement input) {
// Trim possible leading/tailing whitespace
try {
return input.getTextContent().trim();
}
}
- /**
- * Get extracted address or default.
- *
- * @throws java.lang.IllegalStateException if neither address is present.
- */
- private static InetSocketAddress getNetconfAddress(final InetSocketAddress defaultAddress, Optional<InetSocketAddress> extractedAddress, InfixProp infix) {
- InetSocketAddress inetSocketAddress;
-
- if (extractedAddress.isPresent() == false) {
- logger.debug("Netconf {} address not found, falling back to default {}", infix, defaultAddress);
-
- if (defaultAddress == null) {
- logger.warn("Netconf {} address not found, default address not provided", infix);
- throw new IllegalStateException("Netconf " + infix + " address not found, default address not provided");
- }
- inetSocketAddress = defaultAddress;
- } else {
- inetSocketAddress = extractedAddress.get();
- }
-
- return inetSocketAddress;
- }
-
public static String getPrivateKeyPath(final BundleContext context) {
return getPropertyValue(context, getPrivateKeyKey());
}
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import javax.annotation.Nullable;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
return Lists.newArrayList(Collections2.filter(getChildElementsWithinNamespace(namespace),
new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
List<XmlElement> children = getChildElementsWithinNamespace(namespace);
children = Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
List<XmlElement> children = getChildElementsWithinNamespace(getNamespace());
return Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class AbstractLastNetconfOperationTest {
+
+    // Minimal concrete subclass recording whether the terminal handler was invoked.
+    class LastNetconfOperationImplTest extends AbstractLastNetconfOperation {
+
+        boolean handleWithNoSubsequentOperationsRun = false;
+
+        protected LastNetconfOperationImplTest(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+        }
+
+        @Override
+        protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+            handleWithNoSubsequentOperationsRun = true;
+            return null;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return "";
+        }
+    }
+
+    LastNetconfOperationImplTest netconfOperation;
+
+    @Before
+    public void setUp() throws Exception {
+        netconfOperation = new LastNetconfOperationImplTest("");
+    }
+
+    // The terminal handler must run, and the default handling priority must be reported.
+    @Test
+    public void testNetconfOperation() throws Exception {
+        netconfOperation.handleWithNoSubsequentOperations(null, null);
+        assertTrue(netconfOperation.handleWithNoSubsequentOperationsRun);
+        assertEquals(HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY, netconfOperation.getHandlingPriority());
+    }
+
+    // A last operation must reject a subsequent operation that is not execution-terminating.
+    @Test(expected = NetconfDocumentedException.class)
+    public void testHandle() throws Exception {
+        NetconfOperationChainedExecution operation = mock(NetconfOperationChainedExecution.class);
+        doReturn("").when(operation).toString();
+        doReturn(false).when(operation).isExecutionTermination();
+        netconfOperation.handle(null, null, operation);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import java.io.IOException;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+public class AbstractNetconfOperationTest {
+
+    // Minimal concrete operation recording whether handle() was invoked.
+    class NetconfOperationImpl extends AbstractNetconfOperation {
+
+        public boolean handleRun;
+
+        protected NetconfOperationImpl(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+            this.handleRun = false;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return null;
+        }
+
+        @Override
+        protected Element handle(Document document, XmlElement message, NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+            this.handleRun = true;
+            try {
+                return XmlUtil.readXmlToElement("<element/>");
+            } catch (SAXException | IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    private NetconfOperationImpl netconfOperation;
+    private NetconfOperationChainedExecution operation;
+
+    @Before
+    public void setUp() throws Exception {
+        netconfOperation = new NetconfOperationImpl("str");
+        operation = mock(NetconfOperationChainedExecution.class);
+    }
+
+    // Verifies session id reporting, canHandle, default priority and handle() dispatch.
+    @Test
+    public void testAbstractNetconfOperation() throws Exception {
+        Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/edit_config.xml");
+        // assertEquals takes (expected, actual) -- original had the arguments swapped,
+        // which produces misleading failure messages
+        assertEquals("str", netconfOperation.getNetconfSessionIdForReporting());
+        assertNotNull(netconfOperation.canHandle(helloMessage));
+        assertEquals(HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY, netconfOperation.getHandlingPriority());
+
+        netconfOperation.handle(helloMessage, operation);
+        assertTrue(netconfOperation.handleRun);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+
+public class AbstractSingletonNetconfOperationTest {
+
+    // Minimal concrete singleton operation; only the priority contract is under test.
+    class SingletonNCOperationImpl extends AbstractSingletonNetconfOperation {
+
+        protected SingletonNCOperationImpl(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+        }
+
+        @Override
+        protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+            return null;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return null;
+        }
+    }
+
+    // A singleton operation must always report maximum handling priority.
+    @Test
+    public void testAbstractSingletonNetconfOperation() throws Exception {
+        SingletonNCOperationImpl operation = new SingletonNCOperationImpl("");
+        // assertEquals takes (expected, actual) -- original had the arguments swapped
+        assertEquals(HandlingPriority.HANDLE_WITH_MAX_PRIORITY, operation.getHandlingPriority());
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class NetconfHelloMessageAdditionalHeaderTest {
+
+    // String form expected to parse into the same header as built in setUp()
+    private static final String CUSTOM_HEADER = "[user;1.1.1.1:40;tcp;client;]";
+
+    private NetconfHelloMessageAdditionalHeader header;
+
+    @Before
+    public void setUp() throws Exception {
+        header = new NetconfHelloMessageAdditionalHeader("user", "1.1.1.1", "40", "tcp", "client");
+    }
+
+    // Each constructor argument must be returned unchanged by its getter.
+    @Test
+    public void testGetters() throws Exception {
+        // assertEquals takes (expected, actual) -- original had the arguments swapped,
+        // which produces misleading failure messages
+        assertEquals("1.1.1.1", header.getAddress());
+        assertEquals("user", header.getUserName());
+        assertEquals("40", header.getPort());
+        assertEquals("tcp", header.getTransport());
+        assertEquals("client", header.getSessionIdentifier());
+    }
+
+    // Parsing the string form must yield a header equivalent to the constructed one.
+    @Test
+    public void testStaticConstructor() throws Exception {
+        NetconfHelloMessageAdditionalHeader h = NetconfHelloMessageAdditionalHeader.fromString(CUSTOM_HEADER);
+        assertEquals(header.toString(), h.toString());
+        assertEquals(header.toFormattedString(), h.toFormattedString());
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import com.google.common.base.Optional;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Unit test for {@link NetconfHelloMessage}: exercises the client and server
+ * hello factory methods and the additional-header round trip.
+ */
+public class NetconfHelloMessageTest {
+
+    private Set<String> caps;
+
+    @Before
+    public void setUp() throws Exception {
+        // Plain JDK collection instead of org.mockito.internal.util.collections.Sets:
+        // Mockito's "internal" packages are not public API and may change without notice.
+        caps = new HashSet<String>(Arrays.asList("cap1"));
+    }
+
+    @Test
+    public void testConstructor() throws Exception {
+        NetconfHelloMessageAdditionalHeader additionalHeader =
+                new NetconfHelloMessageAdditionalHeader("name", "host", "1", "transp", "id");
+        NetconfHelloMessage message = NetconfHelloMessage.createClientHello(caps, Optional.of(additionalHeader));
+        assertTrue(message.isHelloMessage(message));
+        assertEquals(Optional.of(additionalHeader), message.getAdditionalHeader());
+
+        NetconfHelloMessage serverMessage = NetconfHelloMessage.createServerHello(caps, 100L);
+        assertTrue(serverMessage.isHelloMessage(serverMessage));
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import com.google.common.base.Charsets;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Unit test for {@link NetconfMessageHeader}: the length accessor and the
+ * chunked-framing byte form ("\n#&lt;length&gt;\n", US-ASCII).
+ */
+public class NetconfMessageHeaderTest {
+
+    @Test
+    public void testGet() throws Exception {
+        NetconfMessageHeader header = new NetconfMessageHeader(10);
+        // JUnit convention: expected value first, actual value second.
+        assertEquals(10, header.getLength());
+
+        byte[] expectedValue = "\n#10\n".getBytes(Charsets.US_ASCII);
+        assertArrayEquals(expectedValue, header.toBytes());
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import java.util.Collection;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class NetconfMessageUtilTest {
+ @Test
+ public void testNetconfMessageUtil() throws Exception {
+ Document okMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+ assertTrue(NetconfMessageUtil.isOKMessage(new NetconfMessage(okMessage)));
+ assertFalse(NetconfMessageUtil.isErrorMessage(new NetconfMessage(okMessage)));
+
+ Document errorMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/communicationError/testClientSendsRpcReply_expectedResponse.xml");
+ assertTrue(NetconfMessageUtil.isErrorMessage(new NetconfMessage(errorMessage)));
+ assertFalse(NetconfMessageUtil.isOKMessage(new NetconfMessage(errorMessage)));
+
+ Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/client_hello.xml");
+ Collection<String> caps = NetconfMessageUtil.extractCapabilitiesFromHello(new NetconfMessage(helloMessage).getDocument());
+ assertTrue(caps.contains("urn:ietf:params:netconf:base:1.0"));
+ assertTrue(caps.contains("urn:ietf:params:netconf:base:1.1"));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.*;
+
+public class SendErrorExceptionUtilTest {
+
+ NetconfSession netconfSession;
+ ChannelFuture channelFuture;
+ Channel channel;
+ private NetconfDocumentedException exception;
+
+ @Before
+ public void setUp() throws Exception {
+ netconfSession = mock(NetconfSession.class);
+ channelFuture = mock(ChannelFuture.class);
+ channel = mock(Channel.class);
+ doReturn(channelFuture).when(netconfSession).sendMessage(any(NetconfMessage.class));
+ doReturn(channelFuture).when(channelFuture).addListener(any(GenericFutureListener.class));
+ doReturn(channelFuture).when(channel).writeAndFlush(any(NetconfMessage.class));
+ exception = new NetconfDocumentedException("err");
+ }
+
+ @Test
+ public void testSendErrorMessage1() throws Exception {
+ SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception);
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ verify(netconfSession, times(1)).sendMessage(any(NetconfMessage.class));
+ }
+
+ @Test
+ public void testSendErrorMessage2() throws Exception {
+ SendErrorExceptionUtil.sendErrorMessage(channel, exception);
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ }
+
+ @Test
+ public void testSendErrorMessage3() throws Exception {
+ Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc.xml");
+ SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception, new NetconfMessage(helloMessage));
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.osgi;
+
+import com.google.common.base.Optional;
+import io.netty.channel.local.LocalAddress;
+import java.net.InetSocketAddress;
+import org.junit.Before;
+import org.junit.Test;
+import org.osgi.framework.BundleContext;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Unit test for {@link NetconfConfigUtil}: property-key helpers and extraction
+ * of timeout/address configuration from a mocked {@link BundleContext}.
+ */
+public class NetconfConfigUtilTest {
+
+    private BundleContext bundleContext;
+
+    @Before
+    public void setUp() throws Exception {
+        bundleContext = mock(BundleContext.class);
+    }
+
+    @Test
+    public void testNetconfConfigUtil() throws Exception {
+        // JUnit convention throughout: expected value first, actual value second.
+        assertEquals(new LocalAddress("netconf"), NetconfConfigUtil.getNetconfLocalAddress());
+
+        // Empty and unparseable timeout properties both fall back to the default (5000 ms).
+        doReturn("").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+        assertEquals(5000, NetconfConfigUtil.extractTimeoutMillis(bundleContext));
+
+        doReturn("a").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+        assertEquals(5000, NetconfConfigUtil.extractTimeoutMillis(bundleContext));
+    }
+
+    @Test
+    public void testGetPrivateKeyKey() throws Exception {
+        assertEquals("netconf.ssh.pk.path", NetconfConfigUtil.getPrivateKeyKey());
+    }
+
+    @Test
+    public void testGetNetconfServerAddressKey() throws Exception {
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        assertEquals("netconf.tcp.address", NetconfConfigUtil.getNetconfServerAddressKey(prop));
+    }
+
+    @Test
+    public void testExtractNetconfServerAddress() throws Exception {
+        // With no usable address/port properties, no server address is reported.
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        doReturn("").when(bundleContext).getProperty(anyString());
+        assertEquals(Optional.absent(), NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop));
+    }
+
+    @Test
+    public void testExtractNetconfServerAddress2() throws Exception {
+        // Valid address and port properties yield the corresponding socket address.
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        doReturn("1.1.1.1").when(bundleContext).getProperty("netconf.tcp.address");
+        doReturn("20").when(bundleContext).getProperty("netconf.tcp.port");
+        Optional<InetSocketAddress> inetSocketAddressOptional = NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop);
+        assertTrue(inetSocketAddressOptional.isPresent());
+        assertEquals(new InetSocketAddress("1.1.1.1", 20), inetSocketAddressOptional.get());
+    }
+
+    @Test
+    public void testGetPrivateKeyPath() throws Exception {
+        doReturn("path").when(bundleContext).getProperty("netconf.ssh.pk.path");
+        assertEquals("path", NetconfConfigUtil.getPrivateKeyPath(bundleContext));
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void testGetPrivateKeyPath2() throws Exception {
+        // A missing key path property must raise IllegalStateException; no
+        // assertion after the call — it is expected to throw.
+        doReturn(null).when(bundleContext).getProperty("netconf.ssh.pk.path");
+        NetconfConfigUtil.getPrivateKeyPath(bundleContext);
+    }
+}
org.eclipse.persistence.jaxb.rs,
com.sun.jersey.spi.container.servlet,
javax.ws.rs,
+ javax.ws.rs.ext,
javax.ws.rs.core,
javax.xml.bind.annotation,
javax.xml.bind,
import org.codehaus.enunciate.jaxrs.StatusCodes;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
-import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
/**
* For now, the LB pool member data is maintained with the INeutronLoadBalancerPoolCRUD,
- * although there may be an overlap with INeutronLoadBalancerPoolMemberCRUD's cache.
- * TODO: Consolidate and maintain a single copy
+ * and not duplicated within the INeutronLoadBalancerPoolMemberCRUD's cache.
*/
@Path("/pools")
service.neutronLoadBalancerPoolDeleted(singleton);
}
}
-
- /*
- * remove corresponding members from the member cache too
- */
- INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(this);
- if (loadBalancerPoolMemberInterface != null) {
- List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = new
- ArrayList<NeutronLoadBalancerPoolMember>(loadBalancerPoolMemberInterface.getAllNeutronLoadBalancerPoolMembers());
- Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
- while (i.hasNext()) {
- NeutronLoadBalancerPoolMember member = i.next();
- if (member.getPoolID() == loadBalancerPoolUUID)
- loadBalancerPoolMemberInterface.removeNeutronLoadBalancerPoolMember(member.getPoolMemberID());
- }
- }
return Response.status(204).build();
}
}