<properties>
<features.file>features.xml</features.file>
+ <org.json.version>20131018</org.json.version>
</properties>
<dependencies>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-dom-xsql</artifactId>
</dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-karaf-xsql</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-dom-xsql-config</artifactId>
<type>xml</type>
<classifier>config</classifier>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>testmoduleshardconf</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>testmoduleconf</classifier>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-rest-docgen</artifactId>
<feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
<feature version='${yangtools.version}'>odl-yangtools-common</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
<feature version='${mdsal.version}'>odl-mdsal-common</feature>
<feature version='${config.version}'>odl-config-startup</feature>
<feature version='${config.version}'>odl-config-netty</feature>
<feature name ='odl-mdsal-xsql' version='${project.version}'>
<feature version='${project.version}'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/sal-dom-xsql/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-karaf-xsql/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/${config.xsql.configfile}">mvn:org.opendaylight.controller/sal-dom-xsql-config/${project.version}/xml/config</configfile>
</feature>
<feature name ='odl-mdsal-apidocs' version='${project.version}'>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
<feature version='${project.version}'>odl-netconf-impl</feature>
- <feature version='${project.version}'>odl-netconf-tcp</feature>
- <feature version='${project.version}'>odl-netconf-ssh</feature>
<feature version='${project.version}'>odl-config-netconf-connector</feature>
<feature version='${project.version}'>odl-netconf-netty-util</feature>
<feature version='${project.version}'>odl-netconf-client</feature>
<artifactId>sal-dom-xsql</artifactId>
<version>${mdsal.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-karaf-xsql</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-dom-xsql-config</artifactId>
# export KARAF_ETC # Karaf etc folder
# export KARAF_OPTS # Additional available Karaf options
# export KARAF_DEBUG # Enable debug mode
-if [ "x$JAVA_MAX_PERM_MEM" == "x" ]; then
+if [ "x$JAVA_MAX_PERM_MEM" = "x" ]; then
export JAVA_MAX_PERM_MEM="512m"
fi
-if [ "x$JAVA_MAX_MEM" == "x" ]; then
+if [ "x$JAVA_MAX_MEM" = "x" ]; then
export JAVA_MAX_MEM="2048m"
fi
#Hosttracker hostsdb key scheme setting
hosttracker.keyscheme=IP
+# LISP Flow Mapping configuration
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
+lisp.mappingOverwrite = true
+# Enable the Solicit-Map-Request (SMR) mechanism
+lisp.smr = false
+
*/
package org.opendaylight.controller.md.sal.dom.store.benchmark;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.TearDown;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
/**
* Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
private static final int MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000;
+ @Override
@Setup(Level.Trial)
public void setUp() throws Exception {
final String name = "DS_BENCHMARK";
final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
- final ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
- MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name );
+ final ListeningExecutorService domStoreExecutor = MoreExecutors.listeningDecorator(SpecialExecutors.newBoundedSingleThreadExecutor(
+ MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name ));
domStore = new InMemoryDOMDataStore(name, domStoreExecutor,
dataChangeListenerExecutor);
initTestNode();
}
+ @Override
@TearDown
public void tearDown() {
schemaContext = null;
import com.google.common.net.InetAddresses;
-public class FromSalConversionsUtils {
+/**
+ * MD-SAL to AD-SAL conversions collection
+ */
+public final class FromSalConversionsUtils {
- private FromSalConversionsUtils() {
+ /** http://en.wikipedia.org/wiki/IPv4#Packet_structure (end of octet number 1, bit 14.+15.) */
+ public static final int ENC_FIELD_BIT_SIZE = 2;
+ private FromSalConversionsUtils() {
+ throw new IllegalAccessError("forcing no instance for factory");
}
@SuppressWarnings("unused")
return true;
}
+ /**
+ * @param nwDscp NW-DSCP
+ * @return shifted to NW-TOS (with empty ECN part)
+ */
+ public static int dscpToTos(int nwDscp) {
+ return (short) (nwDscp << ENC_FIELD_BIT_SIZE);
+ }
}
private static SetNwTosActionCase _toAction(final SetNwTos sourceAction) {
return new SetNwTosActionCaseBuilder()
- .setSetNwTosAction(new SetNwTosActionBuilder().setTos(sourceAction.getNwTos()).build())
+ .setSetNwTosAction(new SetNwTosActionBuilder().setTos(FromSalConversionsUtils.dscpToTos(sourceAction.getNwTos())).build())
.build();
}
private static final Logger LOG = LoggerFactory.getLogger(ToSalConversionsUtils.class);
private ToSalConversionsUtils() {
-
+ throw new IllegalAccessError("forcing no instance for factory");
}
public static Flow toFlow(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow source, Node node) {
} else if (sourceAction instanceof SetNwTosActionCase) {
Integer tos = ((SetNwTosActionCase) sourceAction).getSetNwTosAction().getTos();
if (tos != null) {
- targetAction.add(new SetNwTos(tos));
+ targetAction.add(new SetNwTos(ToSalConversionsUtils.tosToNwDscp(tos)));
}
} else if (sourceAction instanceof SetTpDstActionCase) {
PortNumber port = ((SetTpDstActionCase) sourceAction).getSetTpDstAction().getPort();
return mac;
}
+
+ /**
+ * @param nwTos NW-TOS
+ * @return shifted to NW-DSCP
+ */
+ public static int tosToNwDscp(int nwTos) {
+ return (short) (nwTos >>> FromSalConversionsUtils.ENC_FIELD_BIT_SIZE);
+ }
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils;
+
+/**
+ * test of {@link FromSalConversionsUtils}
+ */
+public class FromSalConversionsUtilsTest {
+
+ /**
+ * Test method for {@link org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils#dscpToTos(int)}.
+ */
+ @Test
+ public void testDscpToTos() {
+ Assert.assertEquals(0, FromSalConversionsUtils.dscpToTos(0));
+ Assert.assertEquals(4, FromSalConversionsUtils.dscpToTos(1));
+ Assert.assertEquals(252, FromSalConversionsUtils.dscpToTos(63));
+ Assert.assertEquals(256, FromSalConversionsUtils.dscpToTos(64));
+ Assert.assertEquals(-4, FromSalConversionsUtils.dscpToTos(-1));
+ }
+
+}
}
assertTrue("Ipv4 address wasn't found.", ipv4AddressFound);
} else if (innerAction instanceof SetNwTosActionCase) {
- assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 63, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
+ assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 252, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
} else if (innerAction instanceof SetNwDstActionCase) {
Address address = ((SetNwDstActionCase) innerAction).getSetNwDstAction().getAddress();
boolean ipv4AddressFound = false;
private void prepareActionSetNwTos(SetNwTosActionCaseBuilder wrapper) {
SetNwTosActionBuilder setNwTosActionBuilder = new SetNwTosActionBuilder();
- setNwTosActionBuilder.setTos(63);
+ setNwTosActionBuilder.setTos(252);
wrapper.setSetNwTosAction(setNwTosActionBuilder.build());
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils;
+
+/**
+ * test of {@link ToSalConversionsUtils}
+ */
+public class ToSalConversionsUtilsTest {
+
+ /**
+ * Test method for {@link org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils#tosToNwDscp(int)}.
+ */
+ @Test
+ public void testTosToNwDscp() {
+ Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(0));
+ Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(1));
+ Assert.assertEquals(1, ToSalConversionsUtils.tosToNwDscp(4));
+ Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(252));
+ Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(253));
+ Assert.assertEquals(-1, ToSalConversionsUtils.tosToNwDscp(-1));
+ }
+}
*/
package org.opendaylight.controller.frm.impl;
-import com.google.common.base.Preconditions;
import org.opendaylight.controller.frm.ForwardingRulesManager;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Preconditions;
+
/**
* GroupForwarder
* It implements {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener}}
public FlowForwarder (final ForwardingRulesManager manager, final DataBroker db) {
super(manager, Flow.class);
Preconditions.checkNotNull(db, "DataBroker can not be null!");
- this.listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
- getWildCardPath(), FlowForwarder.this, DataChangeScope.SUBTREE);
+ registrationListener(db, 5);
+ }
+
+ private void registrationListener(final DataBroker db, int i) {
+ try {
+ listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath(), FlowForwarder.this, DataChangeScope.SUBTREE);
+ } catch (final Exception e) {
+ if (i >= 1) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e1) {
+ LOG.error("Thread interrupted '{}'", e1);
+ Thread.currentThread().interrupt();
+ }
+ registrationListener(db, --i);
+ } else {
+ LOG.error("FRM Flow DataChange listener registration fail!", e);
+ throw new IllegalStateException("FlowForwarder registration Listener fail! System needs restart.", e);
+ }
+ }
}
@Override
if (listenerRegistration != null) {
try {
listenerRegistration.close();
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.error("Error by stop FRM FlowChangeListener.", e);
}
listenerRegistration = null;
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
- this.provider.getSalFlowService().removeFlow(builder.build());
+ provider.getSalFlowService().removeFlow(builder.build());
}
}
builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
builder.setOriginalFlow((new OriginalFlowBuilder(original)).build());
- this.provider.getSalFlowService().updateFlow(builder.build());
+ provider.getSalFlowService().updateFlow(builder.build());
}
}
builder.setFlowRef(new FlowRef(identifier));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
- this.provider.getSalFlowService().addFlow(builder.build());
+ provider.getSalFlowService().addFlow(builder.build());
}
}
private boolean tableIdValidationPrecondition (final TableKey tableKey, final Flow flow) {
Preconditions.checkNotNull(tableKey, "TableKey can not be null or empty!");
Preconditions.checkNotNull(flow, "Flow can not be null or empty!");
- if (flow.getTableId() != tableKey.getId()) {
+ if (! tableKey.getId().equals(flow.getTableId())) {
LOG.error("TableID in URI tableId={} and in palyload tableId={} is not same.",
flow.getTableId(), tableKey.getId());
return false;
<!-- XSQL -->
<module>sal-dom-xsql</module>
+ <module>sal-karaf-xsql</module>
<module>sal-dom-xsql-config</module>
<!-- Yang Test Models for MD-SAL -->
import org.opendaylight.controller.cluster.example.messages.PrintState;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
-import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
import java.io.BufferedReader;
import java.io.InputStreamReader;
actorSystem.stop(actorRef);
actorRefs.remove(actorName);
-
- for (ActorRef actor : actorRefs.values()) {
- actor.tell(new RemoveRaftPeer(actorName), null);
- }
-
allPeers.remove(actorName);
}
allPeers.put(actorName, address);
ActorRef exampleActor = createExampleActor(actorName);
-
- for (ActorRef actor : actorRefs.values()) {
- actor.tell(new AddRaftPeer(actorName, address), null);
- }
-
actorRefs.put(actorName, exampleActor);
addClientsToNode(actorName, 1);
import akka.persistence.UntypedPersistentActor;
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
* This context should NOT be passed directly to any other actor it is
* only to be consumed by the RaftActorBehaviors
*/
- private RaftActorContext context;
+ protected RaftActorContext context;
/**
* The in-memory journal
context.setReplicatedLog(replicatedLog);
context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
LOG.info("Applied snapshot to replicatedLog. " +
"snapshotIndex={}, snapshotTerm={}, journal-size={}",
} else if (message instanceof ReplicatedLogEntry) {
ReplicatedLogEntry logEntry = (ReplicatedLogEntry) message;
-
- // Apply State immediately
+ LOG.info("Received ReplicatedLogEntry for recovery:{}", logEntry.getIndex());
replicatedLog.append(logEntry);
- applyState(null, "recovery", logEntry.getData());
- context.setLastApplied(logEntry.getIndex());
- context.setCommitIndex(logEntry.getIndex());
+
+ } else if (message instanceof ApplyLogEntries) {
+ ApplyLogEntries ale = (ApplyLogEntries) message;
+
+ LOG.info("Received ApplyLogEntries for recovery, applying to state:{} to {}",
+ context.getLastApplied() + 1, ale.getToIndex());
+
+ for (long i = context.getLastApplied() + 1; i <= ale.getToIndex(); i++) {
+ applyState(null, "recovery", replicatedLog.get(i).getData());
+ }
+ context.setLastApplied(ale.getToIndex());
+ context.setCommitIndex(ale.getToIndex());
+
} else if (message instanceof DeleteEntries) {
replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+
} else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), ((UpdateElectionTerm) message).getVotedFor());
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+
} else if (message instanceof RecoveryCompleted) {
- if(LOG.isDebugEnabled()) {
- LOG.debug(
- "RecoveryCompleted - Switching actor to Follower - " +
- "Persistence Id = " + persistenceId() +
- " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
- "journal-size={}",
- replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
- replicatedLog.snapshotTerm, replicatedLog.size()
- );
- }
+ LOG.info(
+ "RecoveryCompleted - Switching actor to Follower - " +
+ "Persistence Id = " + persistenceId() +
+ " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
+ "journal-size={}",
+ replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
+ replicatedLog.snapshotTerm, replicatedLog.size());
currentBehavior = switchBehavior(RaftState.Follower);
onStateChanged();
}
applyState(applyState.getClientActor(), applyState.getIdentifier(),
applyState.getReplicatedLogEntry().getData());
+ } else if (message instanceof ApplyLogEntries){
+ ApplyLogEntries ale = (ApplyLogEntries) message;
+ LOG.info("Persisting ApplyLogEntries with index={}", ale.getToIndex());
+ persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
+ @Override
+ public void apply(ApplyLogEntries param) throws Exception {
+ }
+ });
+
} else if(message instanceof ApplySnapshot ) {
Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+import java.io.Serializable;
+
+/**
+ * ApplyLogEntries serves as a message which is stored in the akka's persistent
+ * journal.
+ * During recovery if this message is found, then all in-mem journal entries from
+ * context.lastApplied to ApplyLogEntries.toIndex are applied to the state
+ *
+ * This class is also used as a internal message sent from Behaviour to
+ * RaftActor to persist the ApplyLogEntries
+ *
+ */
+public class ApplyLogEntries implements Serializable {
+ private final int toIndex;
+
+ public ApplyLogEntries(int toIndex) {
+ this.toIndex = toIndex;
+ }
+
+ public int getToIndex() {
+ return toIndex;
+ }
+}
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
}
context.getLogger().debug("Setting last applied to {}", newLastApplied);
context.setLastApplied(newLastApplied);
+
+ // send a message to persist a ApplyLogEntries marker message into akka's persistent journal
+ // will be used during recovery
+ //in case if the above code throws an error and this message is not sent, it would be fine
+ // as the append entries received later would initiate add this message to the journal
+ actor().tell(new ApplyLogEntries((int) context.getLastApplied()), actor());
}
protected Object fromSerializableMessage(Object serializable){
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.event.Logging;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
import com.google.protobuf.ByteString;
+import org.junit.After;
import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-
+import org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal;
+import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
+import java.util.List;
import java.util.Map;
+import static junit.framework.Assert.assertTrue;
import static junit.framework.TestCase.assertEquals;
public class RaftActorTest extends AbstractActorTest {
+ @After
+ public void tearDown() {
+ MockAkkaJournal.clearJournal();
+ MockSnapshotStore.setMockSnapshot(null);
+ }
+
public static class MockRaftActor extends RaftActor {
+ private boolean applySnapshotCalled = false;
+ private List<Object> state;
+
public MockRaftActor(String id,
Map<String, String> peerAddresses) {
super(id, peerAddresses);
+ state = new ArrayList<>();
+ }
+
+ public RaftActorContext getRaftActorContext() {
+ return context;
+ }
+
+ public boolean isApplySnapshotCalled() {
+ return applySnapshotCalled;
+ }
+
+ public List<Object> getState() {
+ return state;
}
public static Props props(final String id, final Map<String, String> peerAddresses){
});
}
- @Override protected void applyState(ActorRef clientActor,
- String identifier,
- Object data) {
+ @Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
+ state.add(data);
}
@Override protected void createSnapshot() {
}
@Override protected void applySnapshot(ByteString snapshot) {
- throw new UnsupportedOperationException("applySnapshot");
+ applySnapshotCalled = true;
+ try {
+ Object data = toObject(snapshot);
+ if (data instanceof List) {
+ state.addAll((List) data);
+ }
+ } catch (ClassNotFoundException e) {
+ e.printStackTrace();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
}
@Override protected void onStateChanged() {
return this.getId();
}
+ private Object toObject(ByteString bs) throws ClassNotFoundException, IOException {
+ Object obj = null;
+ ByteArrayInputStream bis = null;
+ ObjectInputStream ois = null;
+ try {
+ bis = new ByteArrayInputStream(bs.toByteArray());
+ ois = new ObjectInputStream(bis);
+ obj = ois.readObject();
+ } finally {
+ if (bis != null) {
+ bis.close();
+ }
+ if (ois != null) {
+ ois.close();
+ }
+ }
+ return obj;
+ }
+
+
}
kit.findLeader(kit.getRaftActor().path().toString());
}
+ @Test
+ public void testRaftActorRecovery() {
+ new JavaTestKit(getSystem()) {{
+ new Within(duration("1 seconds")) {
+ protected void run() {
+
+ String persistenceId = "follower10";
+
+ ActorRef followerActor = getSystem().actorOf(
+ MockRaftActor.props(persistenceId, Collections.EMPTY_MAP), persistenceId);
+
+ List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
+ ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("E"));
+ snapshotUnappliedEntries.add(entry1);
+
+ int lastAppliedDuringSnapshotCapture = 3;
+ int lastIndexDuringSnapshotCapture = 4;
+
+ ByteString snapshotBytes = null;
+ try {
+ // 4 messages as part of snapshot, which are applied to state
+ snapshotBytes = fromObject(Arrays.asList(new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
+ snapshotUnappliedEntries, lastIndexDuringSnapshotCapture, 1 ,
+ lastAppliedDuringSnapshotCapture, 1);
+ MockSnapshotStore.setMockSnapshot(snapshot);
+ MockSnapshotStore.setPersistenceId(persistenceId);
+
+ // add more entries after snapshot is taken
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5, new MockRaftActorContext.MockPayload("F"));
+ ReplicatedLogEntry entry3 = new MockRaftActorContext.MockReplicatedLogEntry(1, 6, new MockRaftActorContext.MockPayload("G"));
+ ReplicatedLogEntry entry4 = new MockRaftActorContext.MockReplicatedLogEntry(1, 7, new MockRaftActorContext.MockPayload("H"));
+ entries.add(entry2);
+ entries.add(entry3);
+ entries.add(entry4);
+
+ int lastAppliedToState = 5;
+ int lastIndex = 7;
+
+ MockAkkaJournal.addToJournal(5, entry2);
+ // 2 entries are applied to state besides the 4 entries in snapshot
+ MockAkkaJournal.addToJournal(6, new ApplyLogEntries(lastAppliedToState));
+ MockAkkaJournal.addToJournal(7, entry3);
+ MockAkkaJournal.addToJournal(8, entry4);
+
+ // kill the actor
+ followerActor.tell(PoisonPill.getInstance(), null);
+
+ try {
+ // give some time for actor to die
+ Thread.sleep(200);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ //reinstate the actor
+ TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(),
+ MockRaftActor.props(persistenceId, Collections.EMPTY_MAP));
+ try {
+ //give some time for snapshot offer to get called.
+ Thread.sleep(200);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+ assertEquals(snapshotUnappliedEntries.size() + entries.size(), context.getReplicatedLog().size());
+ assertEquals(lastIndex, context.getReplicatedLog().lastIndex());
+ assertEquals(lastAppliedToState, context.getLastApplied());
+ assertEquals(lastAppliedToState, context.getCommitIndex());
+ assertTrue(ref.underlyingActor().isApplySnapshotCalled());
+ assertEquals(6, ref.underlyingActor().getState().size());
+ }
+ };
+ }};
+
+ }
+
+ private ByteString fromObject(Object snapshot) throws Exception {
+ ByteArrayOutputStream b = null;
+ ObjectOutputStream o = null;
+ try {
+ b = new ByteArrayOutputStream();
+ o = new ObjectOutputStream(b);
+ o.writeObject(snapshot);
+ byte[] snapshotBytes = b.toByteArray();
+ return ByteString.copyFrom(snapshotBytes);
+ } finally {
+ if (o != null) {
+ o.flush();
+ o.close();
+ }
+ if (b != null) {
+ b.close();
+ }
+ }
+ }
}
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.protobuf.ByteString;
import junit.framework.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import static akka.pattern.Patterns.ask;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
public class FollowerTest extends AbstractRaftActorBehaviorTest {
return new Follower(actorContext);
}
- @Override protected RaftActorContext createActorContext() {
- return new MockRaftActorContext("test", getSystem(), followerActor);
+ @Override protected RaftActorContext createActorContext() {
+ return createActorContext(followerActor);
+ }
+
+ protected RaftActorContext createActorContext(ActorRef actorRef){
+ return new MockRaftActorContext("test", getSystem(), actorRef);
}
@Test
createActorContext();
context.setLastApplied(100);
- setLastLogEntry((MockRaftActorContext) context, 1, 100, new MockRaftActorContext.MockPayload(""));
+ setLastLogEntry((MockRaftActorContext) context, 1, 100,
+ new MockRaftActorContext.MockPayload(""));
((MockRaftActorContext) context).getReplicatedLog().setSnapshotIndex(99);
List<ReplicatedLogEntry> entries =
Arrays.asList(
- (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
- new MockRaftActorContext.MockPayload("foo"))
+ (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
+ new MockRaftActorContext.MockPayload("foo"))
);
// The new commitIndex is 101
}};
}
+
+ /**
+ * This test verifies that when InstallSnapshot is received by
+ * the follower its applied correctly.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testHandleInstallSnapshot() throws Exception {
+ JavaTestKit javaTestKit = new JavaTestKit(getSystem()) {{
+
+ ActorRef leaderActor = getSystem().actorOf(Props.create(
+ MessageCollectorActor.class));
+
+ MockRaftActorContext context = (MockRaftActorContext)
+ createActorContext(getRef());
+
+ Follower follower = (Follower)createBehavior(context);
+
+ HashMap<String, String> followerSnapshot = new HashMap<>();
+ followerSnapshot.put("1", "A");
+ followerSnapshot.put("2", "B");
+ followerSnapshot.put("3", "C");
+
+ ByteString bsSnapshot = toByteString(followerSnapshot);
+ ByteString chunkData = ByteString.EMPTY;
+ int offset = 0;
+ int snapshotLength = bsSnapshot.size();
+ int i = 1;
+
+ do {
+ chunkData = getNextChunk(bsSnapshot, offset);
+ final InstallSnapshot installSnapshot =
+ new InstallSnapshot(1, "leader-1", i, 1,
+ chunkData, i, 3);
+ follower.handleMessage(leaderActor, installSnapshot);
+ offset = offset + 50;
+ i++;
+ } while ((offset+50) < snapshotLength);
+
+ final InstallSnapshot installSnapshot3 = new InstallSnapshot(1, "leader-1", 3, 1, chunkData, 3, 3);
+ follower.handleMessage(leaderActor, installSnapshot3);
+
+ String[] matches = new ReceiveWhile<String>(String.class, duration("2 seconds")) {
+ @Override
+ protected String match(Object o) throws Exception {
+ if (o instanceof ApplySnapshot) {
+ ApplySnapshot as = (ApplySnapshot)o;
+ if (as.getSnapshot().getLastIndex() != installSnapshot3.getLastIncludedIndex()) {
+ return "applySnapshot-lastIndex-mismatch";
+ }
+ if (as.getSnapshot().getLastAppliedTerm() != installSnapshot3.getLastIncludedTerm()) {
+ return "applySnapshot-lastAppliedTerm-mismatch";
+ }
+ if (as.getSnapshot().getLastAppliedIndex() != installSnapshot3.getLastIncludedIndex()) {
+ return "applySnapshot-lastAppliedIndex-mismatch";
+ }
+ if (as.getSnapshot().getLastTerm() != installSnapshot3.getLastIncludedTerm()) {
+ return "applySnapshot-lastTerm-mismatch";
+ }
+ return "applySnapshot";
+ }
+
+ return "ignoreCase";
+ }
+ }.get();
+
+ String applySnapshotMatch = "";
+ for (String reply: matches) {
+ if (reply.startsWith("applySnapshot")) {
+ applySnapshotMatch = reply;
+ }
+ }
+
+ assertEquals("applySnapshot", applySnapshotMatch);
+
+ Object messages = executeLocalOperation(leaderActor, "get-all-messages");
+
+ assertNotNull(messages);
+ assertTrue(messages instanceof List);
+ List<Object> listMessages = (List<Object>) messages;
+
+ int installSnapshotReplyReceivedCount = 0;
+ for (Object message: listMessages) {
+ if (message instanceof InstallSnapshotReply) {
+ ++installSnapshotReplyReceivedCount;
+ }
+ }
+
+ assertEquals(3, installSnapshotReplyReceivedCount);
+
+ }};
+ }
+
+ public Object executeLocalOperation(ActorRef actor, Object message) throws Exception {
+ FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
+ Timeout operationTimeout = new Timeout(operationDuration);
+ Future<Object> future = ask(actor, message, operationTimeout);
+
+ try {
+ return Await.result(future, operationDuration);
+ } catch (Exception e) {
+ throw e;
+ }
+ }
+
+ public ByteString getNextChunk (ByteString bs, int offset){
+ int snapshotLength = bs.size();
+ int start = offset;
+ int size = 50;
+ if (50 > snapshotLength) {
+ size = snapshotLength;
+ } else {
+ if ((start + 50) > snapshotLength) {
+ size = snapshotLength - start;
+ }
+ }
+ return bs.substring(start, start + size);
+ }
+
+ private ByteString toByteString(Map<String, String> state) {
+ ByteArrayOutputStream b = null;
+ ObjectOutputStream o = null;
+ try {
+ try {
+ b = new ByteArrayOutputStream();
+ o = new ObjectOutputStream(b);
+ o.writeObject(state);
+ byte[] snapshotBytes = b.toByteArray();
+ return ByteString.copyFrom(snapshotBytes);
+ } finally {
+ if (o != null) {
+ o.flush();
+ o.close();
+ }
+ if (b != null) {
+ b.close();
+ }
+ }
+ } catch (IOException e) {
+ org.junit.Assert.fail("IOException in converting Hashmap to Bytestring:" + e);
+ }
+ return null;
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.actor.UntypedActor;
+
+import java.util.ArrayList;
+import java.util.List;
+
+
+public class MessageCollectorActor extends UntypedActor {
+ private List<Object> messages = new ArrayList<>();
+
+ @Override public void onReceive(Object message) throws Exception {
+ if(message instanceof String){
+ if("get-all-messages".equals(message)){
+ getSender().tell(messages, getSelf());
+ }
+ } else {
+ messages.add(message);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
+import com.google.common.collect.Maps;
+import scala.concurrent.Future;
+
+import java.util.Map;
+import java.util.concurrent.Callable;
+
+public class MockAkkaJournal extends AsyncWriteJournal {
+
+ private static Map<Long, Object> journal = Maps.newHashMap();
+
+ public static void addToJournal(long sequenceNr, Object message) {
+ journal.put(sequenceNr, message);
+ }
+
+ public static void clearJournal() {
+ journal.clear();
+ }
+
+ @Override
+ public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr,
+ long toSequenceNr, long max, final Procedure<PersistentRepr> replayCallback) {
+
+ return Futures.future(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ for (Map.Entry<Long,Object> entry : journal.entrySet()) {
+ PersistentRepr persistentMessage =
+ new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId, false, null, null);
+ replayCallback.apply(persistentMessage);
+ }
+ return null;
+ }
+ }, context().dispatcher());
+ }
+
+ @Override
+ public Future<Long> doAsyncReadHighestSequenceNr(String s, long l) {
+ return Futures.successful(new Long(0));
+ }
+
+ @Override
+ public Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> persistentReprs) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> persistentConfirmations) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> persistentIds, boolean b) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncDeleteMessagesTo(String s, long l, boolean b) {
+ return Futures.successful(null);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import scala.concurrent.Future;
+
+
+public class MockSnapshotStore extends SnapshotStore {
+
+ private static Snapshot mockSnapshot;
+ private static String persistenceId;
+
+ public static void setMockSnapshot(Snapshot s) {
+ mockSnapshot = s;
+ }
+
+ public static void setPersistenceId(String pId) {
+ persistenceId = pId;
+ }
+
+ @Override
+ public Future<Option<SelectedSnapshot>> doLoadAsync(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) {
+ if (mockSnapshot == null) {
+ return Futures.successful(Option.<SelectedSnapshot>none());
+ }
+
+ SnapshotMetadata smd = new SnapshotMetadata(persistenceId, 1, 12345);
+ SelectedSnapshot selectedSnapshot =
+ new SelectedSnapshot(smd, mockSnapshot);
+ return Futures.successful(Option.some(selectedSnapshot));
+ }
+
+ @Override
+ public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+ return null;
+ }
+
+ @Override
+ public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+
+ }
+
+ @Override
+ public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+
+ }
+
+ @Override
+ public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) throws Exception {
+
+ }
+}
akka {
+ persistence.snapshot-store.plugin = "mock-snapshot-store"
+ persistence.journal.plugin = "mock-journal"
+
loglevel = "DEBUG"
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
}
}
}
+
+mock-snapshot-store {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
+
+mock-journal {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
private final DOMTransactionChain delegate;
private final BindingToNormalizedNodeCodec codec;
- private final DelegateChainListener delegatingListener;
- private final TransactionChainListener listener;
+ private final DelegateChainListener domListener;
+ private final TransactionChainListener bindingListener;
public BindingTranslatedTransactionChain(final DOMDataBroker chainFactory,
final BindingToNormalizedNodeCodec codec, final TransactionChainListener listener) {
Preconditions.checkNotNull(chainFactory, "DOM Transaction chain factory must not be null");
- this.delegatingListener = new DelegateChainListener();
- this.listener = listener;
- this.delegate = chainFactory.createTransactionChain(listener);
+ this.domListener = new DelegateChainListener();
+ this.bindingListener = listener;
+ this.delegate = chainFactory.createTransactionChain(domListener);
this.codec = codec;
}
* chain, so we are not changing any of our internal state
* to mark that we failed.
*/
- this.delegatingListener.onTransactionChainFailed(this, tx, t);
+ this.bindingListener.onTransactionChainFailed(this, tx, t);
}
@Override
public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
Preconditions.checkState(delegate.equals(chain),
"Illegal state - listener for %s was invoked for incorrect chain %s.", delegate, chain);
- listener.onTransactionChainSuccessful(BindingTranslatedTransactionChain.this);
+ bindingListener.onTransactionChainSuccessful(BindingTranslatedTransactionChain.this);
}
}
this.executor = Preconditions.checkNotNull(executor);
}
+ private static ThreadExecutorStatsMXBeanImpl createInternal(final Executor executor,
+ final String mBeanName, final String mBeanType, final String mBeanCategory) {
+ if (executor instanceof ThreadPoolExecutor) {
+ final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl(
+ (ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+ return ret;
+ }
+
+ LOG.info("Executor {} is not supported", executor);
+ return null;
+ }
+
/**
- * Create a new bean for the statistics, which is already registered.
+ * Creates a new bean if the backing executor is a ThreadPoolExecutor and registers it.
*
- * @param executor
- * @param mBeanName
- * @param mBeanType
- * @param mBeanCategory
- * @return
+ * @param executor the backing {@link Executor}
+ * @param mBeanName Used as the <code>name</code> property in the bean's ObjectName.
+ * @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
+ * @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
+ * @return a registered ThreadExecutorStatsMXBeanImpl instance if the backing executor
+ * is a ThreadPoolExecutor, otherwise null.
*/
public static ThreadExecutorStatsMXBeanImpl create(final Executor executor, final String mBeanName,
final String mBeanType, @Nullable final String mBeanCategory) {
- if (executor instanceof ThreadPoolExecutor) {
- final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl((ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+ ThreadExecutorStatsMXBeanImpl ret = createInternal(executor, mBeanName, mBeanType, mBeanCategory);
+ if(ret != null) {
ret.registerMBean();
- return ret;
}
- LOG.info("Executor {} is not supported", executor);
- return null;
+ return ret;
+ }
+
+ /**
+ * Creates a new bean if the backing executor is a ThreadPoolExecutor.
+ *
+ * @param executor the backing {@link Executor}
+ * @return a ThreadExecutorStatsMXBeanImpl instance if the backing executor
+ * is a ThreadPoolExecutor, otherwise null.
+ */
+ public static ThreadExecutorStatsMXBeanImpl create(final Executor executor) {
+ return createInternal(executor, "", "", null);
}
@Override
}
public void setDataStoreExecutor(ExecutorService dsExecutor) {
- this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor,
- "notification-executor", getMBeanType(), getMBeanCategory());
+ this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor);
}
public void setNotificationManager(QueuedNotificationManager<?, ?> manager) {
this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
"notification-manager", getMBeanType(), getMBeanCategory());
- this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
- "data-store-executor", getMBeanType(), getMBeanCategory());
+ this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor());
}
@Override
@Override
public ThreadExecutorStats getDataStoreExecutorStats() {
- return dataStoreExecutorStatsBean.toThreadExecutorStats();
+ return dataStoreExecutorStatsBean == null ? null :
+ dataStoreExecutorStatsBean.toThreadExecutorStats();
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore;
-
-/**
-*
-*/
-public final class HashMapDataStoreModule extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModule
-{
-
- public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
- super(identifier, dependencyResolver);
- }
-
- public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, HashMapDataStoreModule oldModule, java.lang.AutoCloseable oldInstance) {
- super(identifier, dependencyResolver, oldModule, oldInstance);
- }
-
- @Override
- public void validate(){
- super.validate();
- // Add custom validation for module attributes here.
- }
-
- @Override
- public java.lang.AutoCloseable createInstance() {
- HashMapDataStore store = new HashMapDataStore();
- return store;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-/**
-*
-*/
-public class HashMapDataStoreModuleFactory extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModuleFactory
-{
-
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.sal.core.api.data.DataStore;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class HashMapDataStore implements DataStore, AutoCloseable {
- private static final Logger LOG = LoggerFactory
- .getLogger(HashMapDataStore.class);
-
- private final Map<YangInstanceIdentifier, CompositeNode> configuration = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
- private final Map<YangInstanceIdentifier, CompositeNode> operational = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
-
- @Override
- public boolean containsConfigurationPath(final YangInstanceIdentifier path) {
- return configuration.containsKey(path);
- }
-
- @Override
- public boolean containsOperationalPath(final YangInstanceIdentifier path) {
- return operational.containsKey(path);
- }
-
- @Override
- public Iterable<YangInstanceIdentifier> getStoredConfigurationPaths() {
- return configuration.keySet();
- }
-
- @Override
- public Iterable<YangInstanceIdentifier> getStoredOperationalPaths() {
- return operational.keySet();
- }
-
- @Override
- public CompositeNode readConfigurationData(final YangInstanceIdentifier path) {
- LOG.trace("Reading configuration path {}", path);
- return configuration.get(path);
- }
-
- @Override
- public CompositeNode readOperationalData(YangInstanceIdentifier path) {
- LOG.trace("Reading operational path {}", path);
- return operational.get(path);
- }
-
- @Override
- public DataCommitHandler.DataCommitTransaction<YangInstanceIdentifier, CompositeNode> requestCommit(
- final DataModification<YangInstanceIdentifier, CompositeNode> modification) {
- return new HashMapDataStoreTransaction(modification, this);
- }
-
- public RpcResult<Void> rollback(HashMapDataStoreTransaction transaction) {
- return RpcResultBuilder.<Void> success().build();
- }
-
- public RpcResult<Void> finish(HashMapDataStoreTransaction transaction) {
- final DataModification<YangInstanceIdentifier, CompositeNode> modification = transaction
- .getModification();
- for (final YangInstanceIdentifier removal : modification
- .getRemovedConfigurationData()) {
- LOG.trace("Removing configuration path {}", removal);
- remove(configuration, removal);
- }
- for (final YangInstanceIdentifier removal : modification
- .getRemovedOperationalData()) {
- LOG.trace("Removing operational path {}", removal);
- remove(operational, removal);
- }
- if (LOG.isTraceEnabled()) {
- for (final YangInstanceIdentifier a : modification
- .getUpdatedConfigurationData().keySet()) {
- LOG.trace("Adding configuration path {}", a);
- }
- for (final YangInstanceIdentifier a : modification
- .getUpdatedOperationalData().keySet()) {
- LOG.trace("Adding operational path {}", a);
- }
- }
- configuration.putAll(modification.getUpdatedConfigurationData());
- operational.putAll(modification.getUpdatedOperationalData());
-
- return RpcResultBuilder.<Void> success().build();
- }
-
- public void remove(final Map<YangInstanceIdentifier, CompositeNode> map,
- final YangInstanceIdentifier identifier) {
- Set<YangInstanceIdentifier> affected = new HashSet<YangInstanceIdentifier>();
- for (final YangInstanceIdentifier path : map.keySet()) {
- if (identifier.contains(path)) {
- affected.add(path);
- }
- }
- for (final YangInstanceIdentifier pathToRemove : affected) {
- LOG.trace("Removed path {}", pathToRemove);
- map.remove(pathToRemove);
- }
- }
-
- @Override
- public void close() {
- // NOOP
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public class HashMapDataStoreTransaction implements
- DataCommitTransaction<YangInstanceIdentifier, CompositeNode> {
- private final DataModification<YangInstanceIdentifier, CompositeNode> modification;
- private final HashMapDataStore datastore;
-
- HashMapDataStoreTransaction(
- final DataModification<YangInstanceIdentifier, CompositeNode> modify,
- final HashMapDataStore store) {
- modification = modify;
- datastore = store;
- }
-
- @Override
- public RpcResult<Void> finish() throws IllegalStateException {
- return datastore.finish(this);
- }
-
- @Override
- public DataModification<YangInstanceIdentifier, CompositeNode> getModification() {
- return this.modification;
- }
-
- @Override
- public RpcResult<Void> rollback() throws IllegalStateException {
- return datastore.rollback(this);
- }
-}
\ No newline at end of file
config:provided-service sal:dom-async-data-broker;
}
- identity hash-map-data-store {
- base config:module-type;
- config:provided-service sal:dom-data-store;
- config:java-name-prefix HashMapDataStore;
- }
-
identity schema-service-singleton {
base config:module-type;
config:provided-service sal:schema-service;
}
}
- augment "/config:modules/config:module/config:state" {
- case hash-map-data-store {
- when "/config:modules/config:module/config:type = 'hash-map-data-store'";
- }
- }
-
augment "/config:modules/config:module/config:state" {
case schema-service-singleton {
when "/config:modules/config:module/config:type = 'schema-service-singleton'";
}
}
}
-}
\ No newline at end of file
+}
package org.opendaylight.xsql;
-import java.util.concurrent.ExecutionException;
-
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQL;
XSQLBuilder builder = new XSQLBuilder();
builder.setPort("34343");
XSQL xsql = builder.build();
- if (dps != null) {
- final DataModificationTransaction t = dps.beginTransaction();
- t.removeOperationalData(ID);
- t.putOperationalData(ID,xsql);
-
- try {
+ try {
+ if (dps != null) {
+ final DataModificationTransaction t = dps.beginTransaction();
+ t.removeOperationalData(ID);
+ t.putOperationalData(ID,xsql);
t.commit().get();
- } catch (InterruptedException | ExecutionException e) {
- LOG.warn("Failed to update toaster status, operational otherwise", e);
}
+ } catch (Exception e) {
+ LOG.warn("Failed to update XSQL port status, ", e);
}
return xsql;
}
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
-
import java.util.Collection;
import java.util.Map.Entry;
-
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
Preconditions.checkArgument(node.getDataAfter().isPresent(),
"Modification at {} has type {} but no after-data", state.getPath(), node.getModificationType());
if (!node.getDataBefore().isPresent()) {
- resolveCreateEvent(state, node.getDataAfter().get());
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ final NormalizedNode<PathArgument, ?> afterNode = (NormalizedNode)node.getDataAfter().get();
+ resolveSameEventRecursivelly(state, afterNode, DOMImmutableDataChangeEvent.getCreateEventFactory());
return true;
}
case DELETE:
Preconditions.checkArgument(node.getDataBefore().isPresent(),
"Modification at {} has type {} but no before-data", state.getPath(), node.getModificationType());
- resolveDeleteEvent(state, node.getDataBefore().get());
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ final NormalizedNode<PathArgument, ?> beforeNode = (NormalizedNode)node.getDataBefore().get();
+ resolveSameEventRecursivelly(state, beforeNode, DOMImmutableDataChangeEvent.getRemoveEventFactory());
return true;
case UNMODIFIED:
return false;
return true;
}
- /**
- * Resolves create events deep down the interest listener tree.
- *
- * @param path
- * @param listeners
- * @param afterState
- * @return
- */
- private void resolveCreateEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> afterState) {
- @SuppressWarnings({ "unchecked", "rawtypes" })
- final NormalizedNode<PathArgument, ?> node = (NormalizedNode) afterState;
- resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getCreateEventFactory());
- }
-
- private void resolveDeleteEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> beforeState) {
- @SuppressWarnings({ "unchecked", "rawtypes" })
- final NormalizedNode<PathArgument, ?> node = (NormalizedNode) beforeState;
- resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getRemoveEventFactory());
- }
-
private void resolveSameEventRecursivelly(final ResolveDataChangeState state,
final NormalizedNode<PathArgument, ?> node, final SimpleEventFactory eventFactory) {
if (!state.needsProcessing()) {
Preconditions.checkArgument(modification.getDataBefore().isPresent(), "Subtree change with before-data not present at path %s", state.getPath());
Preconditions.checkArgument(modification.getDataAfter().isPresent(), "Subtree change with after-data not present at path %s", state.getPath());
+ if (!state.needsProcessing()) {
+ LOG.trace("Not processing modified subtree {}", state.getPath());
+ return true;
+ }
+
DataChangeScope scope = null;
for (DataTreeCandidateNode childMod : modification.getChildNodes()) {
final ResolveDataChangeState childState = state.child(childMod.getIdentifier());
<?xml version="1.0" encoding="UTF-8"?>\r
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">\r
\r
- <!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ <!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
-->\r
\r
<modelVersion>4.0.0</modelVersion>\r
<version>1.1-SNAPSHOT</version>\r
</parent>\r
\r
- <groupId>xsqlcommand</groupId>\r
- <artifactId>xsqlcommand</artifactId>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>sal-karaf-xsql</artifactId>\r
<packaging>bundle</packaging>\r
- <version>1.0.0-SNAPSHOT</version>\r
<name>Apache Karaf :: Shell odl/xsql Commands</name>\r
\r
<description>Provides the OSGi odl commands</description>\r
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal-dom-xsql</artifactId>\r
- <type>bundle</type>\r
<version>1.1-SNAPSHOT</version>\r
</dependency>\r
</dependencies>\r
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
-
+import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
-
import org.opendaylight.controller.netconf.client.NetconfClientSession;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.yangtools.yang.common.QName;
return fromStrings(session.getServerCapabilities());
}
- private static final QName cachedQName(String namespace, String revision, String moduleName) {
+ private static QName cachedQName(final String namespace, final String revision, final String moduleName) {
return QName.cachedReference(QName.create(namespace, revision, moduleName));
}
+ private static QName cachedQName(final String namespace, final String moduleName) {
+ return QName.cachedReference(QName.create(URI.create(namespace), null, moduleName).withoutRevision());
+ }
+
public static NetconfSessionCapabilities fromStrings(final Collection<String> capabilities) {
final Set<QName> moduleBasedCaps = new HashSet<>();
final Set<String> nonModuleCaps = Sets.newHashSet(capabilities);
String revision = REVISION_PARAM.from(queryParams);
if (revision != null) {
- moduleBasedCaps.add(cachedQName(namespace, revision, moduleName));
- nonModuleCaps.remove(capability);
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
continue;
}
* We have seen devices which mis-escape revision, but the revision may not
* even be there. First check if there is a substring that matches revision.
*/
- if (!Iterables.any(queryParams, CONTAINS_REVISION)) {
+ if (Iterables.any(queryParams, CONTAINS_REVISION)) {
+
+ LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
+ revision = BROKEN_REVISON_PARAM.from(queryParams);
+ if (revision == null) {
+ LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
+ } else {
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
+ }
continue;
}
- LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
- revision = BROKEN_REVISON_PARAM.from(queryParams);
- if (revision == null) {
- LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
- }
-
- // FIXME: do we really want to continue here?
- moduleBasedCaps.add(cachedQName(namespace, revision, moduleName));
- nonModuleCaps.remove(capability);
+ // Fallback, no revision provided for module
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
}
return new NetconfSessionCapabilities(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
}
+
+
+ private static void addModuleQName(final Set<QName> moduleBasedCaps, final Set<String> nonModuleCaps, final String capability, final QName qName) {
+ moduleBasedCaps.add(qName);
+ nonModuleCaps.remove(capability);
+ }
}
assertThat(merged.getNonModuleCaps(), JUnitMatchers.hasItem("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
}
+ @Test
+ public void testCapabilityNoRevision() throws Exception {
+ final List<String> caps1 = Lists.newArrayList(
+ "namespace:2?module=module2",
+ "namespace:2?module=module2&revision=2012-12-12",
+ "namespace:2?module=module1&RANDOMSTRING;revision=2013-12-12",
+ "namespace:2?module=module2&RANDOMSTRING;revision=2013-12-12" // This one should be ignored(same as first), since revision is in wrong format
+ );
+
+ final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+ assertCaps(sessionCaps1, 0, 3);
+ }
+
private void assertCaps(final NetconfSessionCapabilities sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
assertEquals(nonModuleCaps, sessionCaps1.getNonModuleCaps().size());
assertEquals(moduleCaps, sessionCaps1.getModuleBasedCaps().size());
@GET
@Path("/modules")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModules(@Context UriInfo uriInfo);
@GET
@Path("/modules/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModules(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/modules/module/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModule(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/operations")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getOperations(@Context UriInfo uriInfo);
@GET
@Path("/operations/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getOperations(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/streams")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getAvailableStreams(@Context UriInfo uriInfo);
LOG.debug("In toResponse: {}", exception.getMessage());
- // Default to the content type if there's no Accept header
- MediaType mediaType = headers.getMediaType();
List<MediaType> accepts = headers.getAcceptableMediaTypes();
+ accepts.remove(MediaType.WILDCARD_TYPE);
LOG.debug("Accept headers: {}", accepts);
+ final MediaType mediaType;
if (accepts != null && accepts.size() > 0) {
mediaType = accepts.get(0); // just pick the first one
+ } else {
+ // Default to the content type if there's no Accept header
+ mediaType = MediaType.APPLICATION_JSON_TYPE;
}
LOG.debug("Using MediaType: {}", mediaType);
import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import java.util.List;
import java.util.Map;
public static String checkPrefixAndExtractServiceName(XmlElement typeElement, Map.Entry<String, String> prefixNamespace) throws NetconfDocumentedException {
String serviceName = typeElement.getTextContent();
// FIXME: comparing Entry with String:
- Preconditions.checkState(!prefixNamespace.equals(""), "Service %s value not prefixed with namespace",
+ Preconditions.checkState(!Strings.isNullOrEmpty(prefixNamespace.getKey()), "Service %s value not prefixed with namespace",
XmlNetconfConstants.TYPE_KEY);
String prefix = prefixNamespace.getKey() + PREFIX_SEPARATOR;
Preconditions.checkState(serviceName.startsWith(prefix),
Date revision = null;
Map<Date, EditConfig.IdentityMapping> revisions = identityMap.get(namespace);
if(revisions.keySet().size() > 1) {
- for (Date date : revisions.keySet()) {
- if(revisions.get(date).containsIdName(localName)) {
+ for (Map.Entry<Date, EditConfig.IdentityMapping> revisionToIdentityEntry : revisions.entrySet()) {
+ if(revisionToIdentityEntry.getValue().containsIdName(localName)) {
Preconditions.checkState(revision == null, "Duplicate identity %s, in namespace %s, with revisions: %s, %s detected. Cannot map attribute",
- localName, namespace, revision, date);
- revision = date;
+ localName, namespace, revision, revisionToIdentityEntry.getKey());
+ revision = revisionToIdentityEntry.getKey();
}
}
} else {
Map<String, Map<String, Collection<ObjectName>>> retVal = Maps.newLinkedHashMap();
- for (String namespace : configs.keySet()) {
+ for (Entry<String, Map<String, ModuleConfig>> namespaceToModuleToConfigEntry : configs.entrySet()) {
Map<String, Collection<ObjectName>> innerRetVal = Maps.newHashMap();
- for (Entry<String, ModuleConfig> mbeEntry : configs.get(namespace).entrySet()) {
+ for (Entry<String, ModuleConfig> mbeEntry : namespaceToModuleToConfigEntry.getValue().entrySet()) {
String moduleName = mbeEntry.getKey();
Collection<ObjectName> instances = moduleToInstances.get(moduleName);
}
- retVal.put(namespace, innerRetVal);
+ retVal.put(namespaceToModuleToConfigEntry.getKey(), innerRetVal);
}
return retVal;
}
Element modulesElement = XmlUtil.createElement(document, XmlNetconfConstants.MODULES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
dataElement.appendChild(modulesElement);
- for (String moduleNamespace : moduleToInstances.keySet()) {
- for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstances.get(moduleNamespace)
+ for (Entry<String, Map<String, Collection<ObjectName>>> moduleToInstanceEntry : moduleToInstances.entrySet()) {
+ for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstanceEntry.getValue()
.entrySet()) {
- ModuleConfig mapping = moduleConfigs.get(moduleNamespace).get(moduleMappingEntry.getKey());
+ ModuleConfig mapping = moduleConfigs.get(moduleToInstanceEntry.getKey()).get(moduleMappingEntry.getKey());
if (moduleMappingEntry.getValue().isEmpty()) {
continue;
}
for (ObjectName objectName : moduleMappingEntry.getValue()) {
- modulesElement.appendChild(mapping.toXml(objectName, document, moduleNamespace));
+ modulesElement.appendChild(mapping.toXml(objectName, document, moduleToInstanceEntry.getKey()));
}
}
this.configServiceRefRegistry = configServiceRefRegistry;
}
-
public ObjectName getByServiceAndRefName(String namespace, String serviceName, String refName) {
Map<String, Map<String, String>> serviceNameToRefNameToInstance = getMappedServices().get(namespace);
Map<String, Map<String, Map<String, String>>> retVal = Maps.newHashMap();
Map<String, Map<String, ObjectName>> serviceMapping = configServiceRefRegistry.getServiceMapping();
- for (String serviceQName : serviceMapping.keySet()){
- for (String refName : serviceMapping.get(serviceQName).keySet()) {
+ for (Map.Entry<String, Map<String, ObjectName>> qNameToRefNameEntry : serviceMapping.entrySet()){
+ for (String refName : qNameToRefNameEntry.getValue().keySet()) {
- ObjectName on = serviceMapping.get(serviceQName).get(refName);
+ ObjectName on = qNameToRefNameEntry.getValue().get(refName);
Services.ServiceInstance si = Services.ServiceInstance.fromObjectName(on);
- QName qname = QName.create(serviceQName);
+ QName qname = QName.create(qNameToRefNameEntry.getKey());
String namespace = qname.getNamespace().toString();
Map<String, Map<String, String>> serviceToRefs = retVal.get(namespace);
if(serviceToRefs==null) {
Element root = XmlUtil.createElement(document, XmlNetconfConstants.SERVICES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
Map<String, Map<String, Map<String, String>>> mappedServices = serviceRegistryWrapper.getMappedServices();
- for (String namespace : mappedServices.keySet()) {
+ for (Entry<String, Map<String, Map<String, String>>> namespaceToRefEntry : mappedServices.entrySet()) {
- for (Entry<String, Map<String, String>> serviceEntry : mappedServices.get(namespace).entrySet()) {
+ for (Entry<String, Map<String, String>> serviceEntry : namespaceToRefEntry.getValue().entrySet()) {
// service belongs to config.yang namespace
Element serviceElement = XmlUtil.createElement(document, SERVICE_KEY, Optional.<String>absent());
root.appendChild(serviceElement);
// type belongs to config.yang namespace
String serviceType = serviceEntry.getKey();
Element typeElement = XmlUtil.createTextElementWithNamespacedContent(document, XmlNetconfConstants.TYPE_KEY,
- XmlNetconfConstants.PREFIX, namespace, serviceType);
+ XmlNetconfConstants.PREFIX, namespaceToRefEntry.getKey(), serviceType);
serviceElement.appendChild(typeElement);
Map<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceNameToRefNameToInstance = services
.getNamespaceToServiceNameToRefNameToInstance();
- for (String serviceNamespace : namespaceToServiceNameToRefNameToInstance.keySet()) {
- for (String serviceName : namespaceToServiceNameToRefNameToInstance.get(serviceNamespace).keySet()) {
+ for (Map.Entry<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceToRefEntry : namespaceToServiceNameToRefNameToInstance.entrySet()) {
+ for (Map.Entry<String, Map<String, Services.ServiceInstance>> serviceToRefEntry : namespaceToServiceToRefEntry.getValue().entrySet()) {
- String qnameOfService = getQname(ta, serviceNamespace, serviceName);
- Map<String, Services.ServiceInstance> refNameToInstance = namespaceToServiceNameToRefNameToInstance
- .get(serviceNamespace).get(serviceName);
+ String qnameOfService = getQname(ta, namespaceToServiceToRefEntry.getKey(), serviceToRefEntry.getKey());
+ Map<String, Services.ServiceInstance> refNameToInstance = serviceToRefEntry.getValue();
- for (String refName : refNameToInstance.keySet()) {
- ObjectName on = refNameToInstance.get(refName).getObjectName(ta.getTransactionName());
+ for (Map.Entry<String, Services.ServiceInstance> refNameToServiceEntry : refNameToInstance.entrySet()) {
+ ObjectName on = refNameToServiceEntry.getValue().getObjectName(ta.getTransactionName());
try {
- ObjectName saved = ta.saveServiceReference(qnameOfService, refName, on);
+ ObjectName saved = ta.saveServiceReference(qnameOfService, refNameToServiceEntry.getKey(), on);
logger.debug("Saving service {} with on {} under name {} with service on {}", qnameOfService,
- on, refName, saved);
+ on, refNameToServiceEntry.getKey(), saved);
} catch (InstanceNotFoundException e) {
- throw new NetconfDocumentedException(String.format("Unable to save ref name " + refName + " for instance " + on, e),
+ throw new NetconfDocumentedException(String.format("Unable to save ref name " + refNameToServiceEntry.getKey() + " for instance " + on, e),
ErrorType.application,
ErrorTag.operation_failed,
ErrorSeverity.error);
Map<String, Map<String, ModuleConfig>> namespaceToModuleNameToModuleConfig = Maps.newHashMap();
- for (String namespace : mBeanEntries.keySet()) {
- for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : mBeanEntries.get(namespace).entrySet()) {
+ for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleToMbe : mBeanEntries.entrySet()) {
+ for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : namespaceToModuleToMbe.getValue().entrySet()) {
String moduleName = moduleNameToMbe.getKey();
ModuleMXBeanEntry moduleMXBeanEntry = moduleNameToMbe.getValue();
ModuleConfig moduleConfig = new ModuleConfig(moduleName,
new InstanceConfig(configRegistryClient,moduleMXBeanEntry.getAttributes(), moduleMXBeanEntry.getNullableDummyContainerName()));
- Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespace);
+ Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespaceToModuleToMbe.getKey());
if(moduleNameToModuleConfig == null) {
moduleNameToModuleConfig = Maps.newHashMap();
- namespaceToModuleNameToModuleConfig.put(namespace, moduleNameToModuleConfig);
+ namespaceToModuleNameToModuleConfig.put(namespaceToModuleToMbe.getKey(), moduleNameToModuleConfig);
}
moduleNameToModuleConfig.put(moduleName, moduleConfig);
Map<String, Map<String, ModuleMXBeanEntry>> mBeanEntries) {
Map<String, Map<String, ModuleRuntime>> retVal = Maps.newHashMap();
- for (String namespace : mBeanEntries.keySet()) {
+ for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
Map<String, ModuleRuntime> innerMap = Maps.newHashMap();
- Map<String, ModuleMXBeanEntry> entriesFromNamespace = mBeanEntries.get(namespace);
- for (String module : entriesFromNamespace.keySet()) {
+ Map<String, ModuleMXBeanEntry> entriesFromNamespace = namespaceToModuleEntry.getValue();
+ for (Map.Entry<String, ModuleMXBeanEntry> moduleToMXEntry : entriesFromNamespace.entrySet()) {
- ModuleMXBeanEntry mbe = entriesFromNamespace.get(module);
+ ModuleMXBeanEntry mbe = moduleToMXEntry.getValue();
Map<RuntimeBeanEntry, InstanceConfig> cache = Maps.newHashMap();
RuntimeBeanEntry root = null;
InstanceRuntime rootInstanceRuntime = createInstanceRuntime(root, cache);
ModuleRuntime moduleRuntime = new ModuleRuntime(rootInstanceRuntime);
- innerMap.put(module, moduleRuntime);
+ innerMap.put(moduleToMXEntry.getKey(), moduleRuntime);
}
- retVal.put(namespace, innerMap);
+ retVal.put(namespaceToModuleEntry.getKey(), innerMap);
}
return retVal;
}
final String[] signature = new String[attributes.size()];
int i = 0;
- for (final String attrName : attributes.keySet()) {
- final AttributeConfigElement attribute = attributes.get(attrName);
+ for (final AttributeConfigElement attribute : attributes.values()) {
final Optional<?> resolvedValueOpt = attribute.getResolvedValue();
params[i] = resolvedValueOpt.isPresent() ? resolvedValueOpt.get() : attribute.getResolvedDefaultValue();
final Map<String, Map<String, ModuleRpcs>> map = Maps.newHashMap();
- for (final String namespace : mBeanEntries.keySet()) {
+ for (final Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
- Map<String, ModuleRpcs> namespaceToModules = map.get(namespace);
+ Map<String, ModuleRpcs> namespaceToModules = map.get(namespaceToModuleEntry.getKey());
if (namespaceToModules == null) {
namespaceToModules = Maps.newHashMap();
- map.put(namespace, namespaceToModules);
+ map.put(namespaceToModuleEntry.getKey(), namespaceToModules);
}
- for (final String moduleName : mBeanEntries.get(namespace).keySet()) {
+ for (final Map.Entry<String, ModuleMXBeanEntry> moduleEntry : namespaceToModuleEntry.getValue().entrySet()) {
- ModuleRpcs rpcMapping = namespaceToModules.get(moduleName);
+ ModuleRpcs rpcMapping = namespaceToModules.get(moduleEntry.getKey());
if (rpcMapping == null) {
rpcMapping = new ModuleRpcs();
- namespaceToModules.put(moduleName, rpcMapping);
+ namespaceToModules.put(moduleEntry.getKey(), rpcMapping);
}
- final ModuleMXBeanEntry entry = mBeanEntries.get(namespace).get(moduleName);
+ final ModuleMXBeanEntry entry = moduleEntry.getValue();
for (final RuntimeBeanEntry runtimeEntry : entry.getRuntimeBeans()) {
rpcMapping.addNameMapping(runtimeEntry);
allOpenedTransactions.clear();
}
- public Optional<ObjectName> getTransaction() {
+ public synchronized Optional<ObjectName> getTransaction() {
if (transaction == null){
return Optional.absent();
import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.base.Function;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Collections2;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
-
+import javax.annotation.Nonnull;
import javax.annotation.concurrent.Immutable;
import javax.management.MBeanServerConnection;
-
import org.opendaylight.controller.config.api.ConflictingVersionException;
import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.w3c.dom.Element;
import org.xml.sax.SAXException;
-import com.google.common.base.Function;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.Collections2;
-
@Immutable
public class ConfigPusherImpl implements ConfigPusher {
private static final Logger logger = LoggerFactory.getLogger(ConfigPusherImpl.class);
private static Set<String> computeNotFoundCapabilities(Set<String> expectedCapabilities, NetconfOperationService serviceCandidate) {
Collection<String> actual = Collections2.transform(serviceCandidate.getCapabilities(), new Function<Capability, String>() {
@Override
- public String apply(Capability input) {
+ public String apply(@Nonnull final Capability input) {
return input.getCapabilityUri();
}
});
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import io.netty.util.internal.ConcurrentSet;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, SessionMonitoringService {
private static final Logger logger = LoggerFactory.getLogger(NetconfMonitoringServiceImpl.class);
private List<Session> transformSessions(Set<NetconfManagementSession> sessions) {
return Lists.newArrayList(Collections2.transform(sessions, new Function<NetconfManagementSession, Session>() {
- @Nullable
@Override
- public Session apply(@Nullable NetconfManagementSession input) {
+ public Session apply(@Nonnull NetconfManagementSession input) {
return input.toManagementSession();
}
}));
this.netconfOperationServiceSnapshot = netconfOperationServiceSnapshot;
}
- private void initNetconfOperations(Set<NetconfOperation> allOperations) {
+ private synchronized void initNetconfOperations(Set<NetconfOperation> allOperations) {
allNetconfOperations = allOperations;
}
<artifactId>config-util</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>sal-netconf-connector</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-api</artifactId>
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import io.netty.channel.local.LocalAddress;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.GlobalEventExecutor;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.auth.AuthProvider;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.RemoteDevice;
+import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.xml.sax.SAXException;
public class NetconfITSecureTest extends AbstractNetconfConfigTest {
@Test
public void testSecure() throws Exception {
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
- try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+ try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(new SimpleNetconfClientSessionListener()))) {
NetconfMessage response = netconfClient.sendMessage(getGetConfig());
assertFalse("Unexpected error message " + XmlUtil.toString(response.getDocument()),
NetconfMessageUtil.isErrorMessage(response));
/**
* Test all requests are handled properly and no mismatch occurs in listener
*/
- @Test(timeout = 3*60*1000)
+ @Test(timeout = 6*60*1000)
public void testSecureStress() throws Exception {
+ final int requests = 4000;
+
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
- try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+ final NetconfDeviceCommunicator sessionListener = getSessionListener();
+ try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(sessionListener))) {
final AtomicInteger responseCounter = new AtomicInteger(0);
- final List<Future<?>> futures = Lists.newArrayList();
+ final List<ListenableFuture<RpcResult<NetconfMessage>>> futures = Lists.newArrayList();
- final int requests = 1000;
for (int i = 0; i < requests; i++) {
- final Future<NetconfMessage> netconfMessageFuture = netconfClient.sendRequest(getGetConfig());
+ NetconfMessage getConfig = getGetConfig();
+ getConfig = changeMessageId(getConfig, i);
+ final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture = sessionListener.sendRequest(getConfig, QName.create("namespace", "2012-12-12", "get"));
futures.add(netconfMessageFuture);
- netconfMessageFuture.addListener(new GenericFutureListener<Future<? super NetconfMessage>>() {
+ Futures.addCallback(netconfMessageFuture, new FutureCallback<RpcResult<NetconfMessage>>() {
@Override
- public void operationComplete(final Future<? super NetconfMessage> future) throws Exception {
- assertTrue("Request unsuccessful " + future.cause(), future.isSuccess());
+ public void onSuccess(final RpcResult<NetconfMessage> result) {
responseCounter.incrementAndGet();
}
+
+ @Override
+ public void onFailure(final Throwable t) {
+ throw new RuntimeException(t);
+ }
});
}
- for (final Future<?> future : futures) {
- future.await();
+ // Wait for every future
+ for (final ListenableFuture<RpcResult<NetconfMessage>> future : futures) {
+ try {
+ future.get(3, TimeUnit.MINUTES);
+ } catch (final TimeoutException e) {
+ fail("Request " + futures.indexOf(future) + " is not responding");
+ }
}
// Give future listeners some time to finish counter incrementation
}
}
- public NetconfClientConfiguration getClientConfiguration() throws IOException {
+ private NetconfMessage changeMessageId(final NetconfMessage getConfig, final int i) throws IOException, SAXException {
+ String s = XmlUtil.toString(getConfig.getDocument(), false);
+ s = s.replace("101", Integer.toString(i));
+ return new NetconfMessage(XmlUtil.readXmlToDocument(s));
+ }
+
+ public NetconfClientConfiguration getClientConfiguration(final NetconfClientSessionListener sessionListener) throws IOException {
final NetconfClientConfigurationBuilder b = NetconfClientConfigurationBuilder.create();
b.withAddress(TLS_ADDRESS);
- b.withSessionListener(new SimpleNetconfClientSessionListener());
+ // Using session listener from sal-netconf-connector since stress test cannot be performed with simple listener
+ b.withSessionListener(sessionListener);
b.withReconnectStrategy(new NeverReconnectStrategy(GlobalEventExecutor.INSTANCE, 5000));
b.withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH);
b.withConnectionTimeoutMillis(5000);
return b.build();
}
+ @Mock
+ private RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockedRemoteDevice;
+
+ private NetconfDeviceCommunicator getSessionListener() {
+ MockitoAnnotations.initMocks(this);
+ doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionCapabilities.class), any(RemoteDeviceCommunicator.class));
+ doNothing().when(mockedRemoteDevice).onRemoteSessionDown();
+ return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test"), mockedRemoteDevice);
+ }
+
public AuthProvider getAuthProvider() throws Exception {
final AuthProvider mockAuth = mock(AuthProvider.class);
doReturn("mockedAuth").when(mockAuth).toString();
</appender>
<logger name="org.opendaylight.controller.netconf" level="TRACE"/>
+ <logger name="org.opendaylight.controller.sal.connect.netconf" level="TRACE"/>
<root level="error">
<appender-ref ref="STDOUT" />
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Collections2;
+import javax.annotation.Nonnull;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.Yang;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.schemas.Schema;
return Collections2.transform(schema.getLocation(), new Function<Schema.Location, String>() {
@Nullable
@Override
- public String apply(@Nullable Schema.Location input) {
+ public String apply(@Nonnull Schema.Location input) {
return input.getEnumeration().toString();
}
});
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandler;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoReadFuture;
+import org.apache.sshd.common.util.Buffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Listener on async input stream from SSH session.
+ * This listeners schedules reads in a loop until the session is closed or read fails.
+ */
+final class AsyncSshHanderReader implements SshFutureListener<IoReadFuture>, AutoCloseable {
+
+ private static final Logger logger = LoggerFactory.getLogger(AsyncSshHandler.class);
+
+ private static final int BUFFER_SIZE = 8192;
+
+ private final ChannelOutboundHandler asyncSshHandler;
+ private final ChannelHandlerContext ctx;
+
+ private IoInputStream asyncOut;
+ private Buffer buf;
+ private IoReadFuture currentReadFuture;
+
+ public AsyncSshHanderReader(final ChannelOutboundHandler asyncSshHandler, final ChannelHandlerContext ctx, final IoInputStream asyncOut) {
+ this.asyncSshHandler = asyncSshHandler;
+ this.ctx = ctx;
+ this.asyncOut = asyncOut;
+ buf = new Buffer(BUFFER_SIZE);
+ asyncOut.read(buf).addListener(this);
+ }
+
+ @Override
+ public synchronized void operationComplete(final IoReadFuture future) {
+ if(future.getException() != null) {
+ if(asyncOut.isClosed() || asyncOut.isClosing()) {
+ // Ssh dropped
+ logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
+ } else {
+ logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
+ }
+ invokeDisconnect();
+ return;
+ }
+
+ if (future.getRead() > 0) {
+ ctx.fireChannelRead(Unpooled.wrappedBuffer(buf.array(), 0, future.getRead()));
+
+ // Schedule next read
+ buf = new Buffer(BUFFER_SIZE);
+ currentReadFuture = asyncOut.read(buf);
+ currentReadFuture.addListener(this);
+ }
+ }
+
+ private void invokeDisconnect() {
+ try {
+ asyncSshHandler.disconnect(ctx, ctx.newPromise());
+ } catch (final Exception e) {
+ // This should not happen
+ throw new IllegalStateException(e);
+ }
+ }
+
+ @Override
+ public synchronized void close() {
+ // Remove self as listener on close to prevent reading from closed input
+ if(currentReadFuture != null) {
+ currentReadFuture.removeListener(this);
+ }
+
+ asyncOut = null;
+ }
+}
package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
import com.google.common.base.Preconditions;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelOutboundHandler;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import java.io.IOException;
import org.apache.sshd.client.future.OpenFuture;
import org.apache.sshd.common.future.CloseFuture;
import org.apache.sshd.common.future.SshFutureListener;
-import org.apache.sshd.common.io.IoInputStream;
-import org.apache.sshd.common.io.IoOutputStream;
-import org.apache.sshd.common.io.IoReadFuture;
-import org.apache.sshd.common.io.IoWriteFuture;
-import org.apache.sshd.common.io.WritePendingException;
-import org.apache.sshd.common.util.Buffer;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private final AuthenticationHandler authenticationHandler;
private final SshClient sshClient;
- private SshReadAsyncListener sshReadAsyncListener;
- private SshWriteAsyncHandler sshWriteAsyncHandler;
+ private AsyncSshHanderReader sshReadAsyncListener;
+ private AsyncSshHandlerWriter sshWriteAsyncHandler;
private ClientChannel channel;
private ClientSession session;
connectPromise.setSuccess();
connectPromise = null;
- sshReadAsyncListener = new SshReadAsyncListener(this, ctx, channel.getAsyncOut());
+ sshReadAsyncListener = new AsyncSshHanderReader(this, ctx, channel.getAsyncOut());
// if readAsyncListener receives immediate close, it will close this handler and closing this handler sets channel variable to null
if(channel != null) {
- sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn());
+ sshWriteAsyncHandler = new AsyncSshHandlerWriter(channel.getAsyncIn());
ctx.fireChannelActive();
}
}
ctx.fireChannelInactive();
}
- /**
- * Listener over async input stream from SSH session.
- * This listeners schedules reads in a loop until the session is closed or read fails.
- */
- private static class SshReadAsyncListener implements SshFutureListener<IoReadFuture>, AutoCloseable {
- private static final int BUFFER_SIZE = 8192;
-
- private final ChannelOutboundHandler asyncSshHandler;
- private final ChannelHandlerContext ctx;
-
- private IoInputStream asyncOut;
- private Buffer buf;
- private IoReadFuture currentReadFuture;
-
- public SshReadAsyncListener(final ChannelOutboundHandler asyncSshHandler, final ChannelHandlerContext ctx, final IoInputStream asyncOut) {
- this.asyncSshHandler = asyncSshHandler;
- this.ctx = ctx;
- this.asyncOut = asyncOut;
- buf = new Buffer(BUFFER_SIZE);
- asyncOut.read(buf).addListener(this);
- }
-
- @Override
- public synchronized void operationComplete(final IoReadFuture future) {
- if(future.getException() != null) {
- if(asyncOut.isClosed() || asyncOut.isClosing()) {
- // Ssh dropped
- logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
- } else {
- logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
- }
- invokeDisconnect();
- return;
- }
-
- if (future.getRead() > 0) {
- ctx.fireChannelRead(Unpooled.wrappedBuffer(buf.array(), 0, future.getRead()));
-
- // Schedule next read
- buf = new Buffer(BUFFER_SIZE);
- currentReadFuture = asyncOut.read(buf);
- currentReadFuture.addListener(this);
- }
- }
-
- private void invokeDisconnect() {
- try {
- asyncSshHandler.disconnect(ctx, ctx.newPromise());
- } catch (final Exception e) {
- // This should not happen
- throw new IllegalStateException(e);
- }
- }
-
- @Override
- public synchronized void close() {
- // Remove self as listener on close to prevent reading from closed input
- if(currentReadFuture != null) {
- currentReadFuture.removeListener(this);
- }
-
- asyncOut = null;
- }
- }
-
- private static final class SshWriteAsyncHandler implements AutoCloseable {
- public static final int MAX_PENDING_WRITES = 100;
-
- private final ChannelOutboundHandler channelHandler;
- private IoOutputStream asyncIn;
-
- // Counter that holds the amount of pending write messages
- // Pending write can occur in case remote window is full
- // In such case, we need to wait for the pending write to finish
- private int pendingWriteCounter;
- // Last write future, that can be pending
- private IoWriteFuture lastWriteFuture;
-
- public SshWriteAsyncHandler(final ChannelOutboundHandler channelHandler, final IoOutputStream asyncIn) {
- this.channelHandler = channelHandler;
- this.asyncIn = asyncIn;
- }
-
- int c = 0;
-
- public synchronized void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
- try {
- if(asyncIn == null || asyncIn.isClosed() || asyncIn.isClosing()) {
- // If we are closed/closing, set immediate fail
- promise.setFailure(new IllegalStateException("Channel closed"));
- } else {
- lastWriteFuture = asyncIn.write(toBuffer(msg));
- lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
-
- @Override
- public void operationComplete(final IoWriteFuture future) {
- ((ByteBuf) msg).release();
-
- // Notify success or failure
- if (future.isWritten()) {
- promise.setSuccess();
- } else {
- promise.setFailure(future.getException());
- }
-
- // Reset last pending future
- synchronized (SshWriteAsyncHandler.this) {
- lastWriteFuture = null;
- }
- }
- });
- }
- } catch (final WritePendingException e) {
- // Check limit for pending writes
- pendingWriteCounter++;
- if(pendingWriteCounter > MAX_PENDING_WRITES) {
- promise.setFailure(e);
- handlePendingFailed(ctx, new IllegalStateException("Too much pending writes(" + MAX_PENDING_WRITES + ") on channel: " + ctx.channel() +
- ", remote window is not getting read or is too small"));
- }
-
- logger.debug("Write pending to SSH remote on channel: {}, current pending count: {}", ctx.channel(), pendingWriteCounter);
-
- // In case of pending, re-invoke write after pending is finished
- Preconditions.checkNotNull(lastWriteFuture, "Write is pending, but there was no previous write attempt", e);
- lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
- @Override
- public void operationComplete(final IoWriteFuture future) {
- if (future.isWritten()) {
- synchronized (SshWriteAsyncHandler.this) {
- // Pending done, decrease counter
- pendingWriteCounter--;
- }
- write(ctx, msg, promise);
- } else {
- // Cannot reschedule pending, fail
- handlePendingFailed(ctx, e);
- }
- }
-
- });
- }
- }
-
- private void handlePendingFailed(final ChannelHandlerContext ctx, final Exception e) {
- logger.warn("Exception while writing to SSH remote on channel {}", ctx.channel(), e);
- try {
- channelHandler.disconnect(ctx, ctx.newPromise());
- } catch (final Exception ex) {
- // This should not happen
- throw new IllegalStateException(ex);
- }
- }
-
- @Override
- public void close() {
- asyncIn = null;
- }
-
- private Buffer toBuffer(final Object msg) {
- // TODO Buffer vs ByteBuf translate, Can we handle that better ?
- Preconditions.checkState(msg instanceof ByteBuf);
- final ByteBuf byteBuf = (ByteBuf) msg;
- final byte[] temp = new byte[byteBuf.readableBytes()];
- byteBuf.readBytes(temp, 0, byteBuf.readableBytes());
- return new Buffer(temp);
- }
-
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.Queue;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.common.io.IoWriteFuture;
+import org.apache.sshd.common.io.WritePendingException;
+import org.apache.sshd.common.util.Buffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Async Ssh writer. Takes messages(byte arrays) and sends them encrypted to remote server.
+ * Also handles pending writes by caching requests until pending state is over.
+ */
+final class AsyncSshHandlerWriter implements AutoCloseable {
+
+ private static final Logger logger = LoggerFactory
+ .getLogger(AsyncSshHandlerWriter.class);
+
+ // public static final int MAX_PENDING_WRITES = 1000;
+ // TODO implement Limiting mechanism for pending writes
+ // But there is a possible issue with limiting:
+ // 1. What to do when queue is full ? Immediate Fail for every request ?
+ // 2. At this level we might be dealing with Chunks of messages(not whole messages) and unexpected behavior might occur
+ // when we send/queue 1 chunk and fail the other chunks
+
+ private IoOutputStream asyncIn;
+
+ // Order has to be preserved for queued writes
+ private final Deque<PendingWriteRequest> pending = new LinkedList<>();
+
+ public AsyncSshHandlerWriter(final IoOutputStream asyncIn) {
+ this.asyncIn = asyncIn;
+ }
+
+ public synchronized void write(final ChannelHandlerContext ctx,
+ final Object msg, final ChannelPromise promise) {
+ // TODO check for isClosed, isClosing might be performed by mina SSH internally and is not required here
+ // If we are closed/closing, set immediate fail
+ if (asyncIn == null || asyncIn.isClosed() || asyncIn.isClosing()) {
+ promise.setFailure(new IllegalStateException("Channel closed"));
+ } else {
+ final ByteBuf byteBufMsg = (ByteBuf) msg;
+ if (pending.isEmpty() == false) {
+ queueRequest(ctx, byteBufMsg, promise);
+ return;
+ }
+
+ writeWithPendingDetection(ctx, promise, byteBufMsg);
+ }
+ }
+
+ private void writeWithPendingDetection(final ChannelHandlerContext ctx, final ChannelPromise promise, final ByteBuf byteBufMsg) {
+ try {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Writing request on channel: {}, message: {}", ctx.channel(), byteBufToString(byteBufMsg));
+ }
+ asyncIn.write(toBuffer(byteBufMsg)).addListener(new SshFutureListener<IoWriteFuture>() {
+
+ @Override
+ public void operationComplete(final IoWriteFuture future) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Ssh write request finished on channel: {} with result: {}: and ex:{}, message: {}",
+ ctx.channel(), future.isWritten(), future.getException(), byteBufToString(byteBufMsg));
+ }
+
+ // Notify success or failure
+ if (future.isWritten()) {
+ promise.setSuccess();
+ } else {
+ logger.warn("Ssh write request failed on channel: {} for message: {}", ctx.channel(), byteBufToString(byteBufMsg), future.getException());
+ promise.setFailure(future.getException());
+ }
+
+ // Not needed anymore, release
+ byteBufMsg.release();
+
+ // Check pending queue and schedule next
+ // At this time we are guaranteed that we are not in pending state anymore so the next request should succeed
+ writePendingIfAny();
+ }
+ });
+ } catch (final WritePendingException e) {
+ queueRequest(ctx, byteBufMsg, promise);
+ }
+ }
+
+ private synchronized void writePendingIfAny() {
+ if (pending.peek() == null) {
+ return;
+ }
+
+ // In case of pending, reschedule next message from queue
+ final PendingWriteRequest pendingWrite = pending.poll();
+ final ByteBuf msg = pendingWrite.msg;
+ if (logger.isTraceEnabled()) {
+ logger.trace("Writing pending request on channel: {}, message: {}", pendingWrite.ctx.channel(), byteBufToString(msg));
+ }
+
+ writeWithPendingDetection(pendingWrite.ctx, pendingWrite.promise, msg);
+ }
+
+ private static String byteBufToString(final ByteBuf msg) {
+ msg.resetReaderIndex();
+ final String s = msg.toString(Charsets.UTF_8);
+ msg.resetReaderIndex();
+ return s;
+ }
+
+ private void queueRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
+// try {
+ logger.debug("Write pending on channel: {}, queueing, current queue size: {}", ctx.channel(), pending.size());
+ if (logger.isTraceEnabled()) {
+ logger.trace("Queueing request due to pending: {}", byteBufToString(msg));
+ }
+ new PendingWriteRequest(ctx, msg, promise).pend(pending);
+// } catch (final Exception ex) {
+// logger.warn("Unable to queue write request on channel: {}. Setting fail for the request: {}", ctx.channel(), ex, byteBufToString(msg));
+// msg.release();
+// promise.setFailure(ex);
+// }
+ }
+
+ @Override
+ public synchronized void close() {
+ asyncIn = null;
+ }
+
+ private Buffer toBuffer(final ByteBuf msg) {
+ // TODO Buffer vs ByteBuf translate, Can we handle that better ?
+ final byte[] temp = new byte[msg.readableBytes()];
+ msg.readBytes(temp, 0, msg.readableBytes());
+ return new Buffer(temp);
+ }
+
+ private static final class PendingWriteRequest {
+ private final ChannelHandlerContext ctx;
+ private final ByteBuf msg;
+ private final ChannelPromise promise;
+
+ public PendingWriteRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
+ this.ctx = ctx;
+ // Reset reader index, last write (failed) attempt moved index to the end
+ msg.resetReaderIndex();
+ this.msg = msg;
+ this.promise = promise;
+ }
+
+ public void pend(final Queue<PendingWriteRequest> pending) {
+ // Preconditions.checkState(pending.size() < MAX_PENDING_WRITES,
+ // "Too much pending writes(%s) on channel: %s, remote window is not getting read or is too small",
+ // pending.size(), ctx.channel());
+ Preconditions.checkState(pending.offer(this), "Cannot pend another request write (pending count: %s) on channel: %s",
+ pending.size(), ctx.channel());
+ }
+ }
+}
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
import java.io.IOException;
import java.net.SocketAddress;
-import java.nio.channels.WritePendingException;
import org.apache.sshd.ClientChannel;
import org.apache.sshd.ClientSession;
import org.apache.sshd.SshClient;
import org.apache.sshd.common.util.Buffer;
import org.junit.After;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mock;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
// make first write stop pending
firstWriteListener.operationComplete(ioWriteFuture);
- // intercept third listener, this is regular listener for second write to determine success or failure
- final ListenableFuture<SshFutureListener<IoWriteFuture>> afterPendingListener = stubAddListener(ioWriteFuture);
// notify listener for second write that pending has ended
pendingListener.get().operationComplete(ioWriteFuture);
- // Notify third listener (regular listener for second write) that second write succeeded
- afterPendingListener.get().operationComplete(ioWriteFuture);
// verify both write promises successful
verify(firstWritePromise).setSuccess();
verify(secondWritePromise).setSuccess();
}
+ @Ignore("Pending queue is not limited")
@Test
public void testWritePendingMax() throws Exception {
asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
final ChannelPromise secondWritePromise = getMockedPromise();
// now make write throw pending exception
doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class));
- for (int i = 0; i < 1000; i++) {
+ for (int i = 0; i < 1001; i++) {
asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise);
}
- verify(ctx).fireChannelInactive();
+ verify(secondWritePromise, times(1)).setFailure(any(Throwable.class));
}
@Test
final AuthProvider authService = bundleContext.getService(reference);
final Integer newServicePreference = getPreference(reference);
if(isBetter(newServicePreference)) {
+ maxPreference = newServicePreference;
server.setAuthProvider(authService);
if(sshThread == null) {
sshThread = runNetconfSshThread(server);
}
@VisibleForTesting
- void setNullableUserManager(final IUserManager nullableUserManager) {
+ synchronized void setNullableUserManager(final IUserManager nullableUserManager) {
this.nullableUserManager = nullableUserManager;
}
}
rpcReply.appendChild(responseNS);
}
- for (String attrName : attributes.keySet()) {
- rpcReply.setAttributeNode((Attr) document.importNode(attributes.get(attrName), true));
+ for (Attr attribute : attributes.values()) {
+ rpcReply.setAttributeNode((Attr) document.importNode(attribute, true));
}
document.appendChild(rpcReply);
return document;
import com.google.common.base.Function;
import com.google.common.collect.Collections2;
-
+import java.util.Collection;
+import java.util.List;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
-import javax.annotation.Nullable;
-
-import java.util.Collection;
-import java.util.List;
-
public final class NetconfMessageUtil {
private static final Logger logger = LoggerFactory.getLogger(NetconfMessageUtil.class);
List<XmlElement> caps = capabilitiesElement.getChildElements(XmlNetconfConstants.CAPABILITY);
return Collections2.transform(caps, new Function<XmlElement, String>() {
- @Nullable
@Override
- public String apply(@Nullable XmlElement input) {
+ public String apply(@Nonnull XmlElement input) {
// Trim possible leading/tailing whitespace
try {
return input.getTextContent().trim();
}
}
- /**
- * Get extracted address or default.
- *
- * @throws java.lang.IllegalStateException if neither address is present.
- */
- private static InetSocketAddress getNetconfAddress(final InetSocketAddress defaultAddress, Optional<InetSocketAddress> extractedAddress, InfixProp infix) {
- InetSocketAddress inetSocketAddress;
-
- if (extractedAddress.isPresent() == false) {
- logger.debug("Netconf {} address not found, falling back to default {}", infix, defaultAddress);
-
- if (defaultAddress == null) {
- logger.warn("Netconf {} address not found, default address not provided", infix);
- throw new IllegalStateException("Netconf " + infix + " address not found, default address not provided");
- }
- inetSocketAddress = defaultAddress;
- } else {
- inetSocketAddress = extractedAddress.get();
- }
-
- return inetSocketAddress;
- }
-
public static String getPrivateKeyPath(final BundleContext context) {
return getPropertyValue(context, getPrivateKeyKey());
}
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import javax.annotation.Nullable;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
return Lists.newArrayList(Collections2.filter(getChildElementsWithinNamespace(namespace),
new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
List<XmlElement> children = getChildElementsWithinNamespace(namespace);
children = Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
List<XmlElement> children = getChildElementsWithinNamespace(getNamespace());
return Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class AbstractLastNetconfOperationTest {
+ class LastNetconfOperationImplTest extends AbstractLastNetconfOperation {
+
+ boolean handleWithNoSubsequentOperationsRun;
+
+ protected LastNetconfOperationImplTest(String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ handleWithNoSubsequentOperationsRun = false;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+ handleWithNoSubsequentOperationsRun = true;
+ return null;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return "";
+ }
+ }
+
+ LastNetconfOperationImplTest netconfOperation;
+
+ @Before
+ public void setUp() throws Exception {
+ netconfOperation = new LastNetconfOperationImplTest("");
+ }
+
+ @Test
+ public void testNetconfOperation() throws Exception {
+ netconfOperation.handleWithNoSubsequentOperations(null, null);
+ assertTrue(netconfOperation.handleWithNoSubsequentOperationsRun);
+ assertEquals(HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY, netconfOperation.getHandlingPriority());
+ }
+
+ @Test(expected = NetconfDocumentedException.class)
+ public void testHandle() throws Exception {
+ NetconfOperationChainedExecution operation = mock(NetconfOperationChainedExecution.class);
+ doReturn("").when(operation).toString();
+
+ doReturn(false).when(operation).isExecutionTermination();
+ netconfOperation.handle(null, null, operation);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import java.io.IOException;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+public class AbstractNetconfOperationTest {
+
+ class NetconfOperationImpl extends AbstractNetconfOperation {
+
+ public boolean handleRun;
+
+ protected NetconfOperationImpl(String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ this.handleRun = false;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return null;
+ }
+
+ @Override
+ protected Element handle(Document document, XmlElement message, NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+ this.handleRun = true;
+ try {
+ return XmlUtil.readXmlToElement("<element/>");
+ } catch (SAXException | IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ private NetconfOperationImpl netconfOperation;
+ private NetconfOperationChainedExecution operation;
+
+ @Before
+ public void setUp() throws Exception {
+ netconfOperation = new NetconfOperationImpl("str");
+ operation = mock(NetconfOperationChainedExecution.class);
+ }
+
+ @Test
+ public void testAbstractNetconfOperation() throws Exception {
+ Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/edit_config.xml");
+ assertEquals(netconfOperation.getNetconfSessionIdForReporting(), "str");
+ assertNotNull(netconfOperation.canHandle(helloMessage));
+ assertEquals(netconfOperation.getHandlingPriority(), HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY);
+
+ netconfOperation.handle(helloMessage, operation);
+ assertTrue(netconfOperation.handleRun);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+
+public class AbstractSingletonNetconfOperationTest {
+ class SingletonNCOperationImpl extends AbstractSingletonNetconfOperation {
+
+ protected SingletonNCOperationImpl(String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+ return null;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return null;
+ }
+ }
+
+ @Test
+ public void testAbstractSingletonNetconfOperation() throws Exception {
+ SingletonNCOperationImpl operation = new SingletonNCOperationImpl("");
+ assertEquals(operation.getHandlingPriority(), HandlingPriority.HANDLE_WITH_MAX_PRIORITY);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class NetconfHelloMessageAdditionalHeaderTest {
+
+
+ private String customHeader = "[user;1.1.1.1:40;tcp;client;]";
+ private NetconfHelloMessageAdditionalHeader header;
+
+ @Before
+ public void setUp() throws Exception {
+ header = new NetconfHelloMessageAdditionalHeader("user", "1.1.1.1", "40", "tcp", "client");
+ }
+
+ @Test
+ public void testGetters() throws Exception {
+ assertEquals(header.getAddress(), "1.1.1.1");
+ assertEquals(header.getUserName(), "user");
+ assertEquals(header.getPort(), "40");
+ assertEquals(header.getTransport(), "tcp");
+ assertEquals(header.getSessionIdentifier(), "client");
+ }
+
+ @Test
+ public void testStaticConstructor() throws Exception {
+ NetconfHelloMessageAdditionalHeader h = NetconfHelloMessageAdditionalHeader.fromString(customHeader);
+ assertEquals(h.toString(), header.toString());
+ assertEquals(h.toFormattedString(), header.toFormattedString());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+
+import com.google.common.base.Optional;
+import java.util.Set;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.internal.util.collections.Sets;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class NetconfHelloMessageTest {
+
+ Set<String> caps;
+
+ @Before
+ public void setUp() throws Exception {
+ caps = Sets.newSet("cap1");
+ }
+
+ @Test
+ public void testConstructor() throws Exception {
+ NetconfHelloMessageAdditionalHeader additionalHeader = new NetconfHelloMessageAdditionalHeader("name","host","1","transp","id");
+ NetconfHelloMessage message = NetconfHelloMessage.createClientHello(caps, Optional.of(additionalHeader));
+ assertTrue(message.isHelloMessage(message));
+ assertEquals(Optional.of(additionalHeader), message.getAdditionalHeader());
+
+ NetconfHelloMessage serverMessage = NetconfHelloMessage.createServerHello(caps, 100L);
+ assertTrue(serverMessage.isHelloMessage(serverMessage));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import com.google.common.base.Charsets;
+import java.util.Arrays;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+public class NetconfMessageHeaderTest {
+ @Test
+ public void testGet() throws Exception {
+ NetconfMessageHeader header = new NetconfMessageHeader(10);
+ assertEquals(header.getLength(), 10);
+
+ byte[] expectedValue = "\n#10\n".getBytes(Charsets.US_ASCII);
+ assertArrayEquals(expectedValue, header.toBytes());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import java.util.Collection;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class NetconfMessageUtilTest {
+ @Test
+ public void testNetconfMessageUtil() throws Exception {
+ Document okMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+ assertTrue(NetconfMessageUtil.isOKMessage(new NetconfMessage(okMessage)));
+ assertFalse(NetconfMessageUtil.isErrorMessage(new NetconfMessage(okMessage)));
+
+ Document errorMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/communicationError/testClientSendsRpcReply_expectedResponse.xml");
+ assertTrue(NetconfMessageUtil.isErrorMessage(new NetconfMessage(errorMessage)));
+ assertFalse(NetconfMessageUtil.isOKMessage(new NetconfMessage(errorMessage)));
+
+ Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/client_hello.xml");
+ Collection<String> caps = NetconfMessageUtil.extractCapabilitiesFromHello(new NetconfMessage(helloMessage).getDocument());
+ assertTrue(caps.contains("urn:ietf:params:netconf:base:1.0"));
+ assertTrue(caps.contains("urn:ietf:params:netconf:base:1.1"));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.*;
+
+public class SendErrorExceptionUtilTest {
+
+ NetconfSession netconfSession;
+ ChannelFuture channelFuture;
+ Channel channel;
+ private NetconfDocumentedException exception;
+
+ @Before
+ public void setUp() throws Exception {
+ netconfSession = mock(NetconfSession.class);
+ channelFuture = mock(ChannelFuture.class);
+ channel = mock(Channel.class);
+ doReturn(channelFuture).when(netconfSession).sendMessage(any(NetconfMessage.class));
+ doReturn(channelFuture).when(channelFuture).addListener(any(GenericFutureListener.class));
+ doReturn(channelFuture).when(channel).writeAndFlush(any(NetconfMessage.class));
+ exception = new NetconfDocumentedException("err");
+ }
+
+ @Test
+ public void testSendErrorMessage1() throws Exception {
+ SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception);
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ verify(netconfSession, times(1)).sendMessage(any(NetconfMessage.class));
+ }
+
+ @Test
+ public void testSendErrorMessage2() throws Exception {
+ SendErrorExceptionUtil.sendErrorMessage(channel, exception);
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ }
+
+ @Test
+ public void testSendErrorMessage3() throws Exception {
+ Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc.xml");
+ SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception, new NetconfMessage(helloMessage));
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.osgi;
+
+import com.google.common.base.Optional;
+import io.netty.channel.local.LocalAddress;
+import java.net.InetSocketAddress;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.NetconfUtil;
+import org.osgi.framework.BundleContext;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class NetconfConfigUtilTest {
+
+ private BundleContext bundleContext;
+
+ @Before
+ public void setUp() throws Exception {
+ bundleContext = mock(BundleContext.class);
+ }
+
+ @Test
+ public void testNetconfConfigUtil() throws Exception {
+ assertEquals(NetconfConfigUtil.getNetconfLocalAddress(), new LocalAddress("netconf"));
+
+ doReturn("").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+ assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+
+ doReturn("a").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+ assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+ }
+
+ @Test
+ public void testgetPrivateKeyKey() throws Exception {
+ assertEquals(NetconfConfigUtil.getPrivateKeyKey(), "netconf.ssh.pk.path");
+ }
+
+ @Test
+ public void testgetNetconfServerAddressKey() throws Exception {
+ NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+ assertEquals(NetconfConfigUtil.getNetconfServerAddressKey(prop), "netconf.tcp.address");
+ }
+
+ @Test
+ public void testExtractNetconfServerAddress() throws Exception {
+ NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+ doReturn("").when(bundleContext).getProperty(anyString());
+ assertEquals(NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop), Optional.absent());
+ }
+
+ @Test
+ public void testExtractNetconfServerAddress2() throws Exception {
+ NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+ doReturn("1.1.1.1").when(bundleContext).getProperty("netconf.tcp.address");
+ doReturn("20").when(bundleContext).getProperty("netconf.tcp.port");
+ Optional<InetSocketAddress> inetSocketAddressOptional = NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop);
+ assertTrue(inetSocketAddressOptional.isPresent());
+ assertEquals(inetSocketAddressOptional.get(), new InetSocketAddress("1.1.1.1", 20));
+ }
+
+ @Test
+ public void testGetPrivateKeyPath() throws Exception {
+ doReturn("path").when(bundleContext).getProperty("netconf.ssh.pk.path");
+ assertEquals(NetconfConfigUtil.getPrivateKeyPath(bundleContext), "path");
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testGetPrivateKeyPath2() throws Exception {
+ doReturn(null).when(bundleContext).getProperty("netconf.ssh.pk.path");
+ assertEquals(NetconfConfigUtil.getPrivateKeyPath(bundleContext), "path");
+ }
+}
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
end += rawPayload.length;
}
int checksumStartByte = start + getfieldOffset(CHECKSUM) / NetUtils.NumBitsInAByte;
+ int even = end & ~1;
- for (int i = start; i <= (end - 1); i = i + 2) {
+ for (int i = start; i < even; i = i + 2) {
// Skip, if the current bytes are checkSum bytes
if (i == checksumStartByte) {
continue;
wordData = ((data[i] << 8) & 0xFF00) + (data[i + 1] & 0xFF);
sum = sum + wordData;
}
- carry = (sum >> 16) & 0xFF;
+ if (even < end) {
+ // Add the last octet with zero padding.
+ wordData = (data[even] << 8) & 0xFF00;
+ sum = sum + wordData;
+ }
+
+ carry = sum >>> 16;
finalSum = (sum & 0xFFFF) + carry;
return (short) ~((short) finalSum & 0xFFFF);
}
*/
public void setHeaderField(String headerField, byte[] readValue) {
if (headerField.equals(PROTOCOL)) {
- payloadClass = protocolClassMap.get(readValue[0]);
+ // Don't set payloadClass if framgment offset is not zero.
+ byte[] fragoff = hdrFieldsMap.get(FRAGOFFSET);
+ if (fragoff == null || BitBufferHelper.getShort(fragoff) == 0) {
+ payloadClass = protocolClassMap.get(readValue[0]);
+ }
+ } else if (headerField.equals(FRAGOFFSET)) {
+ if (readValue != null && BitBufferHelper.getShort(readValue) != 0) {
+ // Clear payloadClass because protocol header is not present
+ // in this packet.
+ payloadClass = null;
+ }
} else if (headerField.equals(OPTIONS) &&
(readValue == null || readValue.length == 0)) {
hdrFieldsMap.remove(headerField);
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
package org.opendaylight.controller.sal.packet;
+import java.util.Arrays;
+
import junit.framework.Assert;
import org.junit.Test;
(byte) 0x2b, (byte) 0x2c, (byte) 0x2d, (byte) 0x2e,
(byte) 0x2f, (byte) 0x30, (byte) 0x31, (byte) 0x32,
(byte) 0x33, (byte) 0x34, (byte) 0x35, (byte) 0x36, (byte) 0x37 };
+ serializeTest(icmpRawPayload, (short)0xe553);
+
+ serializeTest(null, (short)0xb108);
+ serializeTest(new byte[0], (short)0xb108);
+
+ byte[] odd = {
+ (byte)0xba, (byte)0xd4, (byte)0xc7, (byte)0x53,
+ (byte)0xf8, (byte)0x59, (byte)0x68, (byte)0x77,
+ (byte)0xfd, (byte)0x27, (byte)0xe0, (byte)0x5b,
+ (byte)0xd0, (byte)0x2e, (byte)0x28, (byte)0x41,
+ (byte)0xa3, (byte)0x48, (byte)0x5d, (byte)0x2e,
+ (byte)0x7d, (byte)0x5b, (byte)0xd3, (byte)0x60,
+ (byte)0xb3, (byte)0x88, (byte)0x8d, (byte)0x0f,
+ (byte)0x1d, (byte)0x87, (byte)0x51, (byte)0x0f,
+ (byte)0x6a, (byte)0xff, (byte)0xf7, (byte)0xd4,
+ (byte)0x40, (byte)0x35, (byte)0x4e, (byte)0x01,
+ (byte)0x36,
+ };
+ serializeTest(odd, (short)0xd0ad);
+
+ // Large payload that causes 16-bit checksum overflow more than
+ // 255 times.
+ byte[] largeEven = new byte[1024];
+ Arrays.fill(largeEven, (byte)0xff);
+ serializeTest(largeEven, (short)0xb108);
+
+ byte[] largeOdd = new byte[1021];
+ Arrays.fill(largeOdd, (byte)0xff);
+ serializeTest(largeOdd, (short)0xb207);
+ }
- short checksum = (short)0xe553;
-
- // Create ICMP object
+ private void serializeTest(byte[] payload, short checksum)
+ throws PacketException {
ICMP icmp = new ICMP();
- icmp.setType((byte)8);
- icmp.setCode((byte)0);
- icmp.setIdentifier((short) 0x46f5);
- icmp.setSequenceNumber((short) 2);
- icmp.setRawPayload(icmpRawPayload);
- //icmp.setChecksum(checksum);
+ icmp.setType((byte)8).setCode((byte)0).
+ setIdentifier((short)0x46f5).setSequenceNumber((short)2);
+ int payloadSize = 0;
+ if (payload != null) {
+ icmp.setRawPayload(payload);
+ payloadSize = payload.length;
+ }
// Serialize
- byte[] stream = icmp.serialize();
- Assert.assertTrue(stream.length == 64);
+ byte[] data = icmp.serialize();
+ Assert.assertEquals(payloadSize + 8, data.length);
// Deserialize
ICMP icmpDes = new ICMP();
- icmpDes.deserialize(stream, 0, stream.length);
+ icmpDes.deserialize(data, 0, data.length);
Assert.assertFalse(icmpDes.isCorrupted());
- Assert.assertTrue(icmpDes.getChecksum() == checksum);
- Assert.assertTrue(icmp.equals(icmpDes));
+ Assert.assertEquals(checksum, icmpDes.getChecksum());
+ Assert.assertEquals(icmp, icmpDes);
}
}
import java.net.UnknownHostException;
import java.util.Arrays;
-import junit.framework.Assert;
-
+import org.junit.Assert;
import org.junit.Test;
+
import org.opendaylight.controller.sal.match.Match;
import org.opendaylight.controller.sal.match.MatchType;
import org.opendaylight.controller.sal.utils.EtherTypes;
Assert.assertEquals(protocol, (byte) match.getField(MatchType.NW_PROTO).getValue());
Assert.assertEquals(tos, (byte) match.getField(MatchType.NW_TOS).getValue());
}
+
+ @Test
+ public void testFragment() throws Exception {
+ byte[] payload1 = new byte[0];
+ byte[] payload2 = {
+ (byte)0x61, (byte)0xd1, (byte)0x3d, (byte)0x51,
+ (byte)0x1b, (byte)0x75, (byte)0xa7, (byte)0x83,
+ };
+ byte[] payload3 = {
+ (byte)0xe7, (byte)0x0f, (byte)0x2d, (byte)0x7e,
+ (byte)0x15, (byte)0xba, (byte)0xe7, (byte)0x6d,
+ (byte)0xb5, (byte)0xc5, (byte)0xb5, (byte)0x37,
+ (byte)0x59, (byte)0xbc, (byte)0x91, (byte)0x43,
+ (byte)0xb5, (byte)0xb7, (byte)0xe4, (byte)0x28,
+ (byte)0xec, (byte)0x62, (byte)0x6b, (byte)0x6a,
+ (byte)0xd1, (byte)0xcb, (byte)0x79, (byte)0x1e,
+ (byte)0xfc, (byte)0x82, (byte)0xf5, (byte)0xb4,
+ };
+
+ // Ensure that the payload is not deserialized if the fragment offset
+ // is not zero.
+ byte proto = IPProtocols.TCP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf250);
+ fragmentTest(payload2, proto, (short)0xf248);
+ fragmentTest(payload3, proto, (short)0xf230);
+
+ proto = IPProtocols.UDP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf245);
+ fragmentTest(payload2, proto, (short)0xf23d);
+ fragmentTest(payload3, proto, (short)0xf225);
+
+ proto = IPProtocols.ICMP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf255);
+ fragmentTest(payload2, proto, (short)0xf24d);
+ fragmentTest(payload3, proto, (short)0xf235);
+
+ // Ensure that the protocol header in the first fragment is
+ // deserialized.
+ proto = IPProtocols.TCP.byteValue();
+ TCP tcp = new TCP();
+ tcp.setSourcePort((short)1234).setDestinationPort((short)32000).
+ setSequenceNumber((int)0xd541f5f8).setAckNumber((int)0x58da787d).
+ setDataOffset((byte)5).setReserved((byte)0).
+ setHeaderLenFlags((short)0x18).setWindowSize((short)0x40e8).
+ setUrgentPointer((short)0x15f7).setChecksum((short)0x0d4e);
+ firstFragmentTest(tcp, payload1, proto, (short)0xdfe6);
+ tcp.setChecksum((short)0xab2a);
+ firstFragmentTest(tcp, payload2, proto, (short)0xdfde);
+ tcp.setChecksum((short)0x1c75);
+ firstFragmentTest(tcp, payload3, proto, (short)0xdfc6);
+
+ proto = IPProtocols.UDP.byteValue();
+ UDP udp = new UDP();
+ udp.setSourcePort((short)53).setDestinationPort((short)45383).
+ setLength((short)(payload1.length + 8)).setChecksum((short)0);
+ firstFragmentTest(udp, payload1, proto, (short)0xdfe7);
+ udp.setLength((short)(payload2.length + 8));
+ firstFragmentTest(udp, payload2, proto, (short)0xdfdf);
+ udp.setLength((short)(payload3.length + 8));
+ firstFragmentTest(udp, payload3, proto, (short)0xdfc7);
+
+ proto = IPProtocols.ICMP.byteValue();
+ ICMP icmp = new ICMP();
+ icmp.setType((byte)8).setCode((byte)0).setIdentifier((short)0x3d1e).
+ setSequenceNumber((short)1);
+ firstFragmentTest(icmp, payload1, proto, (short)0xdff7);
+ firstFragmentTest(icmp, payload2, proto, (short)0xdfef);
+ firstFragmentTest(icmp, payload3, proto, (short)0xdfd7);
+ }
+
+ private void fragmentTest(byte[] payload, byte proto, short checksum)
+ throws Exception {
+ // Construct a fragmented raw IPv4 packet.
+ int ipv4Len = 20;
+ byte[] rawIp = new byte[ipv4Len + payload.length];
+
+ byte ipVersion = 4;
+ byte dscp = 35;
+ byte ecn = 2;
+ byte tos = (byte)((dscp << 2) | ecn);
+ short totalLen = (short)rawIp.length;
+ short id = 22143;
+ short offset = 0xb9;
+ byte ttl = 64;
+ byte[] srcIp = {(byte)0x0a, (byte)0x00, (byte)0x00, (byte)0x01};
+ byte[] dstIp = {(byte)0xc0, (byte)0xa9, (byte)0x66, (byte)0x23};
+
+ rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+ rawIp[1] = tos;
+ rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+ rawIp[3] = (byte)totalLen;
+ rawIp[4] = (byte)(id >>> Byte.SIZE);
+ rawIp[5] = (byte)id;
+ rawIp[6] = (byte)(offset >>> Byte.SIZE);
+ rawIp[7] = (byte)offset;
+ rawIp[8] = ttl;
+ rawIp[9] = proto;
+ rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+ rawIp[11] = (byte)checksum;
+ System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+ System.arraycopy(dstIp, 0, rawIp, 16, srcIp.length);
+ System.arraycopy(payload, 0, rawIp, ipv4Len, payload.length);
+
+ // Deserialize.
+ IPv4 ipv4 = new IPv4();
+ ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+ Assert.assertEquals(ipVersion, ipv4.getVersion());
+ Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+ Assert.assertEquals(dscp, ipv4.getDiffServ());
+ Assert.assertEquals(ecn, ipv4.getECN());
+ Assert.assertEquals(totalLen, ipv4.getTotalLength());
+ Assert.assertEquals(id, ipv4.getIdentification());
+ Assert.assertEquals((byte)0, ipv4.getFlags());
+ Assert.assertEquals(offset, ipv4.getFragmentOffset());
+ Assert.assertEquals(ttl, ipv4.getTtl());
+ Assert.assertEquals(proto, ipv4.getProtocol());
+ Assert.assertEquals(checksum, ipv4.getChecksum());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+ ipv4.getSourceAddress());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+ ipv4.getDestinationAddress());
+ Assert.assertFalse(ipv4.isCorrupted());
+
+ // payloadClass should not be set if fragment offset is not zero.
+ Assert.assertEquals(null, ipv4.getPayload());
+ Assert.assertArrayEquals(payload, ipv4.getRawPayload());
+ }
+
+ private void firstFragmentTest(Packet payload, byte[] rawPayload,
+ byte proto, short checksum)
+ throws Exception {
+ // Construct a raw IPv4 packet with MF flag.
+ int ipv4Len = 20;
+ payload.setRawPayload(rawPayload);
+ byte[] payloadBytes = payload.serialize();
+ byte[] rawIp = new byte[ipv4Len + payloadBytes.length];
+
+ byte ipVersion = 4;
+ byte dscp = 13;
+ byte ecn = 1;
+ byte tos = (byte)((dscp << 2) | ecn);
+ short totalLen = (short)rawIp.length;
+ short id = 19834;
+ byte flags = 0x1;
+ short offset = 0;
+ short off = (short)(((short)flags << 13) | offset);
+ byte ttl = 64;
+ byte[] srcIp = {(byte)0xac, (byte)0x23, (byte)0x5b, (byte)0xfd};
+ byte[] dstIp = {(byte)0xc0, (byte)0xa8, (byte)0x64, (byte)0x71};
+
+ rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+ rawIp[1] = tos;
+ rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+ rawIp[3] = (byte)totalLen;
+ rawIp[4] = (byte)(id >>> Byte.SIZE);
+ rawIp[5] = (byte)id;
+ rawIp[6] = (byte)(off >>> Byte.SIZE);
+ rawIp[7] = (byte)off;
+ rawIp[8] = ttl;
+ rawIp[9] = proto;
+ rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+ rawIp[11] = (byte)checksum;
+ System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+ System.arraycopy(dstIp, 0, rawIp, 16, srcIp.length);
+ System.arraycopy(payloadBytes, 0, rawIp, ipv4Len, payloadBytes.length);
+
+ // Deserialize.
+ IPv4 ipv4 = new IPv4();
+ ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+ Assert.assertEquals(ipVersion, ipv4.getVersion());
+ Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+ Assert.assertEquals(dscp, ipv4.getDiffServ());
+ Assert.assertEquals(ecn, ipv4.getECN());
+ Assert.assertEquals(totalLen, ipv4.getTotalLength());
+ Assert.assertEquals(id, ipv4.getIdentification());
+ Assert.assertEquals(flags, ipv4.getFlags());
+ Assert.assertEquals(offset, ipv4.getFragmentOffset());
+ Assert.assertEquals(ttl, ipv4.getTtl());
+ Assert.assertEquals(proto, ipv4.getProtocol());
+ Assert.assertEquals(checksum, ipv4.getChecksum());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+ ipv4.getSourceAddress());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+ ipv4.getDestinationAddress());
+ Assert.assertFalse(ipv4.isCorrupted());
+
+ // Protocol header in the first fragment should be deserialized.
+ Assert.assertEquals(null, ipv4.getRawPayload());
+
+ Packet desPayload = ipv4.getPayload();
+ Assert.assertEquals(payload, desPayload);
+ Assert.assertFalse(desPayload.isCorrupted());
+ Assert.assertArrayEquals(rawPayload, desPayload.getRawPayload());
+ }
}