<configfile finalname="configuration/initial/akka.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/akkaconf</configfile>
<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
+ <configfile finalname="etc/org.opendaylight.controller.cluster.datastore.cfg">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/cfg/datastore</configfile>
</feature>
<feature name='odl-clustering-test-app' version='${project.version}'>
<bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
<bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
+ <bundle>mvn:org.opendaylight.yangtools/yang-model-export/${yangtools.version}</bundle>
<bundle>mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
<bundle>mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
<bundle>mvn:com.sun.jersey/jersey-servlet/${jersey.version}</bundle>
-->
</feature>
+ <feature name="odl-nsf-service" description="OpenDaylight :: NSF :: Network Service Functions in Controller" version="${project.version}">
+ <feature version="${sal.version}">odl-adsal-all</feature>
+ <feature version="${project.version}">odl-nsf-controller-managers</feature>
+ <feature version="${project.version}">odl-adsal-controller-northbound</feature>
+ </feature>
+
<feature name="odl-nsf-managers" description="OpenDaylight :: AD-SAL :: Network Service Functions" version="${project.version}">
<feature version="${commons.opendaylight.version}">odl-base-all</feature>
<feature version="${sal.version}">odl-adsal-all</feature>
<bundle>mvn:org.opendaylight.controller/routing.dijkstra_implementation/${routing.dijkstra_implementation.version}</bundle>
</feature>
+ <feature name="odl-nsf-controller-managers" description="OpenDaylight :: AD-SAL :: Network Service Functions in Controller" version="${project.version}">
+ <feature version="${commons.opendaylight.version}">odl-base-all</feature>
+ <feature version="${sal.version}">odl-adsal-all</feature>
+ <bundle>mvn:org.opendaylight.controller/usermanager/${usermanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/usermanager.implementation/${usermanager.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/appauth/${appauth.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/connectionmanager/${connectionmanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/connectionmanager.implementation/${connectionmanager.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/containermanager/${containermanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/containermanager.implementation/${containermanager.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/statisticsmanager/${statisticsmanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/statisticsmanager.implementation/${statisticsmanager.implementation.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/switchmanager/${switchmanager.api.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/switchmanager.implementation/${switchmanager.implementation.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/forwardingrulesmanager/${forwardingrulesmanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/forwardingrulesmanager.implementation/${forwardingrulesmanager.implementation.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/topologymanager/${topologymanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/topologymanager.shell/${topologymanager.shell.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/hosttracker/${hosttracker.api.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/hosttracker.implementation/${hosttracker.implementation.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/hosttracker.shell/${hosttracker.shell.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/forwarding.staticrouting/${forwarding.staticrouting}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller.thirdparty/net.sf.jung2/2.0.1</bundle>
+ <bundle>mvn:org.opendaylight.controller/routing.dijkstra_implementation/${routing.dijkstra_implementation.version}</bundle>
+ </feature>
+
<feature name="odl-adsal-northbound" description="OpenDaylight :: AD-SAL :: Northbound APIs" version="${project.version}">
<feature version="${commons.opendaylight.version}">odl-base-all</feature>
<feature version="${project.version}">odl-nsf-managers</feature>
<bundle>mvn:org.opendaylight.controller/topology.northbound/${topology.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/usermanager.northbound/${usermanager.northbound.version}</bundle>
</feature>
+
+ <feature name="odl-adsal-controller-northbound" description="OpenDaylight :: AD-SAL :: Northbound APIs in Controller" version="${project.version}">
+ <feature version="${commons.opendaylight.version}">odl-base-all</feature>
+ <feature version="${project.version}">odl-nsf-managers</feature>
+ <bundle>mvn:org.ow2.asm/asm-all/${asm.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/bundlescanner/${bundlescanner.api.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/bundlescanner.implementation/${bundlescanner.implementation.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/commons.northbound/${northbound.commons.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/connectionmanager.northbound/${connectionmanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/subnets.northbound/${subnets.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/switchmanager.northbound/${switchmanager.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/topology.northbound/${topology.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/usermanager.northbound/${usermanager.northbound.version}</bundle>
+ </feature>
</features>
<properties>
<mdsal.version>1.2.0-SNAPSHOT</mdsal.version>
<yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
+ <configfile.directory>etc/opendaylight/karaf</configfile.directory>
</properties>
<dependencyManagement>
<dependencies>
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${symbol_dollar}{yangtools.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-mdsal/${symbol_dollar}{mdsal.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-restconf/${symbol_dollar}{mdsal.version}/xml/features</repository>
- <feature name='odl-${artifactId}-api' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: api '>
+ <feature name='odl-${artifactId}-api' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: api'>
<feature version='${symbol_dollar}{yangtools.version}'>odl-yangtools-models</feature>
<bundle>mvn:${groupId}/${artifactId}-api/${symbol_dollar}{project.version}</bundle>
</feature>
- <feature name='odl-${artifactId}-impl' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: impl '>
+ <feature name='odl-${artifactId}' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId}'>
<feature version='${symbol_dollar}{mdsal.version}'>odl-mdsal-broker</feature>
<feature version='${symbol_dollar}{project.version}'>odl-${artifactId}-api</feature>
<bundle>mvn:${groupId}/${artifactId}-impl/${symbol_dollar}{project.version}</bundle>
- <configfile finalname="${artifactId}-impl-default-config.xml">mvn:${groupId}/${artifactId}-impl/${symbol_dollar}{project.version}/xml/config</configfile>
+ <configfile finalname="${configfile.directory}/${artifactId}.xml">mvn:${groupId}/${artifactId}-impl/${symbol_dollar}{project.version}/xml/config</configfile>
</feature>
- <feature name='odl-${artifactId}-impl-rest' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: impl :: REST '>
- <feature version="${symbol_dollar}{project.version}">odl-${artifactId}-impl</feature>
+ <feature name='odl-${artifactId}-rest' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: REST'>
+ <feature version="${symbol_dollar}{project.version}">odl-${artifactId}</feature>
<feature version="${symbol_dollar}{mdsal.version}">odl-restconf</feature>
</feature>
- <feature name='odl-${artifactId}-impl-ui' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: impl :: UI'>
- <feature version="${symbol_dollar}{project.version}">odl-${artifactId}-impl-rest</feature>
+ <feature name='odl-${artifactId}-ui' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: UI'>
+ <feature version="${symbol_dollar}{project.version}">odl-${artifactId}-rest</feature>
<feature version="${symbol_dollar}{mdsal.version}">odl-mdsal-apidocs</feature>
<feature version="${symbol_dollar}{mdsal.version}">odl-mdsal-xsql</feature>
</feature>
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package ${package};
+package ${package}.impl;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
*/
package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210;
-import ${package}.${classPrefix}Provider;
+import ${package}.impl.${classPrefix}Provider;
public class ${classPrefix}Module extends org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210.Abstract${classPrefix}Module {
public ${classPrefix}Module(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package ${package};
+package ${package}.impl;
import org.junit.Test;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.config.api.JmxAttribute;
import org.opendaylight.controller.config.api.ModuleIdentifier;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import ${package}.${classPrefix}Provider;
+import ${package}.impl.${classPrefix}Provider;
import javax.management.ObjectName;
<maven>3.1.1</maven>
</prerequisites>
<properties>
- <karaf.localFeature>odl-${artifactId}-impl-ui</karaf.localFeature>
+ <karaf.localFeature>odl-${artifactId}-ui</karaf.localFeature>
</properties>
<dependencyManagement>
<dependencies>
<scope>runtime</scope>
</dependency>
</dependencies>
+ <!-- DO NOT install or deploy the karaf artifact -->
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
</project>
Set<String> getAvailableModuleNames();
-
- /**
- * Find all runtime beans
- *
- * @return objectNames
- */
- Set<ObjectName> lookupRuntimeBeans();
-
- /**
- * Find all runtime of specified module
- *
- * @param moduleName
- * of bean
- * @param instanceName
- * of bean
- * @return objectNames
- */
- Set<ObjectName> lookupRuntimeBeans(String moduleName, String instanceName);
-
}
*/
Set<String> getAvailableModuleFactoryQNames();
+ /**
+ * Find all runtime beans
+ *
+ * @return objectNames
+ */
+ Set<ObjectName> lookupRuntimeBeans();
+
+ /**
+ * Find all runtime beans of the specified module instance.
+ *
+ * @param moduleName
+ * module name of the bean
+ * @param instanceName
+ * instance name of the bean
+ * @return objectNames
+ */
+ Set<ObjectName> lookupRuntimeBeans(String moduleName, String instanceName);
}
public void checkConfigBeanExists(ObjectName objectName) throws InstanceNotFoundException {
txLookupRegistry.checkConfigBeanExists(objectName);
}
+
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ return txLookupRegistry.lookupRuntimeBeans();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans(String moduleName,
+ String instanceName) {
+ return txLookupRegistry.lookupRuntimeBeans(moduleName, instanceName);
+ }
+
// --
/**
return ModuleQNameUtil.getQNames(allCurrentFactories);
}
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ return lookupRuntimeBeans("*", "*");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans(String moduleName,
+ String instanceName) {
+ String finalModuleName = moduleName == null ? "*" : moduleName;
+ String finalInstanceName = instanceName == null ? "*" : instanceName;
+ ObjectName namePattern = ObjectNameUtil.createRuntimeBeanPattern(
+ finalModuleName, finalInstanceName);
+ return transactionJMXRegistrator.queryNames(namePattern, null);
+ }
@Override
public String toString() {
throw new UnsupportedOperationException();
}
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans(final String moduleName, final String instanceName) {
+ throw new UnsupportedOperationException();
+ }
+
@Override
public String toString() {
return "initial";
--- /dev/null
+package org.opendaylight.controller.config.util;
+
+import javax.management.ObjectName;
+
+/**
+ * Provides read access to the current value of configuration bean attributes.
+ */
+public interface BeanReader {
+ Object getAttributeCurrentValue(ObjectName on, String attributeName);
+}
import javax.management.ObjectName;
import org.opendaylight.controller.config.api.jmx.ConfigRegistryMXBean;
-public interface ConfigRegistryClient extends ConfigRegistryMXBean {
+public interface ConfigRegistryClient extends ConfigRegistryMXBean, BeanReader {
ConfigTransactionClient createTransaction();
Object invokeMethod(ObjectName on, String name, Object[] params,
String[] signature);
- Object getAttributeCurrentValue(ObjectName on, String attributeName);
-
}
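For orientation, a minimal usage sketch of the extracted BeanReader contract (illustrative only, not part of the patch; the attribute name is hypothetical and imports match the surrounding file):

    // Hedged sketch: ConfigRegistryClient now inherits getAttributeCurrentValue() from BeanReader.
    static Object readCurrentValue(final ConfigRegistryClient client, final ObjectName on) {
        return client.getAttributeCurrentValue(on, "heartbeat-interval"); // hypothetical attribute name
    }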
} catch (AttributeNotFoundException | InstanceNotFoundException
| MBeanException | ReflectionException e) {
throw new RuntimeException("Unable to get attribute "
- + attributeName + " for " + on, e);
+ + attributeName + " for " + on + ". Available beans: " + lookupConfigBeans(), e);
}
}
import org.opendaylight.controller.config.api.jmx.ConfigTransactionControllerMXBean;
public interface ConfigTransactionClient extends
- ConfigTransactionControllerMXBean {
+ ConfigTransactionControllerMXBean, BeanReader {
CommitStatus commit() throws ConflictingVersionException,
ValidationException;
* @param on - ObjectName of the Object from which the attribute should be read
* @param jmxName - name of the attribute to be read
*
- * @return Attribute of Object on with attribute name jmxName
+ * @return Object of Object on with attribute name jmxName
*/
Attribute getAttribute(ObjectName on, String jmxName);
}
configTransactionControllerMXBeanProxy.checkServiceReferenceExists(objectName);
}
+ @Override
+ public Attribute getAttribute(ObjectName on, String attrName) {
+ if (ObjectNameUtil.getTransactionName(on) == null) {
+ throw new IllegalArgumentException("Not in transaction instance "
+ + on + ", no transaction name present");
+ }
+
+ try {
+ return new Attribute(attrName, configMBeanServer.getAttribute(on,attrName));
+ } catch (JMException e) {
+ throw new IllegalStateException("Unable to get attribute "
+ + attrName + " for " + on, e);
+ }
+ }
+
+ @Override
+ public Object getAttributeCurrentValue(ObjectName on, String attrName) {
+ return getAttribute(on, attrName).getValue();
+ }
+
@Override
public void validateBean(ObjectName configBeanON)
throws ValidationException {
}
@Override
- public Attribute getAttribute(ObjectName on, String attrName) {
- if (ObjectNameUtil.getTransactionName(on) == null) {
- throw new IllegalArgumentException("Not in transaction instance "
- + on + ", no transaction name present");
- }
+ public Set<String> getAvailableModuleFactoryQNames() {
+ return configTransactionControllerMXBeanProxy.getAvailableModuleFactoryQNames();
+ }
- try {
- return new Attribute(attrName, configMBeanServer.getAttribute(on,attrName));
- } catch (JMException e) {
- throw new IllegalStateException("Unable to get attribute "
- + attrName + " for " + on, e);
- }
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ return configTransactionControllerMXBeanProxy.lookupRuntimeBeans();
}
@Override
- public Set<String> getAvailableModuleFactoryQNames() {
- return configTransactionControllerMXBeanProxy.getAvailableModuleFactoryQNames();
+ public Set<ObjectName> lookupRuntimeBeans(final String moduleName, final String instanceName) {
+ return configTransactionControllerMXBeanProxy.lookupRuntimeBeans(moduleName, instanceName);
}
}
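A similarly hedged sketch of the runtime bean lookups now exposed through ConfigTransactionClient (illustrative only, not part of the patch; module and instance names are hypothetical, and a null argument defaults to "*" in the implementation shown earlier):

    // Illustrative only; imports as in the surrounding files.
    static void dumpRuntimeBeans(final ConfigTransactionClient tx) {
        Set<ObjectName> all = tx.lookupRuntimeBeans();                 // every module, every instance
        Set<ObjectName> some = tx.lookupRuntimeBeans("shard-manager", "shard-manager-operational");
        System.out.println(all.size() + " runtime beans, " + some.size() + " matching");
    }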
package org.opendaylight.controller.config.util;
import com.google.common.collect.Sets;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
return Sets.newHashSet("availableModuleFactoryQNames");
}
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ return Collections.emptySet();
+ }
+
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans(final String moduleName, final String instanceName) {
+ return Collections.emptySet();
+ }
+
@Override
public ObjectName getServiceReference(String serviceInterfaceQName, String refName) throws InstanceNotFoundException {
return conf3;
*/
package org.opendaylight.controller.cluster.raft;
+import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@Override
public void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm) {
+ Preconditions.checkArgument(snapshotCapturedIndex >= snapshotIndex,
+ "snapshotCapturedIndex must be greater than or equal to snapshotIndex");
+
snapshottedJournal = new ArrayList<>(journal.size());
- snapshottedJournal.addAll(journal.subList(0, (int)(snapshotCapturedIndex - snapshotIndex)));
+ List<ReplicatedLogEntry> snapshotJournalEntries = journal.subList(0, (int) (snapshotCapturedIndex - snapshotIndex));
+
+ snapshottedJournal.addAll(snapshotJournalEntries);
clear(0, (int) (snapshotCapturedIndex - snapshotIndex));
previousSnapshotIndex = snapshotIndex;
context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getLastAppliedIndex(),
captureSnapshot.getLastAppliedTerm());
- } else {
+ getCurrentBehavior().setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
+ } else if(captureSnapshot.getReplicatedToAllIndex() != -1){
// clear the log based on replicatedToAllIndex
context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getReplicatedToAllIndex(),
captureSnapshot.getReplicatedToAllTerm());
+
+ getCurrentBehavior().setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
+ } else {
+ // The replicatedToAllIndex was not found in the log
+ // This means that replicatedToAllIndex never moved beyond -1 or that it is already in the snapshot.
+ // In this scenario we may need to save the snapshot to the akka persistence
+ // snapshot for recovery but we do not need to do the replicated log trimming.
+ context.getReplicatedLog().snapshotPreCommit(replicatedLog.getSnapshotIndex(),
+ replicatedLog.getSnapshotTerm());
}
- getCurrentBehavior().setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
+
LOG.info("{}: Removed in-memory snapshotted entries, adjusted snaphsotIndex:{} " +
"and term:{}", persistenceId(), captureSnapshot.getLastAppliedIndex(),
}
@Override
+ // FIXME : A lot of tests try to manipulate the replicated log by setting it using this method.
+ // This is OK if the underlyingActor is not a RaftActor or a derived class. Otherwise this setter should
+ // not be used to manipulate the log, because the RaftActor creates its own replicatedLog field
+ // internally and sets it on the RaftActorContext.
+ // The only correct way to manipulate the replicated log is therefore to get it from either the RaftActor
+ // or the RaftActorContext and modify the entries there, instead of replacing it via this setter.
+ // A simple assertion that will fail if you do so:
+ // ReplicatedLog log = new ReplicatedLogImpl();
+ // raftActor.underlyingActor().getRaftActorContext().setReplicatedLog(log);
+ // assertEquals(log, raftActor.underlyingActor().getReplicatedLog())
public void setReplicatedLog(ReplicatedLog replicatedLog) {
this.replicatedLog = replicatedLog;
}
};
}
+
+ private static class NonPersistentProvider implements DataPersistenceProvider {
+ @Override
+ public boolean isRecoveryApplicable() {
+ return false;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ try {
+ procedure.apply(o);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+
+ }
+ }
+
+ @Test
+ public void testRealSnapshotWhenReplicatedToAllIndexMinusOne() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ String persistenceId = factory.generateActorId("leader-");
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setSnapshotBatchCount(5);
+
+ DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+
+ Map<String, String> peerAddresses = new HashMap<>();
+
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor leaderActor = mockActorRef.underlyingActor();
+ leaderActor.getRaftActorContext().setCommitIndex(3);
+ leaderActor.getRaftActorContext().setLastApplied(3);
+ leaderActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+
+ leaderActor.waitForInitializeBehaviorComplete();
+ for(int i=0;i< 4;i++) {
+ leaderActor.getReplicatedLog()
+ .append(new MockRaftActorContext.MockReplicatedLogEntry(1, i,
+ new MockRaftActorContext.MockPayload("A")));
+ }
+
+ Leader leader = new Leader(leaderActor.getRaftActorContext());
+ leaderActor.setCurrentBehavior(leader);
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // Persist another entry (this will cause a CaptureSnapshot to be triggered)
+ leaderActor.persistData(mockActorRef, "x", new MockRaftActorContext.MockPayload("duh"));
+
+ // Now send a CaptureSnapshotReply
+ mockActorRef.tell(new CaptureSnapshotReply(fromObject("foo").toByteArray()), mockActorRef);
+
+ // Trimming log in this scenario is a no-op
+ assertEquals(-1, leaderActor.getReplicatedLog().getSnapshotIndex());
+ assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+ assertEquals(-1, leader.getReplicatedToAllIndex());
+
+ }};
+ }
+
+ @Test
+ public void testRealSnapshotWhenReplicatedToAllIndexNotInReplicatedLog() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ String persistenceId = factory.generateActorId("leader-");
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setSnapshotBatchCount(5);
+
+ DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+
+ Map<String, String> peerAddresses = new HashMap<>();
+
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor leaderActor = mockActorRef.underlyingActor();
+ leaderActor.getRaftActorContext().setCommitIndex(3);
+ leaderActor.getRaftActorContext().setLastApplied(3);
+ leaderActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+ leaderActor.getReplicatedLog().setSnapshotIndex(3);
+
+ leaderActor.waitForInitializeBehaviorComplete();
+ Leader leader = new Leader(leaderActor.getRaftActorContext());
+ leaderActor.setCurrentBehavior(leader);
+ leader.setReplicatedToAllIndex(3);
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // Persist another entry (this will cause a CaptureSnapshot to be triggered)
+ leaderActor.persistData(mockActorRef, "x", new MockRaftActorContext.MockPayload("duh"));
+
+ // Now send a CaptureSnapshotReply
+ mockActorRef.tell(new CaptureSnapshotReply(fromObject("foo").toByteArray()), mockActorRef);
+
+ // Trimming log in this scenario is a no-op
+ assertEquals(3, leaderActor.getReplicatedLog().getSnapshotIndex());
+ assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+ assertEquals(3, leader.getReplicatedToAllIndex());
+
+ }};
+ }
+
private ByteString fromObject(Object snapshot) throws Exception {
ByteArrayOutputStream b = null;
ObjectOutputStream o = null;
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import java.util.Collection;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+
+/**
+ * Modified Data Object.
+ *
+ * Represents a modification of a {@link DataObject}.
+ *
+ */
+public interface DataObjectModification<T extends DataObject> extends Identifiable<PathArgument> {
+
+ enum ModificationType {
+ /**
+ *
+ * Child node (direct or indirect) was modified.
+ *
+ */
+ SUBTREE_MODIFIED,
+ /**
+ *
+ * Node was explicitly created / overwritten.
+ *
+ */
+ WRITE,
+ /**
+ *
+ * Node was deleted.
+ *
+ */
+ DELETE
+ }
+
+ @Override
+ PathArgument getIdentifier();
+
+ /**
+ * Returns type of modified object.
+ *
+ * @return type of modified object.
+ */
+ @Nonnull Class<T> getDataType();
+
+ /**
+ * Returns the type of modification.
+ *
+ * @return Type of performed modification.
+ */
+ @Nonnull ModificationType getModificationType();
+
+ /**
+ * Returns the after-state of the top level container.
+ *
+ * @return State of the object after modification. Null if the subtree is not present.
+ */
+ @Nullable T getDataAfter();
+
+ /**
+ * Returns unmodifiable collection of modified direct children.
+ *
+ * @return unmodifiable collection of modified direct children.
+ */
+ @Nonnull Collection<DataObjectModification<? extends DataObject>> getModifiedChildren();
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import java.util.Collection;
+import java.util.EventListener;
+import javax.annotation.Nonnull;
+
+/**
+ * Interface implemented by classes interested in receiving notifications about
+ * data tree changes. This interface differs from {@link DataChangeListener}
+ * in that it provides a cursor-based view of the change, which has potentially
+ * lower overhead and allows more flexible consumption of change events.
+ */
+public interface DataTreeChangeListener extends EventListener {
+ /**
+ * Invoked when there was a data change for the supplied path, which was used
+ * to register this listener.
+ *
+ * <p>
+ * This method may be also invoked during registration of the listener if
+ * there is any pre-existing data in the conceptual data tree for supplied
+ * path. This initial event will contain all pre-existing data as created.
+ *
+ * <p>
+ * A data change event may be triggered spuriously, e.g. such that data before
+ * and after compare as equal. Implementations of this interface are expected
+ * to recover from such events. Event producers are expected to exert reasonable
+ * effort to suppress such events.
+ *
+ * In other words, it is completely acceptable to observe
+ * a {@link DataObjectModification} whose before- and after-state
+ * compare as equal.
+ *
+ * @param changes Collection of change events, may not be null or empty.
+ */
+ void onDataTreeChanged(@Nonnull Collection<DataTreeModification> changes);
+}
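To make the contract above concrete, a hedged listener sketch (illustrative only, not part of the patch; imports match the packages introduced above):

    // Illustrative implementation: logs the root path and after-state of each change.
    final class ChangeLogger implements DataTreeChangeListener {
        @Override
        public void onDataTreeChanged(final Collection<DataTreeModification> changes) {
            for (DataTreeModification change : changes) {
                DataObjectModification<? extends DataObject> root = change.getRootNode();
                switch (root.getModificationType()) {
                case WRITE:
                case SUBTREE_MODIFIED:
                    // getDataAfter() may be null per its javadoc if the subtree is not present
                    System.out.println(change.getRootPath() + " -> " + root.getDataAfter());
                    break;
                case DELETE:
                    System.out.println(change.getRootPath() + " deleted");
                    break;
                }
            }
        }
    }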
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * A {@link BindingService} which allows users to register for changes to a
+ * subtree.
+ */
+public interface DataTreeChangeService extends BindingService {
+ /**
+ * Registers a {@link DataTreeChangeListener} to receive
+ * notifications when data changes under a given path in the conceptual data
+ * tree.
+ * <p>
+ * You are able to register for notifications for any node or subtree
+ * which can be represented using {@link DataTreeIdentifier}.
+ * <p>
+ *
+ * You are able to register for data change notifications for a subtree or leaf
+ * even if it does not exist. You will receive notification once that node is
+ * created.
+ * <p>
+ * If there is any pre-existing data in the data tree for the path for which you are
+ * registering, you will receive an initial data change event, which will
+ * contain all pre-existing data, marked as created.
+ *
+ * <p>
+ * This method returns a {@link ListenerRegistration} object. To
+ * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
+ * method on the returned object.
+ * <p>
+ * You MUST explicitly unregister your listener when you no longer want to receive
+ * notifications. This is especially true in OSGi environments, where failure to
+ * do so during bundle shutdown can leave stale listeners registered.
+ *
+ * @param treeId
+ * Data tree identifier of the subtree which should be watched for
+ * changes.
+ * @param listener
+ * Listener instance which is being registered
+ * @return Listener registration object, which may be used to unregister
+ * your listener using {@link ListenerRegistration#close()} to stop
+ * delivery of change events.
+ */
+ @Nonnull <L extends DataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DataTreeIdentifier treeId, @Nonnull L listener);
+}
\ No newline at end of file
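A minimal registration sketch under the contract above (illustrative only, not part of the patch; "Nodes" is a hypothetical binding-generated container class and ChangeLogger is the listener sketched earlier):

    // Illustrative only: watch the operational subtree rooted at the hypothetical Nodes container.
    static ListenerRegistration<ChangeLogger> watchOperationalNodes(final DataTreeChangeService service) {
        DataTreeIdentifier treeId = new DataTreeIdentifier(LogicalDatastoreType.OPERATIONAL,
                InstanceIdentifier.create(Nodes.class));
        // Remember to close() the returned registration when notifications are no longer needed.
        return service.registerDataTreeChangeListener(treeId, new ChangeLogger());
    }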
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import com.google.common.base.Preconditions;
+import java.io.Serializable;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * A unique identifier for a particular subtree. It is composed of the logical
+ * data store type and the instance identifier of the root node.
+ */
+public final class DataTreeIdentifier implements Immutable, Path<DataTreeIdentifier>, Serializable {
+ private static final long serialVersionUID = 1L;
+ private final InstanceIdentifier<?> rootIdentifier;
+ private final LogicalDatastoreType datastoreType;
+
+ public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<?> rootIdentifier) {
+ this.datastoreType = Preconditions.checkNotNull(datastoreType);
+ this.rootIdentifier = Preconditions.checkNotNull(rootIdentifier);
+ }
+
+ /**
+ * Return the logical data store type.
+ *
+ * @return Logical data store type. Guaranteed to be non-null.
+ */
+ public @Nonnull LogicalDatastoreType getDatastoreType() {
+ return datastoreType;
+ }
+
+ /**
+ * Return the {@link InstanceIdentifier} of the root node.
+ *
+ * @return Instance identifier corresponding to the root node.
+ */
+ public @Nonnull InstanceIdentifier<?> getRootIdentifier() {
+ return rootIdentifier;
+ }
+
+ @Override
+ public boolean contains(final DataTreeIdentifier other) {
+ return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + datastoreType.hashCode();
+ result = prime * result + rootIdentifier.hashCode();
+ return result;
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof DataTreeIdentifier)) {
+ return false;
+ }
+ DataTreeIdentifier other = (DataTreeIdentifier) obj;
+ if (datastoreType != other.datastoreType) {
+ return false;
+ }
+ return rootIdentifier.equals(other.rootIdentifier);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.md.sal.binding.api;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+
+/**
+ * Represents the root of a data tree modification.
+ *
+ * @author Tony Tkacik <ttkacik@cisco.com>
+ *
+ */
+public interface DataTreeModification {
+
+ /**
+ * Get the modification root path. This is the path of the root node
+ * relative to the root of InstanceIdentifier namespace.
+ *
+ * @return absolute path of the root node
+ */
+ @Nonnull DataTreeIdentifier getRootPath();
+
+ /**
+ * Get the modification root node.
+ *
+ * @return modification root node
+ */
+ @Nonnull DataObjectModification<? extends DataObject> getRootNode();
+
+}
<type>xml</type>
<classifier>moduleconf</classifier>
</artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/datastore.cfg</file>
+ <type>cfg</type>
+ <classifier>datastore</classifier>
+ </artifact>
</artifacts>
</configuration>
</execution>
--- /dev/null
+# This file specifies property settings for the clustered data store to control its behavior. A
+# property may be applied to every data store type ("config" and "operational") or can be customized
+# differently for each data store type by prefixing the data store type + '.'. For example, specifying
+# the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
+# "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
+# specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+
+# The multiplication factor to be used to determine shard election timeout. The shard election timeout
+# is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
+#shard-election-timeout-factor=2
+
+# The interval at which a shard will send a heart beat message to its remote shard.
+#shard-heartbeat-interval-in-millis=500
+
+# The maximum amount of time to wait for a shard to elect a leader before failing an operation (e.g. transaction create).
+#shard-leader-election-timeout-in-seconds=30
+
+# Enable or disable data persistence.
+#persistent=true
+
+# Disable persistence for the operational data store by default.
+operational.persistent=false
+
+# The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.
+#shard-transaction-idle-timeout-in-minutes=10
+
+# The maximum amount of time a shard transaction three-phase commit can be idle without receiving the
+# next messages before it aborts the transaction.
+#shard-transaction-commit-timeout-in-seconds=30
+
+# The maximum allowed capacity for each shard's transaction commit queue.
+#shard-transaction-commit-queue-capacity=20000
+
+# The maximum amount of time to wait for a shard to initialize from persistence on startup before
+# failing an operation (e.g. transaction create and change listener registration).
+#shard-initialization-timeout-in-seconds=300
+
+# The maximum number of journal log entries to batch on recovery for applying to the data store before committing.
+#shard-journal-recovery-log-batch-size=5000
+
+# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-batch-count=20000
+
+# The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-data-threshold-percentage=12
+
+# The interval at which the leader of the shard will check whether a majority of its followers are
+# active and, if not, deem itself isolated.
+#shard-isolated-leader-check-interval-in-millis=5000
+
+# The number of transaction modification operations (put, merge, delete) to batch before sending to the
+# shard transaction actor. Batching improves performance as fewer modification messages are sent to the
+# actor and thus lessens the chance that the transaction actor's mailbox queue could get full.
+#shard-batched-modification-count=100
+
+# The maximum amount of time for akka operations (remote or local) to complete before failing.
+#operation-timeout-in-seconds=5
+
+# The initial number of transactions per second that are allowed before the data store should begin
+# applying back pressure. This number is only used as an initial guidance, subsequently the datastore
+# measures the latency for a commit and auto-adjusts the rate limit.
+#transaction-creation-initial-rate-limit=100
+
+# The maximum thread pool size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-pool-size=20
+
+# The maximum queue size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-queue-size=1000
+
+# The maximum queue size for each shard's data store data change listener.
+#max-shard-data-change-listener-queue-size=1000
+
+# The maximum queue size for each shard's data store executor.
+#max-shard-data-store-executor-queue-size=5000
+
package org.opendaylight.controller.cluster.datastore;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.AbstractListeningExecutorService;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
@Override
public CheckedFuture<Void, TransactionCommitFailedException> submit(DOMDataWriteTransaction transaction,
- Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
- final int cohortSize = Iterables.size(cohorts);
final AsyncNotifyingSettableFuture clientSubmitFuture =
new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
- doCanCommit(clientSubmitFuture, transaction, cohorts, cohortSize);
+ doCanCommit(clientSubmitFuture, transaction, cohorts);
return MappingCheckedFuture.create(clientSubmitFuture,
TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
private void doCanCommit(final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
final long startTime = System.nanoTime();
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Boolean> futureCallback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
if (result == null || !result) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize,
+ handleException(clientSubmitFuture, transaction, cohorts,
CAN_COMMIT, TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER,
new TransactionCommitFailedException(
"Can Commit failed, no detailed cause available."));
} else {
if(remaining.decrementAndGet() == 0) {
// All cohorts completed successfully - we can move on to the preCommit phase
- doPreCommit(startTime, clientSubmitFuture, transaction, cohorts, cohortSize);
+ doPreCommit(startTime, clientSubmitFuture, transaction, cohorts);
}
}
}
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT,
+ handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT,
TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER, t);
}
};
private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
@Override
public void onSuccess(Void notUsed) {
if(remaining.decrementAndGet() == 0) {
// All cohorts completed successfully - we can move on to the commit phase
- doCommit(startTime, clientSubmitFuture, transaction, cohorts, cohortSize);
+ doCommit(startTime, clientSubmitFuture, transaction, cohorts);
}
}
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, PRE_COMMIT,
+ handleException(clientSubmitFuture, transaction, cohorts, PRE_COMMIT,
TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER, t);
}
};
private void doCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
@Override
public void onSuccess(Void notUsed) {
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, COMMIT,
+ handleException(clientSubmitFuture, transaction, cohorts, COMMIT,
TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER, t);
}
};
private void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, int cohortSize,
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
final String phase, final TransactionCommitFailedExceptionMapper exMapper,
final Throwable t) {
// Transaction failed - tell all cohorts to abort.
@SuppressWarnings("unchecked")
- ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[cohortSize];
+ ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[cohorts.size()];
int i = 0;
for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
canCommitFutures[i++] = cohort.abort();
import akka.actor.ActorSelection;
import akka.actor.PoisonPill;
import akka.dispatch.OnComplete;
+import com.google.common.annotations.VisibleForTesting;
import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
import scala.concurrent.Future;
/**
public void init(final YangInstanceIdentifier path, final AsyncDataBroker.DataChangeScope scope) {
dataChangeListenerActor = actorContext.getActorSystem().actorOf(
- DataChangeListener.props(listener));
+ DataChangeListener.props(listener).withDispatcher(actorContext.getNotificationDispatcherPath()));
Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
findFuture.onComplete(new OnComplete<ActorRef>() {
doRegistration(shard, path, scope);
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
private void doRegistration(ActorRef shard, final YangInstanceIdentifier path,
reply.getListenerRegistrationPath()));
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
@Override
public static final int DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR = 2;
public static final int DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT = 100;
public static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
+ public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT = 100;
private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
private Duration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
private boolean persistent = DEFAULT_PERSISTENT;
private ConfigurationReader configurationReader = DEFAULT_CONFIGURATION_READER;
private long transactionCreationInitialRateLimit = DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
- private DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
+ private final DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
private String dataStoreType = UNKNOWN_DATA_STORE_TYPE;
+ private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
- private DatastoreContext(){
+ private DatastoreContext() {
setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
setSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT);
setHeartbeatInterval(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS);
setIsolatedLeaderCheckInterval(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS);
setSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE);
+ setElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR);
+ }
+
+ private DatastoreContext(DatastoreContext other) {
+ this.dataStoreProperties = other.dataStoreProperties;
+ this.shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
+ this.operationTimeoutInSeconds = other.operationTimeoutInSeconds;
+ this.dataStoreMXBeanType = other.dataStoreMXBeanType;
+ this.shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
+ this.shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
+ this.shardInitializationTimeout = other.shardInitializationTimeout;
+ this.shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
+ this.persistent = other.persistent;
+ this.configurationReader = other.configurationReader;
+ this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
+ this.dataStoreType = other.dataStoreType;
+ this.shardBatchedModificationCount = other.shardBatchedModificationCount;
+
+ setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize());
+ setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount());
+ setHeartbeatInterval(other.raftConfig.getHeartBeatInterval().toMillis());
+ setIsolatedLeaderCheckInterval(other.raftConfig.getIsolatedCheckIntervalInMillis());
+ setSnapshotDataThresholdPercentage(other.raftConfig.getSnapshotDataThresholdPercentage());
+ setElectionTimeoutFactor(other.raftConfig.getElectionTimeoutFactor());
}
public static Builder newBuilder() {
- return new Builder();
+ return new Builder(new DatastoreContext());
+ }
+
+ public static Builder newBuilderFrom(DatastoreContext context) {
+ return new Builder(new DatastoreContext(context));
}
public InMemoryDOMDataStoreConfigProperties getDataStoreProperties() {
raftConfig.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
}
- private void setSnapshotBatchCount(int shardSnapshotBatchCount) {
+ private void setSnapshotBatchCount(long shardSnapshotBatchCount) {
raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
}
+ public int getShardBatchedModificationCount() {
+ return shardBatchedModificationCount;
+ }
+
public static class Builder {
- private DatastoreContext datastoreContext = new DatastoreContext();
+ private final DatastoreContext datastoreContext;
+ private int maxShardDataChangeExecutorPoolSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE;
+ private int maxShardDataChangeExecutorQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE;
+ private int maxShardDataChangeListenerQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE;
+ private int maxShardDataStoreExecutorQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE;
+
+ private Builder(DatastoreContext datastoreContext) {
+ this.datastoreContext = datastoreContext;
+
+ if(datastoreContext.getDataStoreProperties() != null) {
+ maxShardDataChangeExecutorPoolSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize();
+ maxShardDataChangeExecutorQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeExecutorQueueSize();
+ maxShardDataChangeListenerQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeListenerQueueSize();
+ maxShardDataStoreExecutorQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataStoreExecutorQueueSize();
+ }
+ }
+
+ public Builder boundedMailboxCapacity(int boundedMailboxCapacity) {
+ // TODO - this is defined in the yang DataStoreProperties but not currently used.
+ return this;
+ }
- public Builder shardTransactionIdleTimeout(Duration shardTransactionIdleTimeout) {
- datastoreContext.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
+ public Builder enableMetricCapture(boolean enableMetricCapture) {
+ // TODO - this is defined in the yang DataStoreProperties but not currently used.
return this;
}
+
+ public Builder shardTransactionIdleTimeout(long timeout, TimeUnit unit) {
+ datastoreContext.shardTransactionIdleTimeout = Duration.create(timeout, unit);
+ return this;
+ }
+
+ public Builder shardTransactionIdleTimeoutInMinutes(long timeout) {
+ return shardTransactionIdleTimeout(timeout, TimeUnit.MINUTES);
+ }
+
public Builder operationTimeoutInSeconds(int operationTimeoutInSeconds) {
datastoreContext.operationTimeoutInSeconds = operationTimeoutInSeconds;
return this;
return this;
}
- public Builder dataStoreProperties(InMemoryDOMDataStoreConfigProperties dataStoreProperties) {
- datastoreContext.dataStoreProperties = dataStoreProperties;
- return this;
- }
-
public Builder shardTransactionCommitTimeoutInSeconds(int shardTransactionCommitTimeoutInSeconds) {
datastoreContext.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
return this;
return this;
}
+ public Builder shardInitializationTimeoutInSeconds(long timeout) {
+ return shardInitializationTimeout(timeout, TimeUnit.SECONDS);
+ }
+
public Builder shardLeaderElectionTimeout(long timeout, TimeUnit unit) {
datastoreContext.shardLeaderElectionTimeout = new Timeout(timeout, unit);
return this;
}
+ public Builder shardLeaderElectionTimeoutInSeconds(long timeout) {
+ return shardLeaderElectionTimeout(timeout, TimeUnit.SECONDS);
+ }
+
public Builder configurationReader(ConfigurationReader configurationReader){
datastoreContext.configurationReader = configurationReader;
return this;
return this;
}
+ public Builder shardBatchedModificationCount(int shardBatchedModificationCount) {
+ datastoreContext.shardBatchedModificationCount = shardBatchedModificationCount;
+ return this;
+ }
+
+ public Builder maxShardDataChangeExecutorPoolSize(int maxShardDataChangeExecutorPoolSize) {
+ this.maxShardDataChangeExecutorPoolSize = maxShardDataChangeExecutorPoolSize;
+ return this;
+ }
+
+ public Builder maxShardDataChangeExecutorQueueSize(int maxShardDataChangeExecutorQueueSize) {
+ this.maxShardDataChangeExecutorQueueSize = maxShardDataChangeExecutorQueueSize;
+ return this;
+ }
+
+ public Builder maxShardDataChangeListenerQueueSize(int maxShardDataChangeListenerQueueSize) {
+ this.maxShardDataChangeListenerQueueSize = maxShardDataChangeListenerQueueSize;
+ return this;
+ }
+
+ public Builder maxShardDataStoreExecutorQueueSize(int maxShardDataStoreExecutorQueueSize) {
+ this.maxShardDataStoreExecutorQueueSize = maxShardDataStoreExecutorQueueSize;
+ return this;
+ }
+
public DatastoreContext build() {
+ datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.create(
+ maxShardDataChangeExecutorPoolSize, maxShardDataChangeExecutorQueueSize,
+ maxShardDataChangeListenerQueueSize, maxShardDataStoreExecutorQueueSize);
return datastoreContext;
}
}
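For orientation, a hedged sketch of how the reworked Builder might be used (illustrative only, not part of the patch; values are arbitrary and only methods visible in the hunks above are called):

    // Illustrative only.
    DatastoreContext context = DatastoreContext.newBuilder()
            .shardTransactionIdleTimeoutInMinutes(10)
            .shardLeaderElectionTimeoutInSeconds(30)
            .shardBatchedModificationCount(100)
            .maxShardDataChangeExecutorPoolSize(20)
            .build();                       // build() also folds the max* settings into dataStoreProperties

    // newBuilderFrom() copies an existing context, so individual settings can be
    // overridden later without mutating the original instance.
    DatastoreContext updated = DatastoreContext.newBuilderFrom(context)
            .operationTimeoutInSeconds(5)
            .build();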
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.io.IOException;
+import java.util.Dictionary;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.service.cm.Configuration;
+import org.osgi.service.cm.ConfigurationAdmin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class that overlays DatastoreContext settings with settings obtained from an OSGi Config Admin
+ * service.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextConfigAdminOverlay implements AutoCloseable {
+ public static final String CONFIG_ID = "org.opendaylight.controller.cluster.datastore";
+
+ private static final Logger LOG = LoggerFactory.getLogger(DatastoreContextConfigAdminOverlay.class);
+
+ private final DatastoreContextIntrospector introspector;
+ private final BundleContext bundleContext;
+
+ public DatastoreContextConfigAdminOverlay(DatastoreContextIntrospector introspector, BundleContext bundleContext) {
+ this.introspector = introspector;
+ this.bundleContext = bundleContext;
+
+ ServiceReference<ConfigurationAdmin> configAdminServiceReference =
+ bundleContext.getServiceReference(ConfigurationAdmin.class);
+ if(configAdminServiceReference == null) {
+ LOG.warn("No ConfigurationAdmin service found");
+ } else {
+ overlaySettings(configAdminServiceReference);
+ }
+ }
+
+ private void overlaySettings(ServiceReference<ConfigurationAdmin> configAdminServiceReference) {
+ try {
+ ConfigurationAdmin configAdmin = bundleContext.getService(configAdminServiceReference);
+
+ Configuration config = configAdmin.getConfiguration(CONFIG_ID);
+ if(config != null) {
+ Dictionary<String, Object> properties = config.getProperties();
+
+ LOG.debug("Overlaying settings: {}", properties);
+
+ introspector.update(properties);
+ } else {
+ LOG.debug("No Configuration found for {}", CONFIG_ID);
+ }
+ } catch (IOException e) {
+ LOG.error("Error obtaining Configuration for pid {}", CONFIG_ID, e);
+ } catch(IllegalStateException e) {
+ // Ignore - indicates the bundleContext has been closed.
+ } finally {
+ try {
+ bundleContext.ungetService(configAdminServiceReference);
+ } catch (Exception e) {
+ LOG.debug("Error from ungetService", e);
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ }
+}
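The overlay above reads whatever Config Admin dictionary exists under the "org.opendaylight.controller.cluster.datastore" PID once, at data store creation time, and hands it to the DatastoreContextIntrospector; this change does not register a listener for later configuration updates. Below is a minimal, hypothetical sketch of seeding that configuration programmatically. The property keys are illustrative assumptions and must correspond to leaves of the DataStoreProperties yang grouping; in a Karaf deployment the same keys would normally come from a .cfg file backing the PID.

import java.util.Dictionary;
import java.util.Hashtable;
import org.osgi.framework.BundleContext;
import org.osgi.service.cm.Configuration;
import org.osgi.service.cm.ConfigurationAdmin;

// Hypothetical helper, not part of this patch.
static void seedDatastoreConfig(BundleContext bundleContext) throws Exception {
    ConfigurationAdmin configAdmin = bundleContext.getService(
            bundleContext.getServiceReference(ConfigurationAdmin.class));
    Configuration config = configAdmin.getConfiguration(DatastoreContextConfigAdminOverlay.CONFIG_ID);

    Dictionary<String, Object> props = new Hashtable<>();
    props.put("operation-timeout-in-seconds", "20");  // unprefixed: applies to every data store type
    props.put("operational.persistent", "false");     // prefixed: applies only to the "operational" data store

    config.update(props);
}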
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.primitives.Primitives;
+import java.beans.BeanInfo;
+import java.beans.ConstructorProperties;
+import java.beans.IntrospectionException;
+import java.beans.Introspector;
+import java.beans.MethodDescriptor;
+import java.beans.PropertyDescriptor;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Dictionary;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.text.WordUtils;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Introspects on a DatastoreContext instance to set its properties via reflection.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextIntrospector {
+ private static final Logger LOG = LoggerFactory.getLogger(DatastoreContextIntrospector.class);
+
+ private static final Map<String, Class<?>> dataStorePropTypes = new HashMap<>();
+
+ private static final Map<Class<?>, Constructor<?>> constructors = new HashMap<>();
+
+ private static final Map<Class<?>, Method> yangTypeGetters = new HashMap<>();
+
+ private static final Map<String, Method> builderSetters = new HashMap<>();
+
+ static {
+ try {
+ introspectDatastoreContextBuilder();
+ introspectDataStoreProperties();
+ introspectPrimitiveTypes();
+ } catch (IntrospectionException e) {
+ LOG.error("Error initializing DatastoreContextIntrospector", e);
+ }
+ }
+
+ /**
+ * Introspects each primitive wrapper (e.g. Integer, Long) and String type to find the
+ * constructor that takes a single String argument. For primitive wrappers, this constructor
+ * converts from a String representation.
+ */
+ private static void introspectPrimitiveTypes() {
+
+ Set<Class<?>> primitives = ImmutableSet.<Class<?>>builder().addAll(
+ Primitives.allWrapperTypes()).add(String.class).build();
+ for(Class<?> primitive: primitives) {
+ try {
+ processPropertyType(primitive);
+ } catch (Exception e) {
+ // Ignore primitives that can't be constructed from a String, eg Character and Void.
+ }
+ }
+ }
+
+ /**
+ * Introspects the DatastoreContext.Builder class to find all its setter methods that we will
+ * invoke via reflection. We can't use the bean Introspector here as the Builder setters don't
+ * follow the bean property naming convention, ie setter prefixed with "set", so look for all
+ * the methods that return Builder.
+ */
+ private static void introspectDatastoreContextBuilder() {
+ for(Method method: Builder.class.getMethods()) {
+ if(Builder.class.equals(method.getReturnType())) {
+ builderSetters.put(method.getName(), method);
+ }
+ }
+ }
+
+ /**
+ * Introspects the DataStoreProperties interface that is generated from the DataStoreProperties
+ * yang grouping. We use the bean Introspector to find the types of all the properties defined
+ * in the interface (this is the type returned from the getter method). For each type, we find
+ * the appropriate constructor that we will use.
+ */
+ private static void introspectDataStoreProperties() throws IntrospectionException {
+ BeanInfo beanInfo = Introspector.getBeanInfo(DataStoreProperties.class);
+ for(PropertyDescriptor desc: beanInfo.getPropertyDescriptors()) {
+ processDataStoreProperty(desc.getName(), desc.getPropertyType());
+ }
+
+ // Getter methods that return Boolean and start with "is" instead of "get" aren't recognized as
+ // properties and thus aren't returned from getPropertyDescriptors. A getter starting with
+ // "is" is only supported if it returns primitive boolean. So we'll check for these via
+ // getMethodDescriptors.
+ for(MethodDescriptor desc: beanInfo.getMethodDescriptors()) {
+ String methodName = desc.getName();
+ if(Boolean.class.equals(desc.getMethod().getReturnType()) && methodName.startsWith("is")) {
+ String propertyName = WordUtils.uncapitalize(methodName.substring(2));
+ processDataStoreProperty(propertyName, Boolean.class);
+ }
+ }
+ }
+
+ /**
+ * Processes a property defined on the DataStoreProperties interface.
+ */
+ private static void processDataStoreProperty(String name, Class<?> propertyType) {
+ Preconditions.checkArgument(builderSetters.containsKey(name), String.format(
+ "DataStoreProperties property \"%s\" does not have corresponding setter in DatastoreContext.Builder", name));
+ try {
+ processPropertyType(propertyType);
+ dataStorePropTypes.put(name, propertyType);
+ } catch (Exception e) {
+ LOG.error("Error finding constructor for type {}", propertyType, e);
+ }
+ }
+
+ /**
+ * Finds the appropriate constructor for the specified type that we will use to construct
+ * instances.
+ */
+ private static void processPropertyType(Class<?> propertyType) throws Exception {
+ Class<?> wrappedType = Primitives.wrap(propertyType);
+ if(constructors.containsKey(wrappedType)) {
+ return;
+ }
+
+ // If the type is a primitive (or String type), we look for the constructor that takes a
+ // single String argument, which, for primitives, validates and converts from a String
+ // representation which is the form we get on ingress.
+ if(propertyType.isPrimitive() || Primitives.isWrapperType(propertyType) ||
+ propertyType.equals(String.class))
+ {
+ constructors.put(wrappedType, propertyType.getConstructor(String.class));
+ } else {
+ // This must be a yang-defined type. We need to find the constructor that takes a
+ // primitive as the only argument. This will be used to construct instances to perform
+ // validation (eg range checking). The yang-generated types have a couple single-argument
+ // constructors but the one we want has the bean ConstructorProperties annotation.
+ for(Constructor<?> ctor: propertyType.getConstructors()) {
+ ConstructorProperties ctorPropsAnnotation = ctor.getAnnotation(ConstructorProperties.class);
+ if(ctor.getParameterTypes().length == 1 && ctorPropsAnnotation != null) {
+ findYangTypeGetter(propertyType, ctorPropsAnnotation.value()[0]);
+ constructors.put(propertyType, ctor);
+ break;
+ }
+ }
+ }
+ }
+
+ /**
+ * Finds the getter method on a yang-generated type for the specified property name.
+ */
+ private static void findYangTypeGetter(Class<?> type, String propertyName)
+ throws Exception {
+ for(PropertyDescriptor desc: Introspector.getBeanInfo(type).getPropertyDescriptors()) {
+ if(desc.getName().equals(propertyName)) {
+ yangTypeGetters.put(type, desc.getReadMethod());
+ return;
+ }
+ }
+
+ throw new IllegalArgumentException(String.format(
+ "Getter method for constructor property %s not found for YANG type %s",
+ propertyName, type));
+ }
+
+ private DatastoreContext context;
+
+ public DatastoreContextIntrospector(DatastoreContext context) {
+ this.context = context;
+ }
+
+ public DatastoreContext getContext() {
+ return context;
+ }
+
+ /**
+ * Applies the given properties to the cached DatastoreContext and yields a new DatastoreContext
+ * instance which can be obtained via {@link #getContext()}.
+ *
+ * @param properties the properties to apply
+ * @return true if the cached DatastoreContext was updated, false otherwise.
+ */
+ public boolean update(Dictionary<String, Object> properties) {
+ if(properties == null || properties.isEmpty()) {
+ return false;
+ }
+
+ Builder builder = DatastoreContext.newBuilderFrom(context);
+
+ final String dataStoreTypePrefix = context.getDataStoreType() + '.';
+
+ // Sort the property keys by putting the names prefixed with the data store type last. This
+ // is done so data store specific settings are applied after global settings.
+ ArrayList<String> keys = Collections.list(properties.keys());
+ Collections.sort(keys, new Comparator<String>() {
+ @Override
+ public int compare(String key1, String key2) {
+ return key1.startsWith(dataStoreTypePrefix) ? 1 :
+ key2.startsWith(dataStoreTypePrefix) ? -1 : key1.compareTo(key2);
+ }
+ });
+
+ boolean updated = false;
+ for(String key: keys) {
+ Object value = properties.get(key);
+ try {
+ // If the key is prefixed with the data store type, strip it off.
+ if(key.startsWith(dataStoreTypePrefix)) {
+ key = key.replaceFirst(dataStoreTypePrefix, "");
+ }
+
+ key = convertToCamelCase(key);
+
+ // Convert the value to the right type.
+ value = convertValue(key, value);
+ if(value == null) {
+ continue;
+ }
+
+ LOG.debug("Converted value for property {}: {} ({})",
+ key, value, value.getClass().getSimpleName());
+
+ // Call the setter method on the Builder instance.
+ Method setter = builderSetters.get(key);
+ setter.invoke(builder, constructorValueRecursively(
+ Primitives.wrap(setter.getParameterTypes()[0]), value.toString()));
+
+ updated = true;
+
+ } catch (Exception e) {
+ LOG.error("Error converting value ({}) for property {}", value, key, e);
+ }
+ }
+
+ if(updated) {
+ context = builder.build();
+ }
+
+ return updated;
+ }
+
+ private String convertToCamelCase(String inString) {
+ String str = inString.trim();
+ if(StringUtils.contains(str, '-') || StringUtils.contains(str, ' ')) {
+ str = inString.replace('-', ' ');
+ str = WordUtils.capitalizeFully(str);
+ str = StringUtils.deleteWhitespace(str);
+ }
+
+ return StringUtils.uncapitalize(str);
+ }
+
+ private Object convertValue(String name, Object from) throws Exception {
+ Class<?> propertyType = dataStorePropTypes.get(name);
+ if(propertyType == null) {
+ LOG.debug("Property not found for {}", name);
+ return null;
+ }
+
+ LOG.debug("Type for property {}: {}, converting value {} ({})",
+ name, propertyType.getSimpleName(), from, from.getClass().getSimpleName());
+
+ // Recurse the chain of constructors depth-first to get the resulting value. Eg, if the
+ // property type is the yang-generated NonZeroUint32Type, its constructor takes a Long so
+ // we have to first construct a Long instance from the input value.
+ Object converted = constructorValueRecursively(propertyType, from.toString());
+
+ // If the converted type is a yang-generated type, call the getter to obtain the actual value.
+ Method getter = yangTypeGetters.get(converted.getClass());
+ if(getter != null) {
+ converted = getter.invoke(converted);
+ }
+
+ return converted;
+ }
+
+ private Object constructorValueRecursively(Class<?> toType, Object fromValue) throws Exception {
+ LOG.debug("convertValueRecursively - toType: {}, fromValue {} ({})",
+ toType.getSimpleName(), fromValue, fromValue.getClass().getSimpleName());
+
+ Constructor<?> ctor = constructors.get(toType);
+
+ LOG.debug("Found {}", ctor);
+
+ if(ctor == null) {
+ throw new IllegalArgumentException(String.format("Constructor not found for type %s", toType));
+ }
+
+ Object value = fromValue;
+
+ // Since the original input type is a String, once we find a constructor that takes a String
+ // argument, we're done recursing.
+ if(!ctor.getParameterTypes()[0].equals(String.class)) {
+ value = constructorValueRecursively(ctor.getParameterTypes()[0], fromValue);
+ }
+
+ return ctor.newInstance(value);
+ }
+}
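A minimal usage sketch of the introspector follows, assuming the shard-transaction-idle-timeout-in-minutes leaf exists in the DataStoreProperties grouping (as the corresponding Builder setter suggests); the values are illustrative only.

import java.util.Dictionary;
import java.util.Hashtable;

// Hypothetical usage, not part of this patch. "base" is an existing DatastoreContext whose
// data store type is "config".
static DatastoreContext overlayExample(DatastoreContext base) {
    DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(base);

    Dictionary<String, Object> props = new Hashtable<>();
    // Hyphenated keys are converted to camelCase ("shardTransactionIdleTimeoutInMinutes") and
    // matched against the Builder setters collected by introspectDatastoreContextBuilder().
    props.put("shard-transaction-idle-timeout-in-minutes", "10");
    // Keys prefixed with the data store type sort last and are applied after the global value,
    // so the "config" data store ends up with 5; introspectors for other types skip this key.
    props.put("config.shard-transaction-idle-timeout-in-minutes", "5");

    introspector.update(props);
    return introspector.getContext();
}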
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
private final ActorContext actorContext;
+ private AutoCloseable closeable;
+
+ private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
+
public DistributedDataStore(ActorSystem actorSystem, ClusterWrapper cluster,
Configuration configuration, DatastoreContext datastoreContext) {
Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
LOG.info("Creating ShardManager : {}", shardManagerId);
+ String shardDispatcher =
+ new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
+
actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
ShardManager.props(cluster, configuration, datastoreContext)
- .withMailbox(ActorContext.MAILBOX), shardManagerId ),
+ .withDispatcher(shardDispatcher).withMailbox(ActorContext.MAILBOX), shardManagerId ),
cluster, configuration, datastoreContext);
+
+ datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
+ datastoreConfigMXBean.setContext(datastoreContext);
+ datastoreConfigMXBean.registerMBean();
}
public DistributedDataStore(ActorContext actorContext) {
this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
}
+ public void setCloseable(AutoCloseable closeable) {
+ this.closeable = closeable;
+ }
+
@SuppressWarnings("unchecked")
@Override
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
}
@Override
- public void close() throws Exception {
+ public void close() {
+ datastoreConfigMXBean.unregisterMBean();
+
+ if(closeable != null) {
+ try {
+ closeable.close();
+ } catch (Exception e) {
+ LOG.debug("Error closing insance", e);
+ }
+ }
+
actorContext.shutdown();
}
private static volatile ActorSystem persistentActorSystem = null;
public static DistributedDataStore createInstance(SchemaService schemaService,
- DatastoreContext datastoreContext, BundleContext bundleContext) {
+ DatastoreContext datastoreContext, BundleContext bundleContext) {
+
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(datastoreContext);
+ DatastoreContextConfigAdminOverlay overlay = new DatastoreContextConfigAdminOverlay(
+ introspector, bundleContext);
ActorSystem actorSystem = getOrCreateInstance(bundleContext, datastoreContext.getConfigurationReader());
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
- final DistributedDataStore dataStore =
- new DistributedDataStore(actorSystem, new ClusterWrapperImpl(actorSystem),
- config, datastoreContext);
+ final DistributedDataStore dataStore = new DistributedDataStore(actorSystem,
+ new ClusterWrapperImpl(actorSystem), config, introspector.getContext());
ShardStrategyFactory.setConfiguration(config);
schemaService.registerSchemaContextListener(dataStore);
+
+ dataStore.setCloseable(overlay);
return dataStore;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
+import org.opendaylight.controller.cluster.datastore.messages.MergeData;
+import org.opendaylight.controller.cluster.datastore.messages.WriteData;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Implementation of TransactionContextImpl used when talking to a pre-Lithium controller that doesn't
+ * support the BatchedModifications message.
+ *
+ * @author Thomas Pantelis
+ */
+class LegacyTransactionContextImpl extends TransactionContextImpl {
+
+ LegacyTransactionContextImpl(String transactionPath, ActorSelection actor, TransactionIdentifier identifier,
+ ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
+ short remoteTransactionVersion, OperationCompleter operationCompleter) {
+ super(transactionPath, actor, identifier, actorContext, schemaContext, isTxActorLocal,
+ remoteTransactionVersion, operationCompleter);
+ }
+
+ @Override
+ public void deleteData(YangInstanceIdentifier path) {
+ recordedOperationFutures.add(executeOperationAsync(
+ new DeleteData(path, getRemoteTransactionVersion())));
+ }
+
+ @Override
+ public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ recordedOperationFutures.add(executeOperationAsync(
+ new MergeData(path, data, getRemoteTransactionVersion())));
+ }
+
+ @Override
+ public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ recordedOperationFutures.add(executeOperationAsync(
+ new WriteData(path, data, getRemoteTransactionVersion())));
+ }
+}
import akka.dispatch.OnComplete;
import com.google.common.base.Preconditions;
import java.util.concurrent.Semaphore;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
final class OperationCompleter extends OnComplete<Object> {
private final Semaphore operationLimiter;
}
@Override
- public void onComplete(Throwable throwable, Object o){
- this.operationLimiter.release();
+ public void onComplete(Throwable throwable, Object message) {
+ if(message instanceof BatchedModificationsReply) {
+ this.operationLimiter.release(((BatchedModificationsReply)message).getNumBatched());
+ } else {
+ this.operationLimiter.release();
+ }
}
}
\ No newline at end of file
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
+import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
private final Optional<ActorRef> roleChangeNotifier;
+ private final MessageTracker appendEntriesReplyTracker;
+
/**
* Coordinates persistence recovery on startup.
*/
private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
+ private final String txnDispatcherPath;
+
protected Shard(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
super(name.toString(), mapPeerAddresses(peerAddresses),
this.name = name;
this.datastoreContext = datastoreContext;
this.schemaContext = schemaContext;
- this.dataPersistenceProvider = (datastoreContext.isPersistent()) ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
+ this.dataPersistenceProvider = (datastoreContext.isPersistent())
+ ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
+ this.txnDispatcherPath = new Dispatchers(context().system().dispatchers())
+ .getDispatcherPath(Dispatchers.DispatcherType.Transaction);
+
LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
// create a notifier actor for each cluster member
roleChangeNotifier = createRoleChangeNotifier(name.toString());
+
+ appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
+ getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
}
private static Map<String, String> mapPeerAddresses(
onRecoveryComplete();
} else {
super.onReceiveRecover(message);
+ if(LOG.isTraceEnabled()) {
+ appendEntriesReplyTracker.begin();
+ }
}
}
@Override
public void onReceiveCommand(final Object message) throws Exception {
- if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
- handleCreateTransaction(message);
- } else if(message instanceof ForwardedReadyTransaction) {
- handleForwardedReadyTransaction((ForwardedReadyTransaction)message);
- } else if(message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
- handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
- } else if(message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
- handleCommitTransaction(CommitTransaction.fromSerializable(message));
- } else if(message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
- handleAbortTransaction(AbortTransaction.fromSerializable(message));
- } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
- closeTransactionChain(CloseTransactionChain.fromSerializable(message));
- } else if (message instanceof RegisterChangeListener) {
- registerChangeListener((RegisterChangeListener) message);
- } else if (message instanceof UpdateSchemaContext) {
- updateSchemaContext((UpdateSchemaContext) message);
- } else if (message instanceof PeerAddressResolved) {
- PeerAddressResolved resolved = (PeerAddressResolved) message;
- setPeerAddress(resolved.getPeerId().toString(),
- resolved.getPeerAddress());
- } else if(message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
- handleTransactionCommitTimeoutCheck();
- } else {
- super.onReceiveCommand(message);
+
+ MessageTracker.Context context = appendEntriesReplyTracker.received(message);
+
+ if(context.error().isPresent()){
+ LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
+ context.error());
+ }
+
+ try {
+ if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
+ handleCreateTransaction(message);
+ } else if (message instanceof ForwardedReadyTransaction) {
+ handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
+ } else if (message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
+ handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
+ } else if (message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
+ handleCommitTransaction(CommitTransaction.fromSerializable(message));
+ } else if (message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
+ handleAbortTransaction(AbortTransaction.fromSerializable(message));
+ } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)) {
+ closeTransactionChain(CloseTransactionChain.fromSerializable(message));
+ } else if (message instanceof RegisterChangeListener) {
+ registerChangeListener((RegisterChangeListener) message);
+ } else if (message instanceof UpdateSchemaContext) {
+ updateSchemaContext((UpdateSchemaContext) message);
+ } else if (message instanceof PeerAddressResolved) {
+ PeerAddressResolved resolved = (PeerAddressResolved) message;
+ setPeerAddress(resolved.getPeerId().toString(),
+ resolved.getPeerAddress());
+ } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
+ handleTransactionCommitTimeoutCheck();
+ } else {
+ super.onReceiveCommand(message);
+ }
+ } finally {
+ context.done();
}
}
shardMBean.incrementReadOnlyTransactionCount();
- return getContext().actorOf(
- ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
- schemaContext,datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion),
- transactionId.toString());
+ return createShardTransaction(factory.newReadOnlyTransaction(), transactionId, clientVersion);
} else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
shardMBean.incrementReadWriteTransactionCount();
- return getContext().actorOf(
- ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
- schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion),
- transactionId.toString());
-
+ return createShardTransaction(factory.newReadWriteTransaction(), transactionId, clientVersion);
} else if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
shardMBean.incrementWriteOnlyTransactionCount();
- return getContext().actorOf(
- ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
- schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion),
- transactionId.toString());
+ return createShardTransaction(factory.newWriteOnlyTransaction(), transactionId, clientVersion);
} else {
throw new IllegalArgumentException(
"Shard="+name + ":CreateTransaction message has unidentified transaction type="
}
}
+ private ActorRef createShardTransaction(DOMStoreTransaction transaction, ShardTransactionIdentifier transactionId,
+ short clientVersion){
+ return getContext().actorOf(
+ ShardTransaction.props(transaction, getSelf(),
+ schemaContext, datastoreContext, shardMBean,
+ transactionId.getRemoteTransactionId(), clientVersion)
+ .withDispatcher(txnDispatcherPath),
+ transactionId.toString());
+
+ }
+
private void createTransaction(CreateTransaction createTransaction) {
try {
ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
private final Configuration configuration;
+ private final String shardDispatcherPath;
+
private ShardManagerInfoMBean mBean;
private final DatastoreContext datastoreContext;
this.datastoreContext = datastoreContext;
this.dataPersistenceProvider = createDataPersistenceProvider(datastoreContext.isPersistent());
this.type = datastoreContext.getDataStoreType();
+ this.shardDispatcherPath =
+ new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
// Subscribe this actor to cluster member events
cluster.subscribeToMemberEvents(getSelf());
for (ShardInformation info : localShards.values()) {
if (info.getActor() == null) {
info.setActor(getContext().actorOf(Shard.props(info.getShardId(),
- info.getPeerAddresses(), datastoreContext, schemaContext),
- info.getShardId().toString()));
+ info.getPeerAddresses(), datastoreContext, schemaContext)
+ .withDispatcher(shardDispatcherPath), info.getShardId().toString()));
} else {
info.getActor().tell(message, getSelf());
}
try {
final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future = transaction.read(path);
Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
- ReadDataReply readDataReply = new ReadDataReply(optional.orNull());
+ ReadDataReply readDataReply = new ReadDataReply(optional.orNull(), clientTxVersion);
- sender().tell((returnSerialized ? readDataReply.toSerializable(clientTxVersion): readDataReply), self());
+ sender().tell((returnSerialized ? readDataReply.toSerializable(): readDataReply), self());
} catch (Exception e) {
LOG.debug(String.format("Unexpected error reading path %s", path), e);
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.CompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
*/
public class ShardWriteTransaction extends ShardTransaction {
- private final MutableCompositeModification modification = new MutableCompositeModification();
+ private final MutableCompositeModification compositeModification = new MutableCompositeModification();
private final DOMStoreWriteTransaction transaction;
public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
@Override
public void handleReceive(Object message) throws Exception {
- if (message instanceof WriteData) {
- writeData(transaction, (WriteData) message, !SERIALIZED_REPLY);
-
- } else if (message instanceof MergeData) {
- mergeData(transaction, (MergeData) message, !SERIALIZED_REPLY);
-
- } else if (message instanceof DeleteData) {
- deleteData(transaction, (DeleteData) message, !SERIALIZED_REPLY);
-
+ if (message instanceof BatchedModifications) {
+ batchedModifications((BatchedModifications)message);
} else if (message instanceof ReadyTransaction) {
readyTransaction(transaction, !SERIALIZED_REPLY);
-
+ } else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ readyTransaction(transaction, SERIALIZED_REPLY);
} else if(WriteData.isSerializedType(message)) {
writeData(transaction, WriteData.fromSerializable(message), SERIALIZED_REPLY);
} else if(DeleteData.isSerializedType(message)) {
deleteData(transaction, DeleteData.fromSerializable(message), SERIALIZED_REPLY);
- } else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readyTransaction(transaction, SERIALIZED_REPLY);
-
} else if (message instanceof GetCompositedModification) {
// This is here for testing only
- getSender().tell(new GetCompositeModificationReply(modification), getSelf());
+ getSender().tell(new GetCompositeModificationReply(compositeModification), getSelf());
} else {
super.handleReceive(message);
}
}
+ private void batchedModifications(BatchedModifications batched) {
+ try {
+ for(Modification modification: batched.getModifications()) {
+ compositeModification.addModification(modification);
+ modification.apply(transaction);
+ }
+
+ getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
+ } catch (Exception e) {
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
+ }
+
private void writeData(DOMStoreWriteTransaction transaction, WriteData message,
boolean returnSerialized) {
LOG.debug("writeData at path : {}", message.getPath());
- modification.addModification(
+ compositeModification.addModification(
new WriteModification(message.getPath(), message.getData()));
try {
transaction.write(message.getPath(), message.getData());
boolean returnSerialized) {
LOG.debug("mergeData at path : {}", message.getPath());
- modification.addModification(
+ compositeModification.addModification(
new MergeModification(message.getPath(), message.getData()));
try {
boolean returnSerialized) {
LOG.debug("deleteData at path : {}", message.getPath());
- modification.addModification(new DeleteModification(message.getPath()));
+ compositeModification.addModification(new DeleteModification(message.getPath()));
try {
transaction.delete(message.getPath());
DeleteDataReply deleteDataReply = DeleteDataReply.INSTANCE;
DOMStoreThreePhaseCommitCohort cohort = transaction.ready();
getShardActor().forward(new ForwardedReadyTransaction(transactionID, getClientTxVersion(),
- cohort, modification, returnSerialized), getContext());
+ cohort, compositeModification, returnSerialized), getContext());
// The shard will handle the commit from here so we're no longer needed - self-destruct.
getSelf().tell(PoisonPill.getInstance(), getSelf());
private Future<Void> buildCohortList() {
Future<Iterable<ActorSelection>> combinedFutures = Futures.sequence(cohortFutures,
- actorContext.getActorSystem().dispatcher());
+ actorContext.getClientDispatcher());
return combinedFutures.transform(new AbstractFunction1<Iterable<ActorSelection>, Void>() {
@Override
}
return null;
}
- }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher());
+ }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
}
@Override
finishCanCommit(returnFuture);
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
return returnFuture;
}
}
returnFuture.set(Boolean.valueOf(result));
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
private Future<Iterable<Object>> invokeCohorts(Object message) {
futureList.add(actorContext.executeOperationAsync(cohort, message, actorContext.getTransactionCommitOperationTimeout()));
}
- return Futures.sequence(futureList, actorContext.getActorSystem().dispatcher());
+ return Futures.sequence(futureList, actorContext.getClientDispatcher());
}
@Override
propagateException, returnFuture, callback);
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
return returnFuture;
callback.success();
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
@VisibleForTesting
import com.google.common.util.concurrent.SettableFuture;
import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
-import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
-import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.SerializableMessage;
-import org.opendaylight.controller.cluster.datastore.messages.VersionedSerializableMessage;
-import org.opendaylight.controller.cluster.datastore.messages.WriteData;
+import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
-final class TransactionContextImpl extends AbstractTransactionContext {
+class TransactionContextImpl extends AbstractTransactionContext {
private static final Logger LOG = LoggerFactory.getLogger(TransactionContextImpl.class);
private final ActorContext actorContext;
private final ActorSelection actor;
private final boolean isTxActorLocal;
private final short remoteTransactionVersion;
- private final OperationCompleter operationCompleter;
+ private final OperationCompleter operationCompleter;
+ private BatchedModifications batchedModifications;
TransactionContextImpl(String transactionPath, ActorSelection actor, TransactionIdentifier identifier,
ActorContext actorContext, SchemaContext schemaContext,
}
private Future<Object> completeOperation(Future<Object> operationFuture){
- operationFuture.onComplete(this.operationCompleter, actorContext.getActorSystem().dispatcher());
+ operationFuture.onComplete(this.operationCompleter, actorContext.getClientDispatcher());
return operationFuture;
}
return actor;
}
- private Future<Object> executeOperationAsync(SerializableMessage msg) {
- return completeOperation(actorContext.executeOperationAsync(getActor(), isTxActorLocal ? msg : msg.toSerializable()));
+ protected short getRemoteTransactionVersion() {
+ return remoteTransactionVersion;
}
- private Future<Object> executeOperationAsync(VersionedSerializableMessage msg) {
- return completeOperation(actorContext.executeOperationAsync(getActor(), isTxActorLocal ? msg :
- msg.toSerializable(remoteTransactionVersion)));
+ protected Future<Object> executeOperationAsync(SerializableMessage msg) {
+ return completeOperation(actorContext.executeOperationAsync(getActor(), isTxActorLocal ? msg : msg.toSerializable()));
}
@Override
LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
identifier, recordedOperationFutures.size());
+ // Send the remaining batched modifications if any.
+
+ sendBatchedModifications();
+
// Send the ReadyTransaction message to the Tx actor.
final Future<Object> replyFuture = executeOperationAsync(ReadyTransaction.INSTANCE);
futureList.add(replyFuture);
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(futureList,
- actorContext.getActorSystem().dispatcher());
+ actorContext.getClientDispatcher());
// Transform the combined Future into a Future that returns the cohort actor path from
// the ReadyTransactionReply. That's the end result of the ready operation.
serializedReadyReply.getClass()));
}
}
- }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher());
+ }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
+ }
+
+ private void batchModification(Modification modification) {
+ if(batchedModifications == null) {
+ batchedModifications = new BatchedModifications(remoteTransactionVersion);
+ }
+
+ batchedModifications.addModification(modification);
+
+ if(batchedModifications.getModifications().size() >=
+ actorContext.getDatastoreContext().getShardBatchedModificationCount()) {
+ sendBatchedModifications();
+ }
+ }
+
+ private void sendBatchedModifications() {
+ if(batchedModifications != null) {
+ LOG.debug("Tx {} sending {} batched modifications", identifier,
+ batchedModifications.getModifications().size());
+
+ recordedOperationFutures.add(executeOperationAsync(batchedModifications));
+ batchedModifications = null;
+ }
}
@Override
public void deleteData(YangInstanceIdentifier path) {
LOG.debug("Tx {} deleteData called path = {}", identifier, path);
- recordedOperationFutures.add(executeOperationAsync(new DeleteData(path)));
+ batchModification(new DeleteModification(path));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
LOG.debug("Tx {} mergeData called path = {}", identifier, path);
- recordedOperationFutures.add(executeOperationAsync(new MergeData(path, data)));
+ batchModification(new MergeModification(path, data));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
LOG.debug("Tx {} writeData called path = {}", identifier, path);
- recordedOperationFutures.add(executeOperationAsync(new WriteData(path, data)));
+ batchModification(new WriteModification(path, data));
}
@Override
LOG.debug("Tx {} readData called path = {}", identifier, path);
+ // Send the remaining batched modifications if any.
+
+ sendBatchedModifications();
+
// If there were any previous recorded put/merge/delete operation reply Futures then we
// must wait for them to successfully complete. This is necessary to honor the read
// uncommitted semantics of the public API contract. If any one fails then fail the read.
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
Lists.newArrayList(recordedOperationFutures),
- actorContext.getActorSystem().dispatcher());
+ actorContext.getClientDispatcher());
OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
@Override
}
};
- combinedFutures.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
+ combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
}
}
Future<Object> readFuture = executeOperationAsync(new ReadData(path));
- readFuture.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
+ readFuture.onComplete(onComplete, actorContext.getClientDispatcher());
}
@Override
LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ // Send the remaining batched modifications if any.
+
+ sendBatchedModifications();
+
// If there were any previous recorded put/merge/delete operation reply Futures then we
// must wait for them to successfully complete. This is necessary to honor the read
// uncommitted semantics of the public API contract. If any one fails then fail this
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
Lists.newArrayList(recordedOperationFutures),
- actorContext.getActorSystem().dispatcher());
+ actorContext.getClientDispatcher());
OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
@Override
public void onComplete(Throwable failure, Iterable<Object> notUsed)
}
};
- combinedFutures.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
+ combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
}
}
Future<Object> future = executeOperationAsync(new DataExists(path));
- future.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
+ future.onComplete(onComplete, actorContext.getClientDispatcher());
}
}
private void throttleOperation(int acquirePermits) {
try {
- if(!operationLimiter.tryAcquire(acquirePermits, actorContext.getDatastoreContext().getOperationTimeoutInSeconds(), TimeUnit.SECONDS)){
+ if(!operationLimiter.tryAcquire(acquirePermits,
+ actorContext.getDatastoreContext().getOperationTimeoutInSeconds(), TimeUnit.SECONDS)){
LOG.warn("Failed to acquire operation permit for transaction {}", getIdentifier());
}
} catch (InterruptedException e) {
newTxFutureCallback.setPrimaryShard(primaryShard);
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
return txFutureCallback;
TransactionProxy.this.transactionType.ordinal(),
getTransactionChainId()).toSerializable());
- createTxFuture.onComplete(this, actorContext.getActorSystem().dispatcher());
+ createTxFuture.onComplete(this, actorContext.getClientDispatcher());
}
@Override
public void run() {
tryCreateTransaction();
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
return;
}
}
private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
String transactionPath = reply.getTransactionPath();
- LOG.debug("Tx {} Received transaction actor path {}", identifier, transactionPath);
+ LOG.debug("Tx {} Received {}", identifier, reply);
ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
// Check if TxActor is created in the same node
boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
- return new TransactionContextImpl(transactionPath, transactionActor, identifier,
- actorContext, schemaContext, isTxActorLocal, reply.getVersion(), operationCompleter);
+ if(reply.getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
+ return new TransactionContextImpl(transactionPath, transactionActor, identifier,
+ actorContext, schemaContext, isTxActorLocal, reply.getVersion(), operationCompleter);
+ } else {
+ return new LegacyTransactionContextImpl(transactionPath, transactionActor, identifier,
+ actorContext, schemaContext, isTxActorLocal, reply.getVersion(), operationCompleter);
+ }
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+
+/**
+ * MXBean interface for data store configuration.
+ *
+ * @author Thomas Pantelis
+ */
+public interface DatastoreConfigurationMXBean {
+ long getShardTransactionIdleTimeoutInSeconds();
+
+ long getOperationTimeoutInSeconds();
+
+ long getShardHeartbeatIntervalInMillis();
+
+ int getShardJournalRecoveryLogBatchSize();
+
+ long getShardIsolatedLeaderCheckIntervalInMillis();
+
+ long getShardElectionTimeoutFactor();
+
+ int getShardSnapshotDataThresholdPercentage();
+
+ long getShardSnapshotBatchCount();
+
+ long getShardTransactionCommitTimeoutInSeconds();
+
+ int getShardTransactionCommitQueueCapacity();
+
+ long getShardInitializationTimeoutInSeconds();
+
+ long getShardLeaderElectionTimeoutInSeconds();
+
+ boolean isPersistent();
+
+ long getTransactionCreationInitialRateLimit();
+
+ int getMaxShardDataChangeExecutorPoolSize();
+
+ int getMaxShardDataChangeExecutorQueueSize();
+
+ int getMaxShardDataChangeListenerQueueSize();
+
+ int getMaxShardDataStoreExecutorQueueSize();
+}
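Because DatastoreConfigurationMXBeanImpl (below) registers this interface through AbstractMXBean, the effective settings can be read at runtime from any JMX client. The sketch below is hypothetical: the ObjectName layout (domain and type/Category/name keys) and the "DistributedConfigDatastore" type value are assumptions about AbstractMXBean's naming and the configured data store MXBean type, so verify them against what a JMX console actually shows.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Hypothetical JMX read, not part of this patch.
static long readShardTransactionIdleTimeout() throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Assumed ObjectName; adjust to the name actually registered by AbstractMXBean.
    ObjectName name = new ObjectName(
            "org.opendaylight.controller:type=DistributedConfigDatastore,Category=Configuration,name=Datastore");
    return (Long) server.getAttribute(name, "ShardTransactionIdleTimeoutInSeconds");
}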
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+
+/**
+ * Implementation of DatastoreConfigurationMXBean.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements DatastoreConfigurationMXBean {
+ public static final String JMX_CATEGORY_CONFIGURATION = "Configuration";
+
+ private DatastoreContext context;
+
+ public DatastoreConfigurationMXBeanImpl(String mxBeanType) {
+ super("Datastore", mxBeanType, JMX_CATEGORY_CONFIGURATION);
+ }
+
+ public void setContext(DatastoreContext context) {
+ this.context = context;
+ }
+
+ @Override
+ public long getShardTransactionIdleTimeoutInSeconds() {
+ return context.getShardTransactionIdleTimeout().toSeconds();
+ }
+
+ @Override
+ public long getOperationTimeoutInSeconds() {
+ return context.getOperationTimeoutInSeconds();
+ }
+
+ @Override
+ public long getShardHeartbeatIntervalInMillis() {
+ return context.getShardRaftConfig().getHeartBeatInterval().toMillis();
+ }
+
+ @Override
+ public int getShardJournalRecoveryLogBatchSize() {
+ return context.getShardRaftConfig().getJournalRecoveryLogBatchSize();
+ }
+
+ @Override
+ public long getShardIsolatedLeaderCheckIntervalInMillis() {
+ return context.getShardRaftConfig().getIsolatedCheckIntervalInMillis();
+ }
+
+ @Override
+ public long getShardElectionTimeoutFactor() {
+ return context.getShardRaftConfig().getElectionTimeoutFactor();
+ }
+
+ @Override
+ public int getShardSnapshotDataThresholdPercentage() {
+ return context.getShardRaftConfig().getSnapshotDataThresholdPercentage();
+ }
+
+ @Override
+ public long getShardSnapshotBatchCount() {
+ return context.getShardRaftConfig().getSnapshotBatchCount();
+ }
+
+ @Override
+ public long getShardTransactionCommitTimeoutInSeconds() {
+ return context.getShardTransactionCommitTimeoutInSeconds();
+ }
+
+ @Override
+ public int getShardTransactionCommitQueueCapacity() {
+ return context.getShardTransactionCommitQueueCapacity();
+ }
+
+ @Override
+ public long getShardInitializationTimeoutInSeconds() {
+ return context.getShardInitializationTimeout().duration().toSeconds();
+ }
+
+ @Override
+ public long getShardLeaderElectionTimeoutInSeconds() {
+ return context.getShardLeaderElectionTimeout().duration().toSeconds();
+ }
+
+ @Override
+ public boolean isPersistent() {
+ return context.isPersistent();
+ }
+
+ @Override
+ public long getTransactionCreationInitialRateLimit() {
+ return context.getTransactionCreationInitialRateLimit();
+ }
+
+ @Override
+ public int getMaxShardDataChangeExecutorPoolSize() {
+ return context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize();
+ }
+
+ @Override
+ public int getMaxShardDataChangeExecutorQueueSize() {
+ return context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize();
+ }
+
+ @Override
+ public int getMaxShardDataChangeListenerQueueSize() {
+ return context.getDataStoreProperties().getMaxDataChangeListenerQueueSize();
+ }
+
+ @Override
+ public int getMaxShardDataStoreExecutorQueueSize() {
+ return context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+
+/**
+ * Message used to batch write, merge, and delete modification operations to the ShardTransaction actor.
+ *
+ * @author Thomas Pantelis
+ */
+public class BatchedModifications extends MutableCompositeModification implements SerializableMessage {
+ private static final long serialVersionUID = 1L;
+
+ public BatchedModifications() {
+ }
+
+ public BatchedModifications(short version) {
+ super(version);
+ }
+
+ @Override
+ public Object toSerializable() {
+ return this;
+ }
+}
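Taken together with the client-side batching added to TransactionContextImpl and the shard-side handling in ShardWriteTransaction above, the round trip looks roughly like the sketch below; the paths, nodes, transaction and actor references are placeholders, not values from this patch.

// Client side: operations accumulate and are flushed as one message per batch
// (see TransactionContextImpl.batchModification/sendBatchedModifications).
BatchedModifications batch = new BatchedModifications(DataStoreVersions.LITHIUM_VERSION);
batch.addModification(new WriteModification(somePath, someNode));
batch.addModification(new DeleteModification(otherPath));
// ... executeOperationAsync(batch) then sends it to the ShardTransaction actor.

// Shard side (see ShardWriteTransaction.batchedModifications): apply every contained
// modification to the write transaction and acknowledge the count, which OperationCompleter
// uses on the client to release the matching number of throttling permits.
for (Modification modification : batch.getModifications()) {
    modification.apply(writeTransaction);
}
getSender().tell(new BatchedModificationsReply(batch.getModifications().size()), getSelf());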
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * The reply for the BatchedModifications message.
+ *
+ * @author Thomas Pantelis
+ */
+public class BatchedModificationsReply extends VersionedExternalizableMessage {
+ private static final long serialVersionUID = 1L;
+
+ private int numBatched;
+
+ public BatchedModificationsReply() {
+ }
+
+ public BatchedModificationsReply(int numBatched) {
+ this.numBatched = numBatched;
+ }
+
+
+ public int getNumBatched() {
+ return numBatched;
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ super.readExternal(in);
+ numBatched = in.readInt();
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ super.writeExternal(out);
+ out.writeInt(numBatched);
+ }
+
+ @Override
+ public Object toSerializable() {
+ return this;
+ }
+}
(short)o.getMessageVersion());
}
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("CreateTransactionReply [transactionPath=").append(transactionPath).append(", transactionId=")
+ .append(transactionId).append(", version=").append(version).append("]");
+ return builder.toString();
+ }
}
package org.opendaylight.controller.cluster.datastore.messages;
-import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-public class DeleteData implements VersionedSerializableMessage, Externalizable {
+/**
+ * @deprecated Replaced by BatchedModifications.
+ */
+@Deprecated
+public class DeleteData extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
public static final Class<DeleteData> SERIALIZABLE_CLASS = DeleteData.class;
private YangInstanceIdentifier path;
- private short version;
public DeleteData() {
}
- public DeleteData(final YangInstanceIdentifier path) {
+ public DeleteData(final YangInstanceIdentifier path, short version) {
+ super(version);
this.path = path;
}
return path;
}
- public short getVersion() {
- return version;
- }
-
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- version = in.readShort(); // Read the version - don't need to do anything with it now
+ super.readExternal(in);
path = SerializationUtils.deserializePath(in);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(version);
+ super.writeExternal(out);
SerializationUtils.serializePath(path, out);
}
@Override
- public Object toSerializable(short toVersion) {
- if(toVersion >= DataStoreVersions.LITHIUM_VERSION) {
- version = toVersion;
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
return this;
} else {
// To base or R1 Helium version
} else {
// From base or R1 Helium version
ShardTransactionMessages.DeleteData o = (ShardTransactionMessages.DeleteData) serializable;
- return new DeleteData(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()));
+ return new DeleteData(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()),
+ DataStoreVersions.HELIUM_2_VERSION);
}
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+/**
+ * @deprecated Replaced by BatchedModificationsReply.
+ */
+@Deprecated
public class DeleteDataReply extends EmptyReply {
+ private static final long serialVersionUID = 1L;
private static final Object LEGACY_SERIALIZED_INSTANCE =
ShardTransactionMessages.DeleteDataReply.newBuilder().build();
*
* @author Thomas Pantelis
*/
-public abstract class EmptyReply extends EmptyExternalizable implements VersionedSerializableMessage {
+public abstract class EmptyReply extends EmptyExternalizable {
private final Object legacySerializedInstance;
this.legacySerializedInstance = legacySerializedInstance;
}
- @Override
public Object toSerializable(short toVersion) {
return toVersion >= DataStoreVersions.LITHIUM_VERSION ? this : legacySerializedInstance;
}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public class MergeData extends ModifyData implements VersionedSerializableMessage {
+/**
+ * @deprecated Replaced by BatchedModifications.
+ */
+@Deprecated
+public class MergeData extends ModifyData {
private static final long serialVersionUID = 1L;
public static final Class<MergeData> SERIALIZABLE_CLASS = MergeData.class;
public MergeData() {
}
- public MergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- super(path, data);
+ public MergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data, short version) {
+ super(path, data, version);
}
@Override
- public Object toSerializable(short toVersion) {
- if(toVersion >= DataStoreVersions.LITHIUM_VERSION) {
- setVersion(toVersion);
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
return this;
} else {
// To base or R1 Helium version
ShardTransactionMessages.MergeData o = (ShardTransactionMessages.MergeData) serializable;
Decoded decoded = new NormalizedNodeToNodeCodec(null).decode(
o.getInstanceIdentifierPathArguments(), o.getNormalizedNode());
- return new MergeData(decoded.getDecodedPath(), decoded.getDecodedNode());
+ return new MergeData(decoded.getDecodedPath(), decoded.getDecodedNode(),
+ DataStoreVersions.HELIUM_2_VERSION);
}
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+/**
+ * @deprecated Replaced by BatchedModificationsReply.
+ */
+@Deprecated
public class MergeDataReply extends EmptyReply {
private static final long serialVersionUID = 1L;
package org.opendaylight.controller.cluster.datastore.messages;
-import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public abstract class ModifyData implements Externalizable {
+/**
+ * @deprecated Replaced by BatchedModifications.
+ */
+@Deprecated
+public abstract class ModifyData extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
private YangInstanceIdentifier path;
private NormalizedNode<?, ?> data;
- private short version;
protected ModifyData() {
}
- protected ModifyData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ protected ModifyData(YangInstanceIdentifier path, NormalizedNode<?, ?> data, short version) {
+ super(version);
this.path = path;
this.data = data;
}
return data;
}
- public short getVersion() {
- return version;
- }
-
- protected void setVersion(short version) {
- this.version = version;
- }
-
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- version = in.readShort();
+ super.readExternal(in);
SerializationUtils.deserializePathAndNode(in, this, APPLIER);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(version);
+ super.writeExternal(out);
SerializationUtils.serializePathAndNode(path, data, out);
}
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.protobuf.ByteString;
-import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public class ReadDataReply implements VersionedSerializableMessage, Externalizable {
+public class ReadDataReply extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
public static final Class<ReadDataReply> SERIALIZABLE_CLASS = ReadDataReply.class;
private NormalizedNode<?, ?> normalizedNode;
- private short version;
public ReadDataReply() {
}
- public ReadDataReply(NormalizedNode<?, ?> normalizedNode) {
+ public ReadDataReply(NormalizedNode<?, ?> normalizedNode, short version) {
+ super(version);
this.normalizedNode = normalizedNode;
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- version = in.readShort();
+ super.readExternal(in);
normalizedNode = SerializationUtils.deserializeNormalizedNode(in);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(version);
+ super.writeExternal(out);
SerializationUtils.serializeNormalizedNode(normalizedNode, out);
}
@Override
- public Object toSerializable(short toVersion) {
- if(toVersion >= DataStoreVersions.LITHIUM_VERSION) {
- version = toVersion;
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
return this;
} else {
return toSerializableReadDataReply(normalizedNode);
} else {
ShardTransactionMessages.ReadDataReply o =
(ShardTransactionMessages.ReadDataReply) serializable;
- return new ReadDataReply(new NormalizedNodeToNodeCodec(null).decode(o.getNormalizedNode()));
+ return new ReadDataReply(new NormalizedNodeToNodeCodec(null).decode(o.getNormalizedNode()),
+ DataStoreVersions.HELIUM_2_VERSION);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Abstract base class for a versioned Externalizable message.
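+ * <p>
+ * Subclasses read and write the version first by delegating to this class. A minimal sketch of a
+ * concrete subclass (the class and field names are illustrative):
+ * <pre>
+ *     public class SampleMessage extends VersionedExternalizableMessage {
+ *         private String payload;
+ *
+ *         public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ *             super.readExternal(in);  // reads the version
+ *             payload = (String) in.readObject();
+ *         }
+ *
+ *         public void writeExternal(ObjectOutput out) throws IOException {
+ *             super.writeExternal(out); // writes the version
+ *             out.writeObject(payload);
+ *         }
+ *
+ *         public Object toSerializable() {
+ *             return this;
+ *         }
+ *     }
+ * </pre>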
+ *
+ * @author Thomas Pantelis
+ */
+public abstract class VersionedExternalizableMessage implements Externalizable, SerializableMessage {
+ private static final long serialVersionUID = 1L;
+
+ private short version;
+
+ public VersionedExternalizableMessage() {
+ }
+
+ public VersionedExternalizableMessage(short version) {
+ this.version = version;
+ }
+
+ public short getVersion() {
+ return version;
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ version = in.readShort();
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ out.writeShort(version);
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-/**
- * Interface for a Serializable message with versioning.
- *
- * @author Thomas Pantelis
- */
-public interface VersionedSerializableMessage {
- Object toSerializable(short toVersion);
-}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public class WriteData extends ModifyData implements VersionedSerializableMessage {
+/**
+ * @deprecated Replaced by BatchedModifications.
+ */
+@Deprecated
+public class WriteData extends ModifyData {
private static final long serialVersionUID = 1L;
public static final Class<WriteData> SERIALIZABLE_CLASS = WriteData.class;
public WriteData() {
}
- public WriteData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- super(path, data);
+ public WriteData(YangInstanceIdentifier path, NormalizedNode<?, ?> data, short version) {
+ super(path, data, version);
}
@Override
- public Object toSerializable(short toVersion) {
- if(toVersion >= DataStoreVersions.LITHIUM_VERSION) {
- setVersion(toVersion);
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
return this;
} else {
// To base or R1 Helium version
ShardTransactionMessages.WriteData o = (ShardTransactionMessages.WriteData) serializable;
Decoded decoded = new NormalizedNodeToNodeCodec(null).decode(
o.getInstanceIdentifierPathArguments(), o.getNormalizedNode());
- return new WriteData(decoded.getDecodedPath(), decoded.getDecodedNode());
+ return new WriteData(decoded.getDecodedPath(), decoded.getDecodedNode(),
+ DataStoreVersions.HELIUM_2_VERSION);
}
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+/**
+ * @deprecated Replaced by BatchedModificationsReply.
+ */
+@Deprecated
public class WriteDataReply extends EmptyReply {
private static final long serialVersionUID = 1L;
public abstract class AbstractModification implements Modification {
private YangInstanceIdentifier path;
+ private short version;
- protected AbstractModification() {
+ protected AbstractModification(short version) {
+ this.version = version;
}
protected AbstractModification(YangInstanceIdentifier path) {
public YangInstanceIdentifier getPath() {
return path;
}
+
+ public short getVersion() {
+ return version;
+ }
}
private static final long serialVersionUID = 1L;
public DeleteModification() {
+ this(DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public DeleteModification(short version) {
+ super(version);
}
public DeleteModification(YangInstanceIdentifier path) {
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- in.readShort();
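+ // The version is no longer read from the stream; it is supplied via the constructor
+ // (e.g. by the enclosing MutableCompositeModification).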
setPath(SerializationUtils.deserializePath(in));
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(DataStoreVersions.CURRENT_VERSION);
SerializationUtils.serializePath(getPath(), out);
}
return new DeleteModification(InstanceIdentifierUtils.fromSerializable(o.getPath()));
}
- public static DeleteModification fromStream(ObjectInput in) throws ClassNotFoundException, IOException {
- DeleteModification mod = new DeleteModification();
+ public static DeleteModification fromStream(ObjectInput in, short version)
+ throws ClassNotFoundException, IOException {
+ DeleteModification mod = new DeleteModification(version);
mod.readExternal(in);
return mod;
}
import java.io.IOException;
import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Decoded;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
private static final long serialVersionUID = 1L;
public MergeModification() {
+ this(DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public MergeModification(short version) {
+ super(version);
}
public MergeModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
return new MergeModification(decoded.getDecodedPath(), decoded.getDecodedNode());
}
- public static MergeModification fromStream(ObjectInput in) throws ClassNotFoundException, IOException {
- MergeModification mod = new MergeModification();
+ public static MergeModification fromStream(ObjectInput in, short version)
+ throws ClassNotFoundException, IOException {
+ MergeModification mod = new MergeModification(version);
mod.readExternal(in);
return mod;
}
public class MutableCompositeModification implements CompositeModification {
private static final long serialVersionUID = 1L;
- private final List<Modification> modifications;
+ private final List<Modification> modifications = new ArrayList<>();
+ private short version;
public MutableCompositeModification() {
- modifications = new ArrayList<>();
+ this(DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public MutableCompositeModification(short version) {
+ this.version = version;
}
@Override
return COMPOSITE;
}
+ public short getVersion() {
+ return version;
+ }
+
+ public void setVersion(short version) {
+ this.version = version;
+ }
+
/**
* Add a new Modification to the list of Modifications represented by this
* composite
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- in.readShort();
+ version = in.readShort();
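+ // Propagate the stream version to each nested modification so they deserialize consistently.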
int size = in.readInt();
byte type = in.readByte();
switch(type) {
case Modification.WRITE:
- modifications.add(WriteModification.fromStream(in));
+ modifications.add(WriteModification.fromStream(in, version));
break;
case Modification.MERGE:
- modifications.add(MergeModification.fromStream(in));
+ modifications.add(MergeModification.fromStream(in, version));
break;
case Modification.DELETE:
- modifications.add(DeleteModification.fromStream(in));
+ modifications.add(DeleteModification.fromStream(in, version));
break;
}
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(DataStoreVersions.CURRENT_VERSION);
+ out.writeShort(version);
out.writeInt(modifications.size());
builder.setTimeStamp(System.nanoTime());
for (Modification m : modifications) {
- builder.addModification(
- (PersistentMessages.Modification) m.toSerializable());
+ builder.addModification((PersistentMessages.Modification) m.toSerializable());
}
return builder.build();
private NormalizedNode<?, ?> data;
public WriteModification() {
+ this(DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public WriteModification(short version) {
+ super(version);
}
public WriteModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- in.readShort(); // version
-
SerializationUtils.deserializePathAndNode(in, this, APPLIER);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(DataStoreVersions.CURRENT_VERSION);
SerializationUtils.serializePathAndNode(getPath(), data, out);
}
return new WriteModification(decoded.getDecodedPath(), decoded.getDecodedNode());
}
- public static WriteModification fromStream(ObjectInput in) throws ClassNotFoundException, IOException {
- WriteModification mod = new WriteModification();
+ public static WriteModification fromStream(ObjectInput in, short version)
+ throws ClassNotFoundException, IOException {
+ WriteModification mod = new WriteModification(version);
mod.readExternal(in);
return mod;
}
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Await;
+import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
private final JmxReporter jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(DOMAIN).build();
private final int transactionOutstandingOperationLimit;
private final Timeout transactionCommitOperationTimeout;
+ private final Dispatchers dispatchers;
private volatile SchemaContext schemaContext;
this.configuration = configuration;
this.datastoreContext = datastoreContext;
this.txRateLimiter = RateLimiter.create(datastoreContext.getTransactionCreationInitialRateLimit());
+ this.dispatchers = new Dispatchers(actorSystem.dispatchers());
operationDuration = Duration.create(datastoreContext.getOperationTimeoutInSeconds(), TimeUnit.SECONDS);
operationTimeout = new Timeout(operationDuration);
transactionOutstandingOperationLimit = new CommonConfig(this.getActorSystem().settings().config()).getMailBoxCapacity();
jmxReporter.start();
}
public DatastoreContext getDatastoreContext() {
throw new UnknownMessageException(String.format(
"FindPrimary returned unkown response: %s", response));
}
- }, FIND_PRIMARY_FAILURE_TRANSFORMER, getActorSystem().dispatcher());
+ }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
}
/**
throw new UnknownMessageException(String.format(
"FindLocalShard returned unkown response: %s", response));
}
- }, getActorSystem().dispatcher());
+ }, getClientDispatcher());
}
private String findPrimaryPathOrNull(String shardName) {
return transactionCommitOperationTimeout;
}
+ /**
+ * Returns the Akka dispatcher to be used when processing ask Futures that were triggered by client
+ * code on the datastore.
+ *
+ * @return the client dispatcher's ExecutionContext, or the default dispatcher if none is configured
+ */
+ public ExecutionContext getClientDispatcher() {
+ return this.dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
+ }
+
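+ /**
+ * Returns the configured path of the notification dispatcher, falling back to the default dispatcher
+ * path if a dedicated notification dispatcher is not configured.
+ */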
+ public String getNotificationDispatcherPath(){
+ return this.dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.base.Preconditions;
+import scala.concurrent.ExecutionContext;
+
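+/**
+ * Maps logical dispatcher types to the Akka dispatchers configured for the actor system, falling back to
+ * the default dispatcher when a custom one is not configured. A minimal usage sketch (the actorSystem
+ * variable is illustrative):
+ * <pre>
+ *     Dispatchers dispatchers = new Dispatchers(actorSystem.dispatchers());
+ *     ExecutionContext clientDispatcher = dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
+ *     String notificationPath = dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
+ * </pre>
+ */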
+public class Dispatchers {
+ public static final String DEFAULT_DISPATCHER_PATH = "akka.actor.default-dispatcher";
+ public static final String CLIENT_DISPATCHER_PATH = "client-dispatcher";
+ public static final String TXN_DISPATCHER_PATH = "txn-dispatcher";
+ public static final String SHARD_DISPATCHER_PATH = "shard-dispatcher";
+ public static final String NOTIFICATION_DISPATCHER_PATH = "notification-dispatcher";
+
+ private final akka.dispatch.Dispatchers dispatchers;
+
+ public static enum DispatcherType {
+ Client(CLIENT_DISPATCHER_PATH),
+ Transaction(TXN_DISPATCHER_PATH),
+ Shard(SHARD_DISPATCHER_PATH),
+ Notification(NOTIFICATION_DISPATCHER_PATH);
+
+ private final String path;
+ private DispatcherType(String path){
+ this.path = path;
+ }
+ private String path(akka.dispatch.Dispatchers dispatchers){
+ if(dispatchers.hasDispatcher(path)){
+ return path;
+ }
+ return DEFAULT_DISPATCHER_PATH;
+ }
+
+ private ExecutionContext dispatcher(akka.dispatch.Dispatchers dispatchers){
+ if(dispatchers.hasDispatcher(path)){
+ return dispatchers.lookup(path);
+ }
+ return dispatchers.defaultGlobalDispatcher();
+ }
+ }
+
+ public Dispatchers(akka.dispatch.Dispatchers dispatchers){
+ Preconditions.checkNotNull(dispatchers, "dispatchers should not be null");
+ this.dispatchers = dispatchers;
+ }
+
+ public ExecutionContext getDispatcher(DispatcherType dispatcherType){
+ return dispatcherType.dispatcher(this.dispatchers);
+ }
+
+ public String getDispatcherPath(DispatcherType dispatcherType){
+ return dispatcherType.path(this.dispatchers);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.ImmutableList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * MessageTracker is a diagnostic utility class used to figure out why a message that was expected to
+ * arrive within a given time interval did not arrive. It keeps track of all the messages received between
+ * the arrival of two instances of the expected message and the amount of time it took to process each of
+ * those messages.
+ * <br/>
+ * Usage of the API is as follows:
+ * <pre>
+ *
+ * // Track the Foo class. Here we expect a message of type Foo to arrive every 10 millis
+ * MessageTracker tracker = new MessageTracker(Foo.class, 10);
+ *
+ * // Begin the tracking process. If this is not called, calling received and done on the resultant Context
+ * // will do nothing
+ * tracker.begin();
+ *
+ * .....
+ *
+ * MessageTracker.Context context = tracker.received(message);
+ *
+ * if(context.error().isPresent()){
+ * LOG.error("{}", context.error().get());
+ * }
+ *
+ * // Some custom processing
+ * process(message);
+ *
+ * context.done();
+ *
+ * </pre>
+ */
+public class MessageTracker {
+
+ private static final Context NO_OP_CONTEXT = new NoOpContext();
+
+ private final Class<?> expectedMessageClass;
+
+ private final long expectedArrivalInterval;
+
+ private final List<MessageProcessingTime> messagesSinceLastExpectedMessage = new LinkedList<>();
+
+ private Stopwatch expectedMessageWatch;
+
+ private boolean enabled = false;
+
+ private Object lastExpectedMessage;
+
+ private Object currentMessage;
+
+ private final CurrentMessageContext currentMessageContext = new CurrentMessageContext();
+
+ /**
+ *
+ * @param expectedMessageClass The class of the message to track
+ * @param expectedArrivalIntervalInMillis The expected arrival interval between two instances of the expected
+ * message
+ */
+ public MessageTracker(Class<?> expectedMessageClass, long expectedArrivalIntervalInMillis){
+ this.expectedMessageClass = expectedMessageClass;
+ this.expectedArrivalInterval = expectedArrivalIntervalInMillis;
+ }
+
+ public void begin(){
+ if(enabled) {
+ return;
+ }
+ enabled = true;
+ expectedMessageWatch = Stopwatch.createStarted();
+ }
+
+ public Context received(Object message){
+ if(!enabled) {
+ return NO_OP_CONTEXT;
+ }
+ this.currentMessage = message;
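+ // When the tracked message type arrives, check whether it arrived within the expected interval;
+ // if it is late, report everything that was processed since the previous tracked message.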
+ if(expectedMessageClass.isInstance(message)){
+ long actualElapsedTime = expectedMessageWatch.elapsed(TimeUnit.MILLISECONDS);
+ if(actualElapsedTime > expectedArrivalInterval){
+ return new ErrorContext(message, Optional.of(new FailedExpectation(lastExpectedMessage, message,
+ ImmutableList.copyOf(messagesSinceLastExpectedMessage), expectedArrivalInterval,
+ actualElapsedTime)));
+ }
+ this.lastExpectedMessage = message;
+ this.messagesSinceLastExpectedMessage.clear();
+ }
+
+ currentMessageContext.reset();
+ return currentMessageContext;
+ }
+
+ private void processed(Object message, long messageElapsedTimeInNanos){
+ if(!enabled) {
+ return;
+ }
+ if(!expectedMessageClass.isInstance(message)){
+ this.messagesSinceLastExpectedMessage.add(
+ new MessageProcessingTime(message.getClass(), messageElapsedTimeInNanos));
+ }
+ }
+
+ public List<MessageProcessingTime> getMessagesSinceLastExpectedMessage(){
+ return ImmutableList.copyOf(this.messagesSinceLastExpectedMessage);
+ }
+
+ public static class MessageProcessingTime {
+ private final Class<?> messageClass;
+ private final long elapsedTimeInNanos;
+
+ MessageProcessingTime(Class<?> messageClass, long elapsedTimeInNanos){
+ this.messageClass = messageClass;
+ this.elapsedTimeInNanos = elapsedTimeInNanos;
+ }
+
+ @Override
+ public String toString() {
+ return "MessageProcessingTime{" +
+ "messageClass=" + messageClass.getSimpleName() +
+ ", elapsedTimeInMillis=" + TimeUnit.NANOSECONDS.toMillis(elapsedTimeInNanos) +
+ '}';
+ }
+
+ public Class<?> getMessageClass() {
+ return messageClass;
+ }
+
+ public long getElapsedTimeInNanos() {
+ return elapsedTimeInNanos;
+ }
+ }
+
+ public interface Error {
+ Object getLastExpectedMessage();
+ Object getCurrentExpectedMessage();
+ List<MessageProcessingTime> getMessageProcessingTimesSinceLastExpectedMessage();
+ }
+
+ private class FailedExpectation implements Error {
+
+ private final Object lastExpectedMessage;
+ private final Object currentExpectedMessage;
+ private final List<MessageProcessingTime> messagesSinceLastExpectedMessage;
+ private final long expectedTimeInMillis;
+ private final long actualTimeInMillis;
+
+ public FailedExpectation(Object lastExpectedMessage, Object message,
+ List<MessageProcessingTime> messagesSinceLastExpectedMessage,
+ long expectedTimeInMillis, long actualTimeInMillis) {
+ this.lastExpectedMessage = lastExpectedMessage;
+ this.currentExpectedMessage = message;
+ this.messagesSinceLastExpectedMessage = messagesSinceLastExpectedMessage;
+ this.expectedTimeInMillis = expectedTimeInMillis;
+ this.actualTimeInMillis = actualTimeInMillis;
+ }
+
+ public Object getLastExpectedMessage() {
+ return lastExpectedMessage;
+ }
+
+ public Object getCurrentExpectedMessage() {
+ return currentExpectedMessage;
+ }
+
+ public List<MessageProcessingTime> getMessageProcessingTimesSinceLastExpectedMessage() {
+ return messagesSinceLastExpectedMessage;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("\n> Last Expected Message = " + lastExpectedMessage);
+ builder.append("\n> Current Expected Message = " + currentExpectedMessage);
+ builder.append("\n> Expected time in between messages = " + expectedTimeInMillis);
+ builder.append("\n> Actual time in between messages = " + actualTimeInMillis);
+ for (MessageProcessingTime time : messagesSinceLastExpectedMessage) {
+ builder.append("\n\t> ").append(time.toString());
+ }
+ return builder.toString();
+ }
+
+ }
+
+ public interface Context {
+ Context done();
+ Optional<? extends Error> error();
+ }
+
+ private static class NoOpContext implements Context {
+
+ @Override
+ public Context done() {
+ return this;
+ }
+
+ @Override
+ public Optional<Error> error() {
+ return Optional.absent();
+ }
+ }
+
+ private class CurrentMessageContext implements Context {
+ Stopwatch stopwatch = Stopwatch.createStarted();
+ boolean done = true;
+
+ public void reset(){
+ Preconditions.checkState(done);
+ done = false;
+ stopwatch.reset().start();
+ }
+
+ @Override
+ public Context done() {
+ processed(currentMessage, stopwatch.elapsed(TimeUnit.NANOSECONDS));
+ done = true;
+ return this;
+ }
+
+ @Override
+ public Optional<? extends Error> error() {
+ return Optional.absent();
+ }
+ }
+
+ private class ErrorContext implements Context {
+ Object message;
+ private final Optional<? extends Error> error;
+ Stopwatch stopwatch;
+
+ ErrorContext(Object message, Optional<? extends Error> error){
+ this.message = message;
+ this.error = error;
+ this.stopwatch = Stopwatch.createStarted();
+ }
+
+ @Override
+ public Context done(){
+ processed(message, this.stopwatch.elapsed(TimeUnit.NANOSECONDS));
+ this.stopwatch.stop();
+ return this;
+ }
+
+ @Override
+ public Optional<? extends Error> error() {
+ return error;
+ }
+ }
+}
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
-import java.util.concurrent.TimeUnit;
-
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import org.osgi.framework.BundleContext;
-import scala.concurrent.duration.Duration;
-
public class DistributedConfigDataStoreProviderModule extends
org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedConfigDataStoreProviderModule {
private BundleContext bundleContext;
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
.dataStoreType("config")
- .dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()))
- .shardTransactionIdleTimeout(Duration.create(
- props.getShardTransactionIdleTimeoutInMinutes().getValue(), TimeUnit.MINUTES))
+ .maxShardDataChangeExecutorPoolSize(props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue())
+ .maxShardDataChangeExecutorQueueSize(props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue())
+ .maxShardDataChangeListenerQueueSize(props.getMaxShardDataChangeListenerQueueSize().getValue().intValue())
+ .maxShardDataStoreExecutorQueueSize(props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue())
+ .shardTransactionIdleTimeoutInMinutes(props.getShardTransactionIdleTimeoutInMinutes().getValue())
.operationTimeoutInSeconds(props.getOperationTimeoutInSeconds().getValue())
.shardJournalRecoveryLogBatchSize(props.getShardJournalRecoveryLogBatchSize().
getValue().intValue())
.shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
.shardSnapshotDataThresholdPercentage(props.getShardSnapshotDataThresholdPercentage().getValue().intValue())
- .shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
- .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
- .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
+ .shardHeartbeatIntervalInMillis(props.getShardHeartbeatIntervalInMillis().getValue())
+ .shardInitializationTimeoutInSeconds(props.getShardInitializationTimeoutInSeconds().getValue())
+ .shardLeaderElectionTimeoutInSeconds(props.getShardLeaderElectionTimeoutInSeconds().getValue())
.shardTransactionCommitTimeoutInSeconds(
props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
.shardTransactionCommitQueueCapacity(
.shardIsolatedLeaderCheckIntervalInMillis(
props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.shardElectionTimeoutFactor(props.getShardElectionTimeoutFactor().getValue())
- .transactionCreationInitialRateLimit(props.getTxCreationInitialRateLimit().getValue())
+ .transactionCreationInitialRateLimit(props.getTransactionCreationInitialRateLimit().getValue())
+ .shardBatchedModificationCount(props.getShardBatchedModificationCount().getValue().intValue())
.build();
return DistributedDataStoreFactory.createInstance(getConfigSchemaServiceDependency(),
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
-import java.util.concurrent.TimeUnit;
-
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import org.osgi.framework.BundleContext;
-import scala.concurrent.duration.Duration;
-
public class DistributedOperationalDataStoreProviderModule extends
org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedOperationalDataStoreProviderModule {
private BundleContext bundleContext;
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
.dataStoreType("operational")
- .dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()))
- .shardTransactionIdleTimeout(Duration.create(
- props.getShardTransactionIdleTimeoutInMinutes().getValue(), TimeUnit.MINUTES))
+ .maxShardDataChangeExecutorPoolSize(props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue())
+ .maxShardDataChangeExecutorQueueSize(props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue())
+ .maxShardDataChangeListenerQueueSize(props.getMaxShardDataChangeListenerQueueSize().getValue().intValue())
+ .maxShardDataStoreExecutorQueueSize(props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue())
+ .shardTransactionIdleTimeoutInMinutes(props.getShardTransactionIdleTimeoutInMinutes().getValue())
.operationTimeoutInSeconds(props.getOperationTimeoutInSeconds().getValue())
.shardJournalRecoveryLogBatchSize(props.getShardJournalRecoveryLogBatchSize().
getValue().intValue())
.shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
.shardSnapshotDataThresholdPercentage(props.getShardSnapshotDataThresholdPercentage().getValue().intValue())
- .shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
- .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
- .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
+ .shardHeartbeatIntervalInMillis(props.getShardHeartbeatIntervalInMillis().getValue())
+ .shardInitializationTimeoutInSeconds(props.getShardInitializationTimeoutInSeconds().getValue())
+ .shardLeaderElectionTimeoutInSeconds(props.getShardLeaderElectionTimeoutInSeconds().getValue())
.shardTransactionCommitTimeoutInSeconds(
props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
.shardTransactionCommitQueueCapacity(
.shardIsolatedLeaderCheckIntervalInMillis(
props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.shardElectionTimeoutFactor(props.getShardElectionTimeoutFactor().getValue())
- .transactionCreationInitialRateLimit(props.getTxCreationInitialRateLimit().getValue())
+ .transactionCreationInitialRateLimit(props.getTransactionCreationInitialRateLimit().getValue())
+ .shardBatchedModificationCount(props.getShardBatchedModificationCount().getValue().intValue())
.build();
return DistributedDataStoreFactory.createInstance(getOperationalSchemaServiceDependency(),
}
- leaf shard-hearbeat-interval-in-millis {
+ leaf shard-heartbeat-interval-in-millis {
default 500;
type heartbeat-interval-type;
description "The interval at which a shard will send a heart beat message to its remote shard.";
an operation (e.g. transaction create).";
}
+ leaf shard-batched-modification-count {
+ default 100;
+ type non-zero-uint32-type;
+ description "The number of transaction modification operations (put, merge, delete) to
+ batch before sending to the shard transaction actor. Batching improves
+ performance because fewer modification messages are sent to the actor, which
+ reduces the chance that the transaction actor's mailbox queue will fill up.";
+ }
+
leaf enable-metric-capture {
default false;
type boolean;
followers are active and term itself as isolated";
}
- leaf tx-creation-initial-rate-limit {
+ leaf transaction-creation-initial-rate-limit {
default 100;
type non-zero-uint32-type;
description "The initial number of transactions per second that are allowed before the data store
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(
MoreExecutors.sameThreadExecutor());
- doReturn(executor).when(mockActorSystem).dispatcher();
+
ActorContext actorContext = mock(ActorContext.class);
+ doReturn(executor).when(actorContext).getClientDispatcher();
+
String shardName = "shard-1";
final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
shardName, actorContext, mockListener);
shardName, actorContext, mockListener);
doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorContext).getClientDispatcher();
doReturn(getSystem()).when(actorContext).getActorSystem();
+ doReturn(Dispatchers.DEFAULT_DISPATCHER_PATH).when(actorContext).getNotificationDispatcherPath();
doReturn(getSystem().actorSelection(getRef().path())).
when(actorContext).actorSelection(getRef().path());
doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import java.io.IOException;
+import java.util.Dictionary;
+import java.util.Hashtable;
+import org.junit.Test;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.service.cm.Configuration;
+import org.osgi.service.cm.ConfigurationAdmin;
+
+/**
+ * Unit tests for DatastoreContextConfigAdminOverlay.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextConfigAdminOverlayTest {
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void test() throws IOException {
+ BundleContext mockBundleContext = mock(BundleContext.class);
+ ServiceReference<ConfigurationAdmin> mockServiceRef = mock(ServiceReference.class);
+ ConfigurationAdmin mockConfigAdmin = mock(ConfigurationAdmin.class);
+ Configuration mockConfig = mock(Configuration.class);
+ DatastoreContextIntrospector mockIntrospector = mock(DatastoreContextIntrospector.class);
+
+ doReturn(mockServiceRef).when(mockBundleContext).getServiceReference(ConfigurationAdmin.class);
+ doReturn(mockConfigAdmin).when(mockBundleContext).getService(mockServiceRef);
+
+ doReturn(mockConfig).when(mockConfigAdmin).getConfiguration(DatastoreContextConfigAdminOverlay.CONFIG_ID);
+
+ doReturn(DatastoreContextConfigAdminOverlay.CONFIG_ID).when(mockConfig).getPid();
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("property", "value");
+ doReturn(properties).when(mockConfig).getProperties();
+
+ try(DatastoreContextConfigAdminOverlay overlay = new DatastoreContextConfigAdminOverlay(
+ mockIntrospector, mockBundleContext)) {
+ }
+
+ verify(mockIntrospector).update(properties);
+
+ verify(mockBundleContext).ungetService(mockServiceRef);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
+import java.util.Dictionary;
+import java.util.Hashtable;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+
+/**
+ * Unit tests for DatastoreContextIntrospector.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextIntrospectorTest {
+
+ @Test
+ public void testUpdate() {
+ DatastoreContext context = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(context);
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "31");
+ properties.put("operation-timeout-in-seconds", "26");
+ properties.put("shard-transaction-commit-timeout-in-seconds", "100");
+ properties.put("shard-journal-recovery-log-batch-size", "199");
+ properties.put("shard-snapshot-batch-count", "212");
+ properties.put("shard-heartbeat-interval-in-millis", "101");
+ properties.put("shard-transaction-commit-queue-capacity", "567");
+ properties.put("shard-initialization-timeout-in-seconds", "82");
+ properties.put("shard-leader-election-timeout-in-seconds", "66");
+ properties.put("shard-isolated-leader-check-interval-in-millis", "123");
+ properties.put("shard-snapshot-data-threshold-percentage", "100");
+ properties.put("shard-election-timeout-factor", "21");
+ properties.put("shard-batched-modification-count", "901");
+ properties.put("transactionCreationInitialRateLimit", "200");
+ properties.put("MaxShardDataChangeExecutorPoolSize", "41");
+ properties.put("Max-Shard-Data-Change Executor-Queue Size", "1111");
+ properties.put(" max shard data change listener queue size", "2222");
+ properties.put("mAx-shaRd-data-STORE-executor-quEUe-size", "3333");
+ properties.put("persistent", "false");
+
+ boolean updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(31, context.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(26, context.getOperationTimeoutInSeconds());
+ assertEquals(100, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(101, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(82, context.getShardInitializationTimeout().duration().toSeconds());
+ assertEquals(66, context.getShardLeaderElectionTimeout().duration().toSeconds());
+ assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(21, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(901, context.getShardBatchedModificationCount());
+ assertEquals(200, context.getTransactionCreationInitialRateLimit());
+ assertEquals(41, context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(1111, context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(2222, context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(3333, context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ assertEquals(false, context.isPersistent());
+
+ properties.put("shard-transaction-idle-timeout-in-minutes", "32");
+ properties.put("operation-timeout-in-seconds", "27");
+ properties.put("shard-heartbeat-interval-in-millis", "102");
+ properties.put("shard-election-timeout-factor", "22");
+ properties.put("max-shard-data-change-executor-pool-size", "42");
+ properties.put("max-shard-data-store-executor-queue-size", "4444");
+ properties.put("persistent", "true");
+
+ updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(32, context.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(27, context.getOperationTimeoutInSeconds());
+ assertEquals(100, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(102, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(82, context.getShardInitializationTimeout().duration().toSeconds());
+ assertEquals(66, context.getShardLeaderElectionTimeout().duration().toSeconds());
+ assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(22, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(200, context.getTransactionCreationInitialRateLimit());
+ assertEquals(42, context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(1111, context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(2222, context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(4444, context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ assertEquals(true, context.isPersistent());
+
+ updated = introspector.update(null);
+ assertEquals("updated", false, updated);
+
+ updated = introspector.update(new Hashtable<String, Object>());
+ assertEquals("updated", false, updated);
+ }
+
+
+ @Test
+ public void testUpdateWithInvalidValues() {
+ DatastoreContext context = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(context);
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "0"); // bad - must be > 0
+ properties.put("shard-journal-recovery-log-batch-size", "199");
+ properties.put("shard-transaction-commit-timeout-in-seconds", "bogus"); // bad - NaN
+ properties.put("shard-snapshot-batch-count", "212"); // good
+ properties.put("operation-timeout-in-seconds", "4"); // bad - must be >= 5
+ properties.put("shard-heartbeat-interval-in-millis", "99"); // bad - must be >= 100
+ properties.put("shard-transaction-commit-queue-capacity", "567"); // good
+ properties.put("shard-snapshot-data-threshold-percentage", "101"); // bad - must be 0-100
+ properties.put("shard-initialization-timeout-in-seconds", "-1"); // bad - must be > 0
+ properties.put("max-shard-data-change-executor-pool-size", "bogus"); // bad - NaN
+ properties.put("unknownProperty", "1"); // bad - invalid property name
+
+ boolean updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT, context.getShardTransactionIdleTimeout());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE,
+ context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT, context.getShardInitializationTimeout());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ }
+
+ @Test
+ public void testUpdateWithDatastoreTypeSpecificProperties() {
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "22"); // global setting
+ properties.put("operational.shard-transaction-idle-timeout-in-minutes", "33"); // operational override
+ properties.put("config.shard-transaction-idle-timeout-in-minutes", "44"); // config override
+
+ properties.put("max-shard-data-change-executor-pool-size", "222"); // global setting
+ properties.put("operational.max-shard-data-change-executor-pool-size", "333"); // operational override
+ properties.put("config.max-shard-data-change-executor-pool-size", "444"); // config override
+
+ properties.put("persistent", "false"); // global setting
+ properties.put("operational.Persistent", "true"); // operational override
+
+ DatastoreContext operContext = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector operIntrospector = new DatastoreContextIntrospector(operContext);
+ boolean updated = operIntrospector.update(properties);
+ assertEquals("updated", true, updated);
+ operContext = operIntrospector.getContext();
+
+ assertEquals(33, operContext.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(true, operContext.isPersistent());
+ assertEquals(333, operContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+
+ DatastoreContext configContext = DatastoreContext.newBuilder().dataStoreType("config").build();
+ DatastoreContextIntrospector configIntrospector = new DatastoreContextIntrospector(configContext);
+ updated = configIntrospector.update(properties);
+ assertEquals("updated", true, updated);
+ configContext = configIntrospector.getContext();
+
+ assertEquals(44, configContext.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(false, configContext.isPersistent());
+ assertEquals(444, configContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ }
+}
package org.opendaylight.controller.cluster.datastore;
import static org.junit.Assert.assertEquals;
-import org.junit.Before;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_CONFIGURATION_READER;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_PERSISTENT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SNAPSHOT_BATCH_COUNT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
+import java.util.concurrent.TimeUnit;
+import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
public class DatastoreContextTest {
- private DatastoreContext.Builder builder;
+ @Test
+ public void testNewBuilderWithDefaultSettings() {
+ DatastoreContext context = DatastoreContext.newBuilder().build();
- @Before
- public void setUp(){
- builder = new DatastoreContext.Builder();
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT, context.getShardTransactionIdleTimeout());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SNAPSHOT_BATCH_COUNT, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis(),
+ context.getShardInitializationTimeout().duration().toMillis());
+ assertEquals(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis(),
+ context.getShardLeaderElectionTimeout().duration().toMillis());
+ assertEquals(DEFAULT_PERSISTENT, context.isPersistent());
+ assertEquals(DEFAULT_CONFIGURATION_READER, context.getConfigurationReader());
+ assertEquals(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT, context.getTransactionCreationInitialRateLimit());
+ assertEquals(DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT, context.getShardBatchedModificationCount());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
}
@Test
- public void testDefaults(){
- DatastoreContext build = builder.build();
-
- assertEquals(DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT , build.getShardTransactionIdleTimeout());
- assertEquals(DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, build.getOperationTimeoutInSeconds());
- assertEquals(DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, build.getShardTransactionCommitTimeoutInSeconds());
- assertEquals(DatastoreContext.DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE, build.getShardRaftConfig().getJournalRecoveryLogBatchSize());
- assertEquals(DatastoreContext.DEFAULT_SNAPSHOT_BATCH_COUNT, build.getShardRaftConfig().getSnapshotBatchCount());
- assertEquals(DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, build.getShardRaftConfig().getHeartBeatInterval().length());
- assertEquals(DatastoreContext.DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY, build.getShardTransactionCommitQueueCapacity());
- assertEquals(DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT, build.getShardInitializationTimeout());
- assertEquals(DatastoreContext.DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT, build.getShardLeaderElectionTimeout());
- assertEquals(DatastoreContext.DEFAULT_PERSISTENT, build.isPersistent());
- assertEquals(DatastoreContext.DEFAULT_CONFIGURATION_READER, build.getConfigurationReader());
- assertEquals(DatastoreContext.DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS, build.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
- assertEquals(DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE, build.getShardRaftConfig().getSnapshotDataThresholdPercentage());
- assertEquals(DatastoreContext.DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR, build.getShardRaftConfig().getElectionTimeoutFactor());
- assertEquals(DatastoreContext.DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT, build.getTransactionCreationInitialRateLimit());
+ public void testNewBuilderWithCustomSettings() {
+ DatastoreContext.Builder builder = DatastoreContext.newBuilder();
+
+ builder.shardTransactionIdleTimeout(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT.toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.operationTimeoutInSeconds(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS + 1);
+ builder.shardTransactionCommitTimeoutInSeconds(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS + 1);
+ builder.shardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE + 1);
+ builder.shardSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT + 1);
+ builder.shardHeartbeatIntervalInMillis(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS + 1);
+ builder.shardTransactionCommitQueueCapacity(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY + 1);
+ builder.shardInitializationTimeout(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.shardLeaderElectionTimeout(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.persistent(!DEFAULT_PERSISTENT);
+ builder.shardIsolatedLeaderCheckIntervalInMillis(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1);
+ builder.shardSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1);
+ builder.shardElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1);
+ builder.transactionCreationInitialRateLimit(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1);
+ builder.shardBatchedModificationCount(DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1);
+ builder.maxShardDataChangeExecutorPoolSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1);
+ builder.maxShardDataChangeExecutorQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1);
+ builder.maxShardDataChangeListenerQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1);
+ builder.maxShardDataStoreExecutorQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1);
+
+ DatastoreContext context = builder.build();
+
+ verifyCustomSettings(context);
+
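+ // newBuilderFrom should copy every setting from the given context; the instance it builds should be a distinct object.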
+ builder = DatastoreContext.newBuilderFrom(context);
+
+ DatastoreContext newContext = builder.build();
+
+ verifyCustomSettings(newContext);
+
+ Assert.assertNotSame(context, newContext);
}
-}
\ No newline at end of file
+ private void verifyCustomSettings(DatastoreContext context) {
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT.toMillis() + 1,
+ context.getShardTransactionIdleTimeout().toMillis());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS + 1, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS + 1,
+ context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE + 1,
+ context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SNAPSHOT_BATCH_COUNT + 1, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS + 1,
+ context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY + 1, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis() + 1,
+ context.getShardInitializationTimeout().duration().toMillis());
+ assertEquals(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis() + 1,
+ context.getShardLeaderElectionTimeout().duration().toMillis());
+ assertEquals(!DEFAULT_PERSISTENT, context.isPersistent());
+ assertEquals(DEFAULT_CONFIGURATION_READER, context.getConfigurationReader());
+ assertEquals(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1,
+ context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1,
+ context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1, context.getTransactionCreationInitialRateLimit());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1,
+ context.getShardBatchedModificationCount());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ }
+}
schemaContext = TestModel.createTestContext();
doReturn(schemaContext).when(actorContext).getSchemaContext();
+ doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
}
@Test
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import java.util.concurrent.Semaphore;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
+
+/**
+ * Unit tests for OperationCompleter.
+ *
+ * @author Thomas Pantelis
+ */
+public class OperationCompleterTest {
+
+ @Test
+ public void testOnComplete() throws Exception {
+ int permits = 10;
+ Semaphore operationLimiter = new Semaphore(permits);
+ operationLimiter.acquire(permits);
+ int availablePermits = 0;
+
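+ // onComplete() should release permits back to the limiter for successful replies and failures alike.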
+ OperationCompleter completer = new OperationCompleter(operationLimiter);
+
+ completer.onComplete(null, new DataExistsReply(true));
+ assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
+
+ completer.onComplete(null, new DataExistsReply(true));
+ assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
+
+ completer.onComplete(null, new IllegalArgumentException());
+ assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
+
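+ // A BatchedModificationsReply should release one permit per batched modification (4 in this case).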
+ completer.onComplete(null, new BatchedModificationsReply(4));
+ availablePermits += 4;
+ assertEquals("availablePermits", availablePermits, operationLimiter.availablePermits());
+ }
+}
// Write data to the Tx
txActor.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)).toSerializable(
- DataStoreVersions.BASE_HELIUM_VERSION), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.BASE_HELIUM_VERSION).
+ toSerializable(), getRef());
expectMsgClass(duration, ShardTransactionMessages.WriteDataReply.class);
// Write data to the Tx
txActor.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME),
+ DataStoreVersions.BASE_HELIUM_VERSION).toSerializable(), getRef());
- expectMsgClass(duration, WriteDataReply.class);
+ expectMsgClass(duration, WriteDataReply.INSTANCE.toSerializable(
+ DataStoreVersions.BASE_HELIUM_VERSION).getClass());
// Ready the Tx
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.ShardWriteTransaction.GetCompositeModificationReply;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.duration.Duration;
public class ShardTransactionTest extends AbstractActorTest {
"testOnReceiveWriteData");
transaction.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)).toSerializable(
- DataStoreVersions.HELIUM_2_VERSION), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.HELIUM_2_VERSION).
+ toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), ShardTransactionMessages.WriteDataReply.class);
// unserialized write
transaction.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)),
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.CURRENT_VERSION),
getRef());
expectMsgClass(duration("5 seconds"), WriteDataReply.class);
"testMergeData");
transaction.tell(new MergeData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)).toSerializable(
- DataStoreVersions.HELIUM_2_VERSION), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.HELIUM_2_VERSION).
+ toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), ShardTransactionMessages.MergeDataReply.class);
//unserialized merge
transaction.tell(new MergeData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)),
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.CURRENT_VERSION),
getRef());
expectMsgClass(duration("5 seconds"), MergeDataReply.class);
final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
"testDeleteData");
- transaction.tell(new DeleteData(TestModel.TEST_PATH).toSerializable(
- DataStoreVersions.HELIUM_2_VERSION), getRef());
+ transaction.tell(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.HELIUM_2_VERSION).
+ toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), ShardTransactionMessages.DeleteDataReply.class);
assertModification(transaction, DeleteModification.class);
//unserialized
- transaction.tell(new DeleteData(TestModel.TEST_PATH), getRef());
+ transaction.tell(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION), getRef());
expectMsgClass(duration("5 seconds"), DeleteDataReply.class);
}};
}
+ @Test
+ public void testOnReceiveBatchedModifications() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ DOMStoreWriteTransaction mockWriteTx = Mockito.mock(DOMStoreWriteTransaction.class);
+ final ActorRef transaction = newTransactionActor(mockWriteTx, "testOnReceiveBatchedModifications");
+
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+
+ YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
+
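+ // Batch a write, a merge and a delete into a single BatchedModifications message.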
+ BatchedModifications batched = new BatchedModifications(DataStoreVersions.CURRENT_VERSION);
+ batched.addModification(new WriteModification(writePath, writeData));
+ batched.addModification(new MergeModification(mergePath, mergeData));
+ batched.addModification(new DeleteModification(deletePath));
+
+ transaction.tell(batched, getRef());
+
+ BatchedModificationsReply reply = expectMsgClass(duration("5 seconds"), BatchedModificationsReply.class);
+ assertEquals("getNumBatched", 3, reply.getNumBatched());
+
+ JavaTestKit verification = new JavaTestKit(getSystem());
+ transaction.tell(new ShardWriteTransaction.GetCompositedModification(), verification.getRef());
+
+ CompositeModification compositeModification = verification.expectMsgClass(duration("5 seconds"),
+ GetCompositeModificationReply.class).getModification();
+
+ assertEquals("CompositeModification size", 3, compositeModification.getModifications().size());
+
+ WriteModification write = (WriteModification)compositeModification.getModifications().get(0);
+ assertEquals("getPath", writePath, write.getPath());
+ assertEquals("getData", writeData, write.getData());
+
+ MergeModification merge = (MergeModification)compositeModification.getModifications().get(1);
+ assertEquals("getPath", mergePath, merge.getPath());
+ assertEquals("getData", mergeData, merge.getData());
+
+ DeleteModification delete = (DeleteModification)compositeModification.getModifications().get(2);
+ assertEquals("getPath", deletePath, delete.getPath());
+
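+ // The mocked DOMStoreWriteTransaction should see the operations applied in the same order they were batched.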
+ InOrder inOrder = Mockito.inOrder(mockWriteTx);
+ inOrder.verify(mockWriteTx).write(writePath, writeData);
+ inOrder.verify(mockWriteTx).merge(mergePath, mergeData);
+ inOrder.verify(mockWriteTx).delete(deletePath);
+ }};
+ }
@Test
public void testOnReceiveReadyTransaction() throws Exception {
DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> transaction = TestActorRef.apply(props,getSystem());
- transaction.receive(new DeleteData(TestModel.TEST_PATH).toSerializable(
- DataStoreVersions.CURRENT_VERSION), ActorRef.noSender());
+ transaction.receive(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION).
+ toSerializable(), ActorRef.noSender());
}
@Test
public void testShardTransactionInactivity() {
datastoreContext = DatastoreContext.newBuilder().shardTransactionIdleTimeout(
- Duration.create(500, TimeUnit.MILLISECONDS)).build();
+ 500, TimeUnit.MILLISECONDS).build();
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
MockitoAnnotations.initMocks(this);
doReturn(getSystem()).when(actorContext).getActorSystem();
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorContext).getClientDispatcher();
doReturn(datastoreContext).when(actorContext).getDatastoreContext();
doReturn(100).when(datastoreContext).getShardTransactionCommitTimeoutInSeconds();
doReturn(commitTimer).when(actorContext).getOperationTimer("commit");
actorContext.setSchemaContext(schemaContext);
doReturn(schemaContext).when(mockActorContext).getSchemaContext();
+ doReturn(DatastoreContext.newBuilder().build()).when(mockActorContext).getDatastoreContext();
}
@SuppressWarnings("resource")
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
+import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
+import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
+import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
@Mock
private ClusterWrapper mockClusterWrapper;
- String memberName = "mock-member";
+ private final String memberName = "mock-member";
+
+ private final Builder dataStoreContextBuilder = DatastoreContext.newBuilder().operationTimeoutInSeconds(2).
+ shardBatchedModificationCount(1);
@BeforeClass
public static void setUpClass() throws IOException {
schemaContext = TestModel.createTestContext();
- DatastoreContext dataStoreContext = DatastoreContext.newBuilder().operationTimeoutInSeconds(2).build();
-
doReturn(getSystem()).when(mockActorContext).getActorSystem();
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
doReturn(memberName).when(mockActorContext).getCurrentMemberName();
doReturn(schemaContext).when(mockActorContext).getSchemaContext();
doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
- doReturn(dataStoreContext).when(mockActorContext).getDatastoreContext();
+ doReturn(dataStoreContextBuilder.build()).when(mockActorContext).getDatastoreContext();
doReturn(10).when(mockActorContext).getTransactionOutstandingOperationLimit();
ShardStrategyFactory.setConfiguration(configuration);
}
private ReadData eqSerializedReadData() {
+ return eqSerializedReadData(TestModel.TEST_PATH);
+ }
+
+ private ReadData eqSerializedReadData(final YangInstanceIdentifier path) {
ArgumentMatcher<ReadData> matcher = new ArgumentMatcher<ReadData>() {
@Override
public boolean matches(Object argument) {
return ReadData.SERIALIZABLE_CLASS.equals(argument.getClass()) &&
- ReadData.fromSerializable(argument).getPath().equals(TestModel.TEST_PATH);
+ ReadData.fromSerializable(argument).getPath().equals(path);
}
};
return argThat(matcher);
}
- private WriteData eqSerializedWriteData(final NormalizedNode<?, ?> nodeToWrite) {
- return eqSerializedWriteData(nodeToWrite, DataStoreVersions.CURRENT_VERSION);
- }
-
- private WriteData eqSerializedWriteData(final NormalizedNode<?, ?> nodeToWrite,
- final int transactionVersion) {
+ private WriteData eqLegacyWriteData(final NormalizedNode<?, ?> nodeToWrite) {
ArgumentMatcher<WriteData> matcher = new ArgumentMatcher<WriteData>() {
@Override
public boolean matches(Object argument) {
- if((transactionVersion >= DataStoreVersions.LITHIUM_VERSION &&
- WriteData.SERIALIZABLE_CLASS.equals(argument.getClass())) ||
- (transactionVersion < DataStoreVersions.LITHIUM_VERSION &&
- ShardTransactionMessages.WriteData.class.equals(argument.getClass()))) {
-
+ if(ShardTransactionMessages.WriteData.class.equals(argument.getClass())) {
WriteData obj = WriteData.fromSerializable(argument);
- return obj.getPath().equals(TestModel.TEST_PATH) &&
- obj.getData().equals(nodeToWrite);
+ return obj.getPath().equals(TestModel.TEST_PATH) && obj.getData().equals(nodeToWrite);
}
return false;
return argThat(matcher);
}
- private WriteData eqWriteData(final NormalizedNode<?, ?> nodeToWrite) {
- ArgumentMatcher<WriteData> matcher = new ArgumentMatcher<WriteData>() {
- @Override
- public boolean matches(Object argument) {
- if(argument instanceof WriteData) {
- WriteData obj = (WriteData) argument;
- return obj.getPath().equals(TestModel.TEST_PATH) &&
- obj.getData().equals(nodeToWrite);
- }
- return false;
- }
- };
-
- return argThat(matcher);
- }
-
- private MergeData eqSerializedMergeData(final NormalizedNode<?, ?> nodeToWrite) {
- return eqSerializedMergeData(nodeToWrite, DataStoreVersions.CURRENT_VERSION);
- }
-
- private MergeData eqSerializedMergeData(final NormalizedNode<?, ?> nodeToWrite,
- final int transactionVersion) {
+ private MergeData eqLegacyMergeData(final NormalizedNode<?, ?> nodeToWrite) {
ArgumentMatcher<MergeData> matcher = new ArgumentMatcher<MergeData>() {
@Override
public boolean matches(Object argument) {
- if((transactionVersion >= DataStoreVersions.LITHIUM_VERSION &&
- MergeData.SERIALIZABLE_CLASS.equals(argument.getClass())) ||
- (transactionVersion < DataStoreVersions.LITHIUM_VERSION &&
- ShardTransactionMessages.MergeData.class.equals(argument.getClass()))) {
-
+ if(ShardTransactionMessages.MergeData.class.equals(argument.getClass())) {
MergeData obj = MergeData.fromSerializable(argument);
- return obj.getPath().equals(TestModel.TEST_PATH) &&
- obj.getData().equals(nodeToWrite);
+ return obj.getPath().equals(TestModel.TEST_PATH) && obj.getData().equals(nodeToWrite);
}
return false;
return argThat(matcher);
}
- private MergeData eqMergeData(final NormalizedNode<?, ?> nodeToWrite) {
- ArgumentMatcher<MergeData> matcher = new ArgumentMatcher<MergeData>() {
- @Override
- public boolean matches(Object argument) {
- if(argument instanceof MergeData) {
- MergeData obj = ((MergeData) argument);
- return obj.getPath().equals(TestModel.TEST_PATH) &&
- obj.getData().equals(nodeToWrite);
- }
-
- return false;
- }
- };
-
- return argThat(matcher);
- }
-
- private DeleteData eqSerializedDeleteData() {
- ArgumentMatcher<DeleteData> matcher = new ArgumentMatcher<DeleteData>() {
- @Override
- public boolean matches(Object argument) {
- return DeleteData.SERIALIZABLE_CLASS.equals(argument.getClass()) &&
- DeleteData.fromSerializable(argument).getPath().equals(TestModel.TEST_PATH);
- }
- };
-
- return argThat(matcher);
- }
-
- private DeleteData eqDeleteData() {
+ private DeleteData eqLegacyDeleteData(final YangInstanceIdentifier expPath) {
ArgumentMatcher<DeleteData> matcher = new ArgumentMatcher<DeleteData>() {
@Override
public boolean matches(Object argument) {
- return argument instanceof DeleteData &&
- ((DeleteData)argument).getPath().equals(TestModel.TEST_PATH);
+ return ShardTransactionMessages.DeleteData.class.equals(argument.getClass()) &&
+ DeleteData.fromSerializable(argument).getPath().equals(expPath);
}
};
private Future<Object> readSerializedDataReply(NormalizedNode<?, ?> data,
short transactionVersion) {
- return Futures.successful(new ReadDataReply(data).toSerializable(transactionVersion));
+ return Futures.successful(new ReadDataReply(data, transactionVersion).toSerializable());
}
private Future<Object> readSerializedDataReply(NormalizedNode<?, ?> data) {
}
private Future<ReadDataReply> readDataReply(NormalizedNode<?, ?> data) {
- return Futures.successful(new ReadDataReply(data));
+ return Futures.successful(new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION));
}
private Future<Object> dataExistsSerializedReply(boolean exists) {
return Futures.successful(new DataExistsReply(exists));
}
- private Future<Object> writeSerializedDataReply(short version) {
- return Futures.successful(new WriteDataReply().toSerializable(version));
- }
-
- private Future<Object> writeSerializedDataReply() {
- return writeSerializedDataReply(DataStoreVersions.CURRENT_VERSION);
- }
-
- private Future<WriteDataReply> writeDataReply() {
- return Futures.successful(new WriteDataReply());
- }
-
- private Future<Object> mergeSerializedDataReply(short version) {
- return Futures.successful(new MergeDataReply().toSerializable(version));
- }
-
- private Future<Object> mergeSerializedDataReply() {
- return mergeSerializedDataReply(DataStoreVersions.CURRENT_VERSION);
+ private Future<BatchedModificationsReply> batchedModificationsReply(int count) {
+ return Futures.successful(new BatchedModificationsReply(count));
}
private Future<Object> incompleteFuture(){
return mock(Future.class);
}
- private Future<MergeDataReply> mergeDataReply() {
- return Futures.successful(new MergeDataReply());
+ private ActorSelection actorSelection(ActorRef actorRef) {
+ return getSystem().actorSelection(actorRef.path());
+ }
+
+ private void expectBatchedModifications(ActorRef actorRef, int count) {
+ doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
- private Future<Object> deleteSerializedDataReply(short version) {
- return Futures.successful(new DeleteDataReply().toSerializable(version));
+ private void expectBatchedModifications(int count) {
+ doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), isA(BatchedModifications.class));
}
- private Future<Object> deleteSerializedDataReply() {
- return deleteSerializedDataReply(DataStoreVersions.CURRENT_VERSION);
+ private void expectIncompleteBatchedModifications() {
+ doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), isA(BatchedModifications.class));
}
- private Future<DeleteDataReply> deleteDataReply() {
- return Futures.successful(new DeleteDataReply());
+ private void expectReadyTransaction(ActorRef actorRef) {
+ doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
}
- private ActorSelection actorSelection(ActorRef actorRef) {
- return getSystem().actorSelection(actorRef.path());
+ private void expectFailedBatchedModifications(ActorRef actorRef) {
+ doReturn(Futures.failed(new TestException())).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
private CreateTransactionReply createTransactionReply(ActorRef actorRef, int transactionVersion){
public void testRead() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
doReturn(Futures.successful(new Object())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
transactionProxy.read(TestModel.TEST_PATH).checkedGet(5, TimeUnit.SECONDS);
}
doReturn(Futures.failed(new TestException())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
}
@Test(expected = TestException.class)
public void testReadWithPriorRecordingOperationFailure() throws Throwable {
+ doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
+ when(mockActorContext).getDatastoreContext();
+
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
-
- doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDeleteData());
+ expectFailedBatchedModifications(actorRef);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(expectedNode));
+ expectBatchedModifications(actorRef, 1);
doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.write(TestModel.TEST_PATH, expectedNode);
TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
-
assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
+
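+ // Verify the batched write was sent to the shard before the subsequent read.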
+ InOrder inOrder = Mockito.inOrder(mockActorContext);
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
}
@Test(expected=IllegalStateException.class)
public void testReadPreConditionCheck() {
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
-
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.read(TestModel.TEST_PATH);
}
public void testExists() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
doReturn(Futures.failed(new TestException())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDataExists());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
}
@Test(expected = TestException.class)
public void testExistsWithPriorRecordingOperationFailure() throws Throwable {
+ doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
+ when(mockActorContext).getDatastoreContext();
+
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
-
- doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDeleteData());
+ expectFailedBatchedModifications(actorRef);
doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
doReturn(dataExistsSerializedReply(true)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
assertEquals("Exists response", true, exists);
+
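+ // Verify the batched write was sent to the shard before the exists request.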
+ InOrder inOrder = Mockito.inOrder(mockActorContext);
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
}
@Test(expected=IllegalStateException.class)
public void testExistsPreConditionCheck() {
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
-
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.exists(TestModel.TEST_PATH);
}
// Expected
}
} else {
- assertEquals("Recording operation Future result type", expResultType,
+ assertEquals(String.format("Recording operation %d Future result type", i + 1), expResultType,
Await.result(future, Duration.create(5, TimeUnit.SECONDS)).getClass());
}
}
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ // This sends the batched modification.
+ transactionProxy.ready();
+
+ verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- WriteDataReply.class);
+ BatchedModificationsReply.class);
}
@Test
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
final TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
throw caughtEx.get();
}
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ // This sends the batched modification.
+ transactionProxy.ready();
+
+ verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- WriteDataReply.class);
+ BatchedModificationsReply.class);
}
@Test(expected=IllegalStateException.class)
public void testWritePreConditionCheck() {
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
-
- transactionProxy.write(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
+ transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
}
@Test(expected=IllegalStateException.class)
public void testWriteAfterReadyPreConditionCheck() {
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.ready();
- transactionProxy.write(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
}
@Test
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
+ // This sends the batched modification.
+ transactionProxy.ready();
+
+ verifyOneBatchedModification(actorRef, new MergeModification(TestModel.TEST_PATH, nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- MergeDataReply.class);
+ BatchedModificationsReply.class);
}
@Test
public void testDelete() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
- doReturn(deleteSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedDeleteData());
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.delete(TestModel.TEST_PATH);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedDeleteData());
+ // This sends the batched modification.
+ transactionProxy.ready();
+
+ verifyOneBatchedModification(actorRef, new DeleteModification(TestModel.TEST_PATH));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- DeleteDataReply.class);
+ BatchedModificationsReply.class);
}
private void verifyCohortFutures(ThreePhaseCommitCohortProxy proxy,
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.read(TestModel.TEST_PATH);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- WriteDataReply.class);
+ BatchedModificationsReply.class);
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+
+ verify(mockActorContext).executeOperationAsync(eq(actorSelection(actorRef)),
+ isA(BatchedModifications.class));
}
private ActorRef testCompatibilityWithHeliumVersion(short version) throws Exception {
doReturn(readSerializedDataReply(testNode, version)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- doReturn(writeSerializedDataReply(version)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(testNode, version));
+ doReturn(Futures.successful(new WriteDataReply().toSerializable(version))).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyWriteData(testNode));
- doReturn(mergeSerializedDataReply(version)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(testNode, version));
+ doReturn(Futures.successful(new MergeDataReply().toSerializable(version))).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyMergeData(testNode));
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ doReturn(Futures.successful(new DeleteDataReply().toSerializable(version))).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyDeleteData(TestModel.TEST_PATH));
+
+ expectReadyTransaction(actorRef);
doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
eq(actorRef.path().toString()));
transactionProxy.merge(TestModel.TEST_PATH, testNode);
+ transactionProxy.delete(TestModel.TEST_PATH);
+
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- ShardTransactionMessages.WriteDataReply.class, ShardTransactionMessages.MergeDataReply.class);
+ ShardTransactionMessages.WriteDataReply.class, ShardTransactionMessages.MergeDataReply.class,
+ ShardTransactionMessages.DeleteDataReply.class);
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
+ expectFailedBatchedModifications(actorRef);
- doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
-
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ expectReadyTransaction(actorRef);
doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
verifyCohortFutures(proxy, TestException.class);
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- MergeDataReply.class, TestException.class);
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(), TestException.class);
}
@Test
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
doReturn(Futures.failed(new TestException())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- MergeDataReply.class);
+ BatchedModificationsReply.class);
verifyCohortFutures(proxy, TestException.class);
}
doReturn(Futures.failed(new PrimaryNotFoundException("mock"))).when(
mockActorContext).findPrimaryShardAsync(anyString());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
doReturn(Futures.successful(new Object())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.read(TestModel.TEST_PATH);
String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1")
- .setTransactionActorPath(actorPath)
- .build();
+ .setTransactionId("txn-1").setTransactionActorPath(actorPath).build();
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
}
@Test
- public void testLocalTxActorWrite() throws Exception {
+ public void testLocalTxActorReady() throws Exception {
ActorSystem actorSystem = getSystem();
ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
- CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1")
- .setTransactionActorPath(actorPath)
- .build();
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder().
+ setTransactionId("txn-1").setTransactionActorPath(actorPath).
+ setMessageVersion(DataStoreVersions.CURRENT_VERSION).build();
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
- executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
eqCreateTransaction(memberName, WRITE_ONLY));
doReturn(true).when(mockActorContext).isPathLocal(actorPath);
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ doReturn(batchedModificationsReply(1)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), isA(BatchedModifications.class));
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- verify(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
-
- //testing local merge
- doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToWrite));
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
- verify(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToWrite));
-
-
- //testing local delete
- doReturn(deleteDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDeleteData());
- transactionProxy.delete(TestModel.TEST_PATH);
-
- verify(mockActorContext).executeOperationAsync(any(ActorSelection.class), eqDeleteData());
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- WriteDataReply.class, MergeDataReply.class, DeleteDataReply.class);
+ BatchedModificationsReply.class);
// testing ready
doReturn(readyTxReply(shardActorRef.path().toString())).when(mockActorContext).executeOperationAsync(
}
String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
- CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1")
- .setTransactionActorPath(actorPath)
- .build();
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder().
+ setTransactionId("txn-1").setTransactionActorPath(actorPath).
+ setMessageVersion(DataStoreVersions.CURRENT_VERSION).build();
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
long end = System.nanoTime();
- Assert.assertTrue(String.format("took less time than expected %s was %s",
- TimeUnit.SECONDS.toNanos(mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds()),
- (end-start)), (end - start) > TimeUnit.SECONDS.toNanos(mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds()));
+ long expected = TimeUnit.SECONDS.toNanos(mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds());
+ Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
+ expected, (end-start)), (end - start) > expected);
}
}
String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
- CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1")
- .setTransactionActorPath(actorPath)
- .build();
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder().
+ setTransactionId("txn-1").setTransactionActorPath(actorPath).
+ setMessageVersion(DataStoreVersions.CURRENT_VERSION).build();
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
long end = System.nanoTime();
- Assert.assertTrue(String.format("took more time than expected %s was %s",
- TimeUnit.SECONDS.toNanos(mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds()),
- (end-start)), (end - start) <= TimeUnit.SECONDS.toNanos(mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds()));
+ long expected = TimeUnit.SECONDS.toNanos(mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds());
+ Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
+ expected, (end-start)), (end - start) <= expected);
}
public void testWriteThrottling(boolean shardFound){
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ expectBatchedModifications(2);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ expectIncompleteBatchedModifications();
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
}
});
-
}
@Test
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ expectBatchedModifications(2);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(2);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
}
});
-
}
@Test
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToMerge));
+ expectIncompleteBatchedModifications();
transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToMerge));
+ expectBatchedModifications(2);
transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToMerge));
+ expectBatchedModifications(2);
transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
throttleOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDeleteData());
+ expectIncompleteBatchedModifications();
transactionProxy.delete(TestModel.TEST_PATH);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDeleteData());
+ expectBatchedModifications(2);
transactionProxy.delete(TestModel.TEST_PATH);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
- doReturn(deleteDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDeleteData());
+ expectBatchedModifications(2);
transactionProxy.delete(TestModel.TEST_PATH);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ expectBatchedModifications(1);
doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
any(ActorSelection.class), any(ReadyTransaction.class));
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
NormalizedNode<?, ?> carsNode = ImmutableNodes.containerNode(CarsModel.BASE_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
-
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(carsNode));
+ expectBatchedModifications(2);
doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
any(ActorSelection.class), any(ReadyTransaction.class));
}
}, 2, true);
}
+
+ @Test
+ public void testModificationOperationBatching() throws Throwable {
+ int shardBatchedModificationCount = 3;
+ doReturn(dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount).build()).
+ when(mockActorContext).getDatastoreContext();
+
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+
+ expectBatchedModifications(actorRef, shardBatchedModificationCount);
+
+ expectReadyTransaction(actorRef);
+
+ YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
+
+ YangInstanceIdentifier writePath3 = TestModel.INNER_LIST_PATH;
+ NormalizedNode<?, ?> writeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
+
+ YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ YangInstanceIdentifier mergePath2 = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> mergeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
+
+ YangInstanceIdentifier mergePath3 = TestModel.INNER_LIST_PATH;
+ NormalizedNode<?, ?> mergeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
+
+ YangInstanceIdentifier deletePath1 = TestModel.TEST_PATH;
+ YangInstanceIdentifier deletePath2 = TestModel.OUTER_LIST_PATH;
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+
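+ // Eight modifications with shardBatchedModificationCount = 3 should be flushed as three
+ // BatchedModifications messages (3 + 3 + 2), as verified below.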
+ transactionProxy.write(writePath1, writeNode1);
+ transactionProxy.write(writePath2, writeNode2);
+ transactionProxy.delete(deletePath1);
+ transactionProxy.merge(mergePath1, mergeNode1);
+ transactionProxy.merge(mergePath2, mergeNode2);
+ transactionProxy.write(writePath3, writeNode3);
+ transactionProxy.merge(mergePath3, mergeNode3);
+ transactionProxy.delete(deletePath2);
+
+ // This sends the last batch.
+ transactionProxy.ready();
+
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), new WriteModification(writePath1, writeNode1),
+ new WriteModification(writePath2, writeNode2), new DeleteModification(deletePath1));
+
+ verifyBatchedModifications(batchedModifications.get(1), new MergeModification(mergePath1, mergeNode1),
+ new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
+
+ verifyBatchedModifications(batchedModifications.get(2), new MergeModification(mergePath3, mergeNode3),
+ new DeleteModification(deletePath2));
+
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
+ BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
+ }
+
+ @Test
+ public void testModificationOperationBatchingWithInterleavedReads() throws Throwable {
+ int shardBatchedModificationCount = 10;
+ doReturn(dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount).build()).
+ when(mockActorContext).getDatastoreContext();
+
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+
+ expectBatchedModifications(actorRef, shardBatchedModificationCount);
+
+ YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
+
+ YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ YangInstanceIdentifier mergePath2 = TestModel.INNER_LIST_PATH;
+ NormalizedNode<?, ?> mergeNode2 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
+
+ YangInstanceIdentifier deletePath = TestModel.OUTER_LIST_PATH;
+
+ doReturn(readSerializedDataReply(writeNode2)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData(writePath2));
+
+ doReturn(readSerializedDataReply(mergeNode2)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData(mergePath2));
+
+ doReturn(dataExistsSerializedReply(true)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+
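+ // Each read/exists call should first flush the pending batch, so BatchedModifications
+ // messages interleave with the read requests (verified in order below).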
+ transactionProxy.write(writePath1, writeNode1);
+ transactionProxy.write(writePath2, writeNode2);
+
+ Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(writePath2).
+ get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+ assertEquals("Response NormalizedNode", writeNode2, readOptional.get());
+
+ transactionProxy.merge(mergePath1, mergeNode1);
+ transactionProxy.merge(mergePath2, mergeNode2);
+
+ readOptional = transactionProxy.read(mergePath2).get(5, TimeUnit.SECONDS);
+
+ transactionProxy.delete(deletePath);
+
+ Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
+ assertEquals("Exists response", true, exists);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+ assertEquals("Response NormalizedNode", mergeNode2, readOptional.get());
+
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), new WriteModification(writePath1, writeNode1),
+ new WriteModification(writePath2, writeNode2));
+
+ verifyBatchedModifications(batchedModifications.get(1), new MergeModification(mergePath1, mergeNode1),
+ new MergeModification(mergePath2, mergeNode2));
+
+ verifyBatchedModifications(batchedModifications.get(2), new DeleteModification(deletePath));
+
+ InOrder inOrder = Mockito.inOrder(mockActorContext);
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData(writePath2));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData(mergePath2));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
+
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
+ BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
+ }
+
+ private List<BatchedModifications> captureBatchedModifications(ActorRef actorRef) {
+ ArgumentCaptor<BatchedModifications> batchedModificationsCaptor =
+ ArgumentCaptor.forClass(BatchedModifications.class);
+ verify(mockActorContext, Mockito.atLeastOnce()).executeOperationAsync(
+ eq(actorSelection(actorRef)), batchedModificationsCaptor.capture());
+
+ List<BatchedModifications> batchedModifications = filterCaptured(
+ batchedModificationsCaptor, BatchedModifications.class);
+ return batchedModifications;
+ }
+
+ private <T> List<T> filterCaptured(ArgumentCaptor<T> captor, Class<T> type) {
+ List<T> captured = new ArrayList<>();
+ for(T c: captor.getAllValues()) {
+ if(type.isInstance(c)) {
+ captured.add(c);
+ }
+ }
+
+ return captured;
+ }
+
+ private void verifyOneBatchedModification(ActorRef actorRef, Modification expected) {
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), expected);
+ }
+
+ private void verifyBatchedModifications(Object message, Modification... expected) {
+ assertEquals("Message type", BatchedModifications.class, message.getClass());
+ BatchedModifications batchedModifications = (BatchedModifications)message;
+ assertEquals("BatchedModifications size", expected.length, batchedModifications.getModifications().size());
+ for(int i = 0; i < batchedModifications.getModifications().size(); i++) {
+ Modification actual = batchedModifications.getModifications().get(i);
+ assertEquals("Modification type", expected[i].getClass(), actual.getClass());
+ assertEquals("getPath", ((AbstractModification)expected[i]).getPath(),
+ ((AbstractModification)actual).getPath());
+ if(actual instanceof WriteModification) {
+ assertEquals("getData", ((WriteModification)expected[i]).getData(),
+ ((WriteModification)actual).getData());
+ }
+ }
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import static org.junit.Assert.assertEquals;
+import java.io.Serializable;
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
+import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+
+/**
+ * Unit tests for BatchedModifications.
+ *
+ * @author Thomas Pantelis
+ */
+public class BatchedModificationsTest {
+
+ @Test
+ public void testSerialization() {
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+
+ YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
+
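+ // Convert to the serializable form, round-trip it with SerializationUtils.clone() and
+ // verify that the version and all three modifications survive.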
+ BatchedModifications batched = new BatchedModifications(DataStoreVersions.CURRENT_VERSION);
+ batched.addModification(new WriteModification(writePath, writeData));
+ batched.addModification(new MergeModification(mergePath, mergeData));
+ batched.addModification(new DeleteModification(deletePath));
+
+ BatchedModifications clone = (BatchedModifications) SerializationUtils.clone(
+ (Serializable) batched.toSerializable());
+
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
+
+ assertEquals("getModifications size", 3, clone.getModifications().size());
+
+ WriteModification write = (WriteModification)clone.getModifications().get(0);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, write.getVersion());
+ assertEquals("getPath", writePath, write.getPath());
+ assertEquals("getData", writeData, write.getData());
+
+ MergeModification merge = (MergeModification)clone.getModifications().get(1);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, merge.getVersion());
+ assertEquals("getPath", mergePath, merge.getPath());
+ assertEquals("getData", mergeData, merge.getData());
+
+ DeleteModification delete = (DeleteModification)clone.getModifications().get(2);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, delete.getVersion());
+ assertEquals("getPath", deletePath, delete.getPath());
+ }
+
+ @Test
+ public void testBatchedModificationsReplySerialization() {
+ BatchedModificationsReply clone = (BatchedModificationsReply) SerializationUtils.clone(
+ (Serializable) new BatchedModificationsReply(100).toSerializable());
+ assertEquals("getNumBatched", 100, clone.getNumBatched());
+ }
+}
*
* @author Thomas Pantelis
*/
+@Deprecated
public class DeleteDataTest {
@Test
public void testSerialization() {
YangInstanceIdentifier path = TestModel.TEST_PATH;
- DeleteData expected = new DeleteData(path);
+ DeleteData expected = new DeleteData(path, DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.CURRENT_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", DeleteData.class, serialized.getClass());
assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((DeleteData)serialized).getVersion());
Object clone = SerializationUtils.clone((Serializable) serialized);
- assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((DeleteData)clone).getVersion());
DeleteData actual = DeleteData.fromSerializable(clone);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
assertEquals("getPath", expected.getPath(), actual.getPath());
}
public void testSerializationWithHeliumR1Version() throws Exception {
YangInstanceIdentifier path = TestModel.TEST_PATH;
- DeleteData expected = new DeleteData(path);
+ DeleteData expected = new DeleteData(path, DataStoreVersions.HELIUM_1_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.HELIUM_1_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ShardTransactionMessages.DeleteData.class, serialized.getClass());
DeleteData actual = DeleteData.fromSerializable(SerializationUtils.clone((Serializable) serialized));
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+@Deprecated
public class MergeDataTest {
@Test
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- MergeData expected = new MergeData(path, data);
+ MergeData expected = new MergeData(path, data, DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.CURRENT_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", MergeData.class, serialized.getClass());
assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((MergeData)serialized).getVersion());
Object clone = SerializationUtils.clone((Serializable) serialized);
- assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((MergeData)clone).getVersion());
MergeData actual = MergeData.fromSerializable(clone);
+ assertEquals("Version", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
assertEquals("getPath", expected.getPath(), actual.getPath());
assertEquals("getData", expected.getData(), actual.getData());
}
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- MergeData expected = new MergeData(path, data);
+ MergeData expected = new MergeData(path, data, DataStoreVersions.HELIUM_1_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.HELIUM_1_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ShardTransactionMessages.MergeData.class, serialized.getClass());
MergeData actual = MergeData.fromSerializable(SerializationUtils.clone((Serializable) serialized));
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- ReadDataReply expected = new ReadDataReply(data);
+ ReadDataReply expected = new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.CURRENT_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ReadDataReply.class, serialized.getClass());
ReadDataReply actual = ReadDataReply.fromSerializable(SerializationUtils.clone(
(Serializable) serialized));
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
assertEquals("getNormalizedNode", expected.getNormalizedNode(), actual.getNormalizedNode());
}
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- ReadDataReply expected = new ReadDataReply(data);
+ ReadDataReply expected = new ReadDataReply(data, DataStoreVersions.HELIUM_1_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.HELIUM_1_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ShardTransactionMessages.ReadDataReply.class, serialized.getClass());
ReadDataReply actual = ReadDataReply.fromSerializable(SerializationUtils.clone(
*
* @author Thomas Pantelis
*/
+@Deprecated
public class WriteDataTest {
@Test
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- WriteData expected = new WriteData(path, data);
+ WriteData expected = new WriteData(path, data, DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.CURRENT_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", WriteData.class, serialized.getClass());
assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((WriteData)serialized).getVersion());
Object clone = SerializationUtils.clone((Serializable) serialized);
- assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((WriteData)clone).getVersion());
WriteData actual = WriteData.fromSerializable(clone);
+ assertEquals("Version", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
assertEquals("getPath", expected.getPath(), actual.getPath());
assertEquals("getData", expected.getData(), actual.getData());
}
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- WriteData expected = new WriteData(path, data);
+ WriteData expected = new WriteData(path, data, DataStoreVersions.HELIUM_1_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.HELIUM_1_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ShardTransactionMessages.WriteData.class, serialized.getClass());
WriteData actual = WriteData.fromSerializable(SerializationUtils.clone((Serializable) serialized));
import org.apache.commons.lang.SerializationUtils;
import org.junit.Ignore;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
MutableCompositeModification clone = (MutableCompositeModification) SerializationUtils.clone(compositeModification);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
+
assertEquals("getModifications size", 3, clone.getModifications().size());
WriteModification write = (WriteModification)clone.getModifications().get(0);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, write.getVersion());
assertEquals("getPath", writePath, write.getPath());
assertEquals("getData", writeData, write.getData());
MergeModification merge = (MergeModification)clone.getModifications().get(1);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, merge.getVersion());
assertEquals("getPath", mergePath, merge.getPath());
assertEquals("getData", mergeData, merge.getData());
DeleteModification delete = (DeleteModification)clone.getModifications().get(2);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, delete.getVersion());
assertEquals("getPath", deletePath, delete.getPath());
}
package org.opendaylight.controller.cluster.datastore.utils;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
+import akka.actor.ActorSystem;
import akka.actor.Address;
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
import com.google.common.base.Optional;
+import com.typesafe.config.ConfigFactory;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.time.StopWatch;
import org.junit.Test;
assertTrue("did not take as much time as expected", watch.getTime() > 1000);
}
+
+ @Test
+ public void testClientDispatcherIsGlobalDispatcher(){
+
+ DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
+
+ doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
+ doReturn("config").when(mockDataStoreContext).getDataStoreType();
+
+ ActorContext actorContext =
+ new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
+ mock(Configuration.class), mockDataStoreContext);
+
+ assertEquals(getSystem().dispatchers().defaultGlobalDispatcher(), actorContext.getClientDispatcher());
+
+ }
+
+ @Test
+ public void testClientDispatcherIsNotGlobalDispatcher(){
+
+ DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
+
+ doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
+ doReturn("config").when(mockDataStoreContext).getDataStoreType();
+
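+ // This configuration is expected to define a dedicated client dispatcher, so the
+ // ActorContext should not fall back to the actor system's default global dispatcher.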
+ ActorSystem actorSystem = ActorSystem.create("with-custom-dispatchers", ConfigFactory.load("application-with-custom-dispatchers.conf"));
+
+ ActorContext actorContext =
+ new ActorContext(actorSystem, mock(ActorRef.class), mock(ClusterWrapper.class),
+ mock(Configuration.class), mockDataStoreContext);
+
+ assertNotEquals(actorSystem.dispatchers().defaultGlobalDispatcher(), actorContext.getClientDispatcher());
+
+ actorSystem.shutdown();
+
+ }
+
}
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import akka.dispatch.MessageDispatcher;
+import org.junit.Test;
+
+public class DispatchersTest {
+
+ @Test
+ public void testGetDefaultDispatcherPath(){
+ akka.dispatch.Dispatchers mockDispatchers = mock(akka.dispatch.Dispatchers.class);
+ doReturn(false).when(mockDispatchers).hasDispatcher(anyString());
+ Dispatchers dispatchers = new Dispatchers(mockDispatchers);
+
+ for(Dispatchers.DispatcherType type : Dispatchers.DispatcherType.values()) {
+ assertEquals(Dispatchers.DEFAULT_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(type));
+ }
+
+ }
+
+ @Test
+ public void testGetDefaultDispatcher(){
+ akka.dispatch.Dispatchers mockDispatchers = mock(akka.dispatch.Dispatchers.class);
+ MessageDispatcher mockGlobalDispatcher = mock(MessageDispatcher.class);
+ doReturn(false).when(mockDispatchers).hasDispatcher(anyString());
+ doReturn(mockGlobalDispatcher).when(mockDispatchers).defaultGlobalDispatcher();
+ Dispatchers dispatchers = new Dispatchers(mockDispatchers);
+
+ for(Dispatchers.DispatcherType type : Dispatchers.DispatcherType.values()) {
+ assertEquals(mockGlobalDispatcher,
+ dispatchers.getDispatcher(type));
+ }
+
+ }
+
+ @Test
+ public void testGetDispatcherPath(){
+ akka.dispatch.Dispatchers mockDispatchers = mock(akka.dispatch.Dispatchers.class);
+ doReturn(true).when(mockDispatchers).hasDispatcher(anyString());
+ Dispatchers dispatchers = new Dispatchers(mockDispatchers);
+
+ assertEquals(Dispatchers.CLIENT_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Client));
+
+ assertEquals(Dispatchers.TXN_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Transaction));
+
+ assertEquals(Dispatchers.SHARD_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Shard));
+
+ assertEquals(Dispatchers.NOTIFICATION_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification));
+
+ }
+
+ @Test
+ public void testGetDispatcher(){
+ akka.dispatch.Dispatchers mockDispatchers = mock(akka.dispatch.Dispatchers.class);
+ MessageDispatcher mockDispatcher = mock(MessageDispatcher.class);
+ doReturn(true).when(mockDispatchers).hasDispatcher(anyString());
+ doReturn(mockDispatcher).when(mockDispatchers).lookup(anyString());
+ Dispatchers dispatchers = new Dispatchers(mockDispatchers);
+
+ for(Dispatchers.DispatcherType type : Dispatchers.DispatcherType.values()) {
+ assertEquals(mockDispatcher, dispatchers.getDispatcher(type));
+ }
+
+ }
+}
\ No newline at end of file
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MessageTrackerTest {
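+ // MessageTracker is constructed with an expected message class and a maximum expected
+ // interval between occurrences of that message (10 here, presumably milliseconds given
+ // the 20 ms sleeps below). Once begin() is called, an expected message that arrives
+ // later than the interval yields a Context whose error() is present, capturing the
+ // messages processed since the last expected one.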
+
+ private final Logger LOG = LoggerFactory.getLogger(getClass());
+
+ private class Foo {}
+
+ @Test
+ public void testNoTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+
+ MessageTracker.Context context1 = messageTracker.received(new Foo());
+ context1.done();
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ MessageTracker.Context context2 = messageTracker.received(new Foo());
+ context2.done();
+
+ }
+
+ @Test
+ public void testFailedExpectationOnTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ MessageTracker.Context context1 = messageTracker.received(new Foo());
+ context1.done();
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ MessageTracker.Context context2 = messageTracker.received(new Foo());
+ Assert.assertEquals(true, context2.error().isPresent());
+ Assert.assertEquals(0, context2.error().get().getMessageProcessingTimesSinceLastExpectedMessage().size());
+
+ }
+
+ @Test
+ public void testFailedExpectationOnTrackingWithMessagesInBetween(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ MessageTracker.Context context1 = messageTracker.received(new Foo());
+ context1.done();
+
+ messageTracker.received("A").done();
+ messageTracker.received(Long.valueOf(10)).done();
+ MessageTracker.Context c = messageTracker.received(Integer.valueOf(100));
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ c.done();
+
+ MessageTracker.Context context2 = messageTracker.received(new Foo());
+
+ Assert.assertEquals(true, context2.error().isPresent());
+
+ MessageTracker.Error error = context2.error().get();
+
+ List<MessageTracker.MessageProcessingTime> messageProcessingTimes =
+ error.getMessageProcessingTimesSinceLastExpectedMessage();
+
+ Assert.assertEquals(3, messageProcessingTimes.size());
+
+ Assert.assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass());
+ Assert.assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass());
+ Assert.assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass());
+ Assert.assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > TimeUnit.MILLISECONDS.toNanos(10));
+ Assert.assertEquals(Foo.class, error.getLastExpectedMessage().getClass());
+ Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+
+ LOG.error("An error occurred : {}" , error);
+
+ }
+
+
+ @Test
+ public void testMetExpectationOnTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ MessageTracker.Context context1 = messageTracker.received(new Foo());
+ context1.done();
+
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
+
+ MessageTracker.Context context2 = messageTracker.received(new Foo());
+ Assert.assertEquals(false, context2.error().isPresent());
+
+ }
+
+ @Test
+ public void testIllegalStateExceptionWhenDoneIsNotCalledWhileTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ messageTracker.received(new Foo());
+
+ try {
+ messageTracker.received(new Foo());
+ fail("Expected an IllegalStateException");
+ } catch (IllegalStateException e){
+
+ }
+ }
+
+ @Test
+ public void testNoIllegalStateExceptionWhenDoneIsNotCalledWhileNotTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+
+ messageTracker.received(new Foo());
+ messageTracker.received(new Foo());
+ }
+
+ @Test
+ public void testDelayInFirstExpectedMessageArrival(){
+
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ MessageTracker.Context context = messageTracker.received(new Foo());
+
+ Assert.assertEquals(true, context.error().isPresent());
+
+ MessageTracker.Error error = context.error().get();
+
+ Assert.assertEquals(null, error.getLastExpectedMessage());
+ Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+
+ String errorString = error.toString();
+ Assert.assertTrue(errorString.contains("Last Expected Message = null"));
+
+ LOG.error("An error occurred : {}", error);
+ }
+
+ @Test
+ public void testCallingBeginDoesNotResetWatch(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ messageTracker.begin();
+
+ MessageTracker.Context context = messageTracker.received(new Foo());
+
+ Assert.assertEquals(true, context.error().isPresent());
+
+ }
+
+ @Test
+ public void testMessagesSinceLastExpectedMessage(){
+
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ MessageTracker.Context context1 = messageTracker.received(Integer.valueOf(45)).done();
+
+ Assert.assertEquals(false, context1.error().isPresent());
+
+ MessageTracker.Context context2 = messageTracker.received(Long.valueOf(45)).done();
+
+ Assert.assertEquals(false, context2.error().isPresent());
+
+ List<MessageTracker.MessageProcessingTime> processingTimeList =
+ messageTracker.getMessagesSinceLastExpectedMessage();
+
+ Assert.assertEquals(2, processingTimeList.size());
+
+ assertEquals(Integer.class, processingTimeList.get(0).getMessageClass());
+ assertEquals(Long.class, processingTimeList.get(1).getMessageClass());
+
+ }
+
+}
\ No newline at end of file
private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
- public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
+ public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
+ node(OUTER_LIST_QNAME).build();
+ public static final YangInstanceIdentifier INNER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
+ node(OUTER_LIST_QNAME).node(INNER_LIST_QNAME).build();
public static final QName TWO_QNAME = QName.create(TEST_QNAME,"two");
public static final QName THREE_QNAME = QName.create(TEST_QNAME,"three");
--- /dev/null
+akka {
+ persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+ persistence.journal.plugin = "in-memory-journal"
+
+ loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
+
+ actor {
+ serializers {
+ java = "akka.serialization.JavaSerializer"
+ proto = "akka.remote.serialization.ProtobufSerializer"
+ }
+
+ serialization-bindings {
+ "org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification" = java
+ "com.google.protobuf.Message" = proto
+
+ }
+ }
+}
+
+in-memory-journal {
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal"
+}
+
+in-memory-snapshot-store {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
+
+bounded-mailbox {
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+ mailbox-capacity = 1000
+ mailbox-push-timeout-time = 100ms
+}
+
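+# The dispatchers below presumably correspond to the dispatcher names resolved by the
+# datastore's Dispatchers helper (client, transaction, shard and notification).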
+client-dispatcher {
+ # Dispatcher is the name of the event-based dispatcher
+ type = Dispatcher
+ # What kind of ExecutionService to use
+ executor = "fork-join-executor"
+ # Configuration for the fork join pool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 10
+ }
+ # Throughput defines the maximum number of messages to be
+ # processed per actor before the thread jumps to the next actor.
+ # Set to 1 for as fair as possible.
+ throughput = 100
+}
+
+transaction-dispatcher {
+ # Dispatcher is the name of the event-based dispatcher
+ type = Dispatcher
+ # What kind of ExecutionService to use
+ executor = "fork-join-executor"
+ # Configuration for the fork join pool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 10
+ }
+ # Throughput defines the maximum number of messages to be
+ # processed per actor before the thread jumps to the next actor.
+ # Set to 1 for as fair as possible.
+ throughput = 100
+}
+
+shard-dispatcher {
+ # Dispatcher is the name of the event-based dispatcher
+ type = Dispatcher
+ # What kind of ExecutionService to use
+ executor = "fork-join-executor"
+ # Configuration for the fork join pool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 10
+ }
+ # Throughput defines the maximum number of messages to be
+ # processed per actor before the thread jumps to the next actor.
+ # Set to 1 for as fair as possible.
+ throughput = 100
+}
+
+notification-dispatcher {
+ # Dispatcher is the name of the event-based dispatcher
+ type = Dispatcher
+ # What kind of ExecutionService to use
+ executor = "fork-join-executor"
+ # Configuration for the fork join pool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 10
+ }
+ # Throughput defines the maximum number of messages to be
+ # processed per actor before the thread jumps to the next actor.
+ # Set to 1 for as fair as possible.
+ throughput = 100
+}
\ No newline at end of file
import com.google.common.base.Preconditions;
import java.io.Serializable;
+import java.util.Iterator;
import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yangtools.concepts.Immutable;
import org.opendaylight.yangtools.concepts.Path;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
/**
* A unique identifier for a particular subtree. It is composed of the logical
* data store type and the instance identifier of the root node.
*/
-public final class DOMDataTreeIdentifier implements Immutable, Path<DOMDataTreeIdentifier>, Serializable {
+public final class DOMDataTreeIdentifier implements Immutable, Path<DOMDataTreeIdentifier>, Serializable, Comparable<DOMDataTreeIdentifier> {
private static final long serialVersionUID = 1L;
private final YangInstanceIdentifier rootIdentifier;
private final LogicalDatastoreType datastoreType;
}
return rootIdentifier.equals(other.rootIdentifier);
}
+
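+ /**
+ * Orders identifiers first by datastore type and then lexicographically by the path
+ * arguments of the root identifier; a path that is a proper prefix of another sorts first.
+ */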
+ @Override
+ public int compareTo(final DOMDataTreeIdentifier o) {
+ int i = datastoreType.compareTo(o.datastoreType);
+ if (i != 0) {
+ return i;
+ }
+
+ final Iterator<PathArgument> mi = rootIdentifier.getPathArguments().iterator();
+ final Iterator<PathArgument> oi = o.rootIdentifier.getPathArguments().iterator();
+
+ while (mi.hasNext()) {
+ if (!oi.hasNext()) {
+ return 1;
+ }
+
+ final PathArgument ma = mi.next();
+ final PathArgument oa = oi.next();
+ i = ma.compareTo(oa);
+ if (i != 0) {
+ return i;
+ }
+ }
+
+ return oi.hasNext() ? -1 : 0;
+ }
}
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Collection;
import java.util.EnumMap;
import java.util.Map;
import java.util.Map.Entry;
* the Future fails with a {@link TransactionCommitFailedException}.
*/
protected abstract CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts);
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts);
/**
* Creates a new composite read-only transaction
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
-import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collection;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
};
private static final Logger LOG = LoggerFactory.getLogger(CommitCoordinationTask.class);
- private final Iterable<DOMStoreThreePhaseCommitCohort> cohorts;
+ private final Collection<DOMStoreThreePhaseCommitCohort> cohorts;
private final DurationStatisticsTracker commitStatTracker;
private final DOMDataWriteTransaction tx;
- private final int cohortSize;
public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
final DurationStatisticsTracker commitStatTracker) {
this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
this.commitStatTracker = commitStatTracker;
- this.cohortSize = Iterables.size(cohorts);
}
@Override
*
*/
private ListenableFuture<?>[] canCommitAll() {
- final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
ops[i++] = cohort.canCommit();
*
*/
private ListenableFuture<?>[] preCommitAll() {
- final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
ops[i++] = cohort.preCommit();
* @return List of all cohorts futures from can commit phase.
*/
private ListenableFuture<?>[] commitAll() {
- final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
ops[i++] = cohort.commit();
*/
private ListenableFuture<Void> abortAsyncAll() {
- final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
ops[i++] = cohort.abort();
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
+import java.util.Collection;
import java.util.Map;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLong;
@Override
public CheckedFuture<Void, TransactionCommitFailedException> submit(
- final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ final DOMDataWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
checkNotFailed();
checkNotClosed();
/*
* This forces allocateTransaction() on a slow path, which has to happen after
- * this method has completed executing.
+ * this method has completed executing. Also inflightTx may be updated outside
+ * the lock, hence we need to re-check.
*/
@GuardedBy("this")
private void processIfReady() {
- final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
- if (tx != null) {
- processTransaction(tx);
+ if (inflightTx == null) {
+ final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
+ if (tx != null) {
+ processTransaction(tx);
+ }
}
}
}
@Override
- public void close() {
+ public synchronized void close() {
final PingPongTransaction notLocked = lockedTx;
Preconditions.checkState(notLocked == null, "Attempted to close chain with outstanding transaction %s", notLocked);
- synchronized (this) {
- processIfReady();
- delegate.close();
+ // Force allocations onto the slow path and pick up any transaction that is ready to be submitted.
+ final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
+
+ // Make sure no transaction is in flight. Otherwise yield and re-check until it completes.
+ while (inflightTx != null) {
+ LOG.debug("Busy-waiting for in-flight transaction {} to complete", inflightTx);
+ Thread.yield();
}
+
+ // If we have an outstanding transaction, send it down
+ if (tx != null) {
+ processTransaction(tx);
+ }
+
+ // All done, close the delegate. All new allocations should fail.
+ delegate.close();
}
@Override
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
+import java.util.Collection;
import java.util.Map;
import java.util.concurrent.RejectedExecutionException;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
@Override
protected CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShard;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+
+final class ShardRegistration<T extends DOMDataTreeShard> extends AbstractListenerRegistration<T> {
+ private final DOMDataTreeIdentifier prefix;
+ private final ShardedDOMDataTree tree;
+
+ protected ShardRegistration(final ShardedDOMDataTree tree, final DOMDataTreeIdentifier prefix, final T shard) {
+ super(shard);
+ this.tree = Preconditions.checkNotNull(tree);
+ this.prefix = Preconditions.checkNotNull(prefix);
+ }
+
+ DOMDataTreeIdentifier getPrefix() {
+ return prefix;
+ }
+
+ @Override
+ protected void removeRegistration() {
+ tree.removeShard(this);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeProducer;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShard;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShardingConflictException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShardingService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class ShardedDOMDataTree implements DOMDataTreeService, DOMDataTreeShardingService {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardedDOMDataTree.class);
+ private final Map<LogicalDatastoreType, ShardingTableEntry> shardingTables = new EnumMap<>(LogicalDatastoreType.class);
+ @GuardedBy("this")
+ private final Map<DOMDataTreeIdentifier, DOMDataTreeProducer> idToProducer = new TreeMap<>();
+
+ @GuardedBy("this")
+ private ShardingTableEntry lookupShard(final DOMDataTreeIdentifier prefix) {
+ final ShardingTableEntry t = shardingTables.get(prefix.getDatastoreType());
+ if (t == null) {
+ return null;
+ }
+
+ return t.lookup(prefix.getRootIdentifier());
+ }
+
+ @GuardedBy("this")
+ private void storeShard(final DOMDataTreeIdentifier prefix, final ShardRegistration<?> reg) {
+ ShardingTableEntry t = shardingTables.get(prefix.getDatastoreType());
+ if (t == null) {
+ t = new ShardingTableEntry();
+ shardingTables.put(prefix.getDatastoreType(), t);
+ }
+
+ t.store(prefix.getRootIdentifier(), reg);
+ }
+
+ void removeShard(final ShardRegistration<?> reg) {
+ final DOMDataTreeIdentifier prefix = reg.getPrefix();
+ final ShardRegistration<?> parentReg;
+
+ synchronized (this) {
+ final ShardingTableEntry t = shardingTables.get(prefix.getDatastoreType());
+ if (t == null) {
+ LOG.warn("Shard registration {} points to non-existent table", reg);
+ return;
+ }
+
+ t.remove(prefix.getRootIdentifier());
+ parentReg = lookupShard(prefix).getRegistration();
+
+ /*
+ * FIXME: adjust all producers. This is tricky, as we need a different locking strategy,
+ * simply because we risk an AB/BA deadlock with a producer being split off from
+ * another producer.
+ *
+ */
+ }
+
+ if (parentReg != null) {
+ parentReg.getInstance().onChildDetached(prefix, reg.getInstance());
+ }
+ }
+
+ @Override
+ public <T extends DOMDataTreeShard> ListenerRegistration<T> registerDataTreeShard(final DOMDataTreeIdentifier prefix, final T shard) throws DOMDataTreeShardingConflictException {
+ final ShardRegistration<T> reg;
+ final ShardRegistration<?> parentReg;
+
+ synchronized (this) {
+ /*
+ * Lookup the parent shard (e.g. the one which currently matches the prefix),
+ * and if it exists, check if its registration prefix does not collide with
+ * this registration.
+ */
+ final ShardingTableEntry parent = lookupShard(prefix);
+ parentReg = parent.getRegistration();
+ if (parentReg != null && prefix.equals(parentReg.getPrefix())) {
+ throw new DOMDataTreeShardingConflictException(String.format("Prefix %s is already occupied by shard {}", prefix, parentReg.getInstance()));
+ }
+
+ // FIXME: wrap the shard in a proper adaptor based on implemented interface
+
+ reg = new ShardRegistration<T>(this, prefix, shard);
+
+ storeShard(prefix, reg);
+
+ // FIXME: update any producers/registrations
+ }
+
+ // Notify the parent shard
+ if (parentReg != null) {
+ parentReg.getInstance().onChildAttached(prefix, shard);
+ }
+
+ return reg;
+ }
+
+ @GuardedBy("this")
+ private DOMDataTreeProducer findProducer(final DOMDataTreeIdentifier subtree) {
+ for (Entry<DOMDataTreeIdentifier, DOMDataTreeProducer> e : idToProducer.entrySet()) {
+ if (e.getKey().contains(subtree)) {
+ return e.getValue();
+ }
+ }
+
+ return null;
+ }
+
+ synchronized void destroyProducer(final ShardedDOMDataTreeProducer producer) {
+ for (DOMDataTreeIdentifier s : producer.getSubtrees()) {
+ DOMDataTreeProducer r = idToProducer.remove(s);
+ if (!producer.equals(r)) {
+ LOG.error("Removed producer %s on subtree %s while removing %s", r, s, producer);
+ }
+ }
+ }
+
+ @GuardedBy("this")
+ private DOMDataTreeProducer createProducer(final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap) {
+ // Record the producer's attachment points
+ final DOMDataTreeProducer ret = ShardedDOMDataTreeProducer.create(this, shardMap);
+ for (DOMDataTreeIdentifier s : shardMap.keySet()) {
+ idToProducer.put(s, ret);
+ }
+
+ return ret;
+ }
+
+ @Override
+ public synchronized DOMDataTreeProducer createProducer(final Collection<DOMDataTreeIdentifier> subtrees) {
+ Preconditions.checkArgument(!subtrees.isEmpty(), "Subtrees may not be empty");
+
+ final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap = new HashMap<>();
+ for (DOMDataTreeIdentifier s : subtrees) {
+ // Attempting to create a disconnected producer -- all subtrees have to be unclaimed
+ final DOMDataTreeProducer producer = findProducer(s);
+ Preconditions.checkArgument(producer == null, "Subtree %s is attached to producer %s", s, producer);
+
+ shardMap.put(s, lookupShard(s).getRegistration().getInstance());
+ }
+
+ return createProducer(shardMap);
+ }
+
+ synchronized DOMDataTreeProducer createProducer(final ShardedDOMDataTreeProducer parent, final Collection<DOMDataTreeIdentifier> subtrees) {
+ Preconditions.checkNotNull(parent);
+
+ final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap = new HashMap<>();
+ for (DOMDataTreeIdentifier s : subtrees) {
+ shardMap.put(s, lookupShard(s).getRegistration().getInstance());
+ }
+
+ return createProducer(shardMap);
+ }
+
+ @Override
+ public synchronized <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(final T listener, final Collection<DOMDataTreeIdentifier> subtrees, final boolean allowRxMerges, final Collection<DOMDataTreeProducer> producers) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.BiMap;
+import com.google.common.collect.ImmutableBiMap;
+import com.google.common.collect.ImmutableBiMap.Builder;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.Set;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeProducer;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeProducerBusyException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeProducerException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShard;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class ShardedDOMDataTreeProducer implements DOMDataTreeProducer {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardedDOMDataTreeProducer.class);
+ private final BiMap<DOMDataTreeShard, DOMStoreTransactionChain> shardToChain;
+ private final Map<DOMDataTreeIdentifier, DOMDataTreeShard> idToShard;
+ private final ShardedDOMDataTree dataTree;
+
+ @GuardedBy("this")
+ private Map<DOMDataTreeIdentifier, DOMDataTreeProducer> children = Collections.emptyMap();
+ @GuardedBy("this")
+ private DOMDataWriteTransaction openTx;
+ @GuardedBy("this")
+ private boolean closed;
+
+ ShardedDOMDataTreeProducer(final ShardedDOMDataTree dataTree, final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap, final Set<DOMDataTreeShard> shards) {
+ this.dataTree = Preconditions.checkNotNull(dataTree);
+
+ // Create shard -> chain map
+ final Builder<DOMDataTreeShard, DOMStoreTransactionChain> cb = ImmutableBiMap.builder();
+ final Queue<Exception> es = new LinkedList<>();
+
+ for (DOMDataTreeShard s : shards) {
+ if (s instanceof DOMStore) {
+ try {
+ final DOMStoreTransactionChain c = ((DOMStore)s).createTransactionChain();
+ LOG.trace("Using DOMStore chain {} to access shard {}", c, s);
+ cb.put(s, c);
+ } catch (Exception e) {
+ LOG.error("Failed to instantiate chain for shard {}", s, e);
+ es.add(e);
+ }
+ } else {
+ LOG.error("Unhandled shard instance type {}", s.getClass());
+ }
+ }
+ this.shardToChain = cb.build();
+
+ // An error was encountered, close chains and report the error
+ if (shardToChain.size() != shards.size()) {
+ for (DOMStoreTransactionChain c : shardToChain.values()) {
+ try {
+ c.close();
+ } catch (Exception e) {
+ LOG.warn("Exception raised while closing chain %s", c, e);
+ }
+ }
+
+ final IllegalStateException e = new IllegalStateException("Failed to completely allocate contexts", es.poll());
+ while (!es.isEmpty()) {
+ e.addSuppressed(es.poll());
+ }
+
+ throw e;
+ }
+
+ idToShard = ImmutableMap.copyOf(shardMap);
+ }
+
+ @Override
+ public synchronized DOMDataWriteTransaction createTransaction(final boolean isolated) {
+ Preconditions.checkState(!closed, "Producer is already closed");
+ Preconditions.checkState(openTx == null, "Transaction %s is still open", openTx);
+
+ // Allocate backing transactions
+ final Map<DOMDataTreeShard, DOMStoreWriteTransaction> shardToTx = new HashMap<>();
+ for (Entry<DOMDataTreeShard, DOMStoreTransactionChain> e : shardToChain.entrySet()) {
+ shardToTx.put(e.getKey(), e.getValue().newWriteOnlyTransaction());
+ }
+
+ // Create the ID->transaction map
+ final ImmutableMap.Builder<DOMDataTreeIdentifier, DOMStoreWriteTransaction> b = ImmutableMap.builder();
+ for (Entry<DOMDataTreeIdentifier, DOMDataTreeShard> e : idToShard.entrySet()) {
+ b.put(e.getKey(), shardToTx.get(e.getValue()));
+ }
+
+ final ShardedDOMDataWriteTransaction ret = new ShardedDOMDataWriteTransaction(this, b.build());
+ openTx = ret;
+ return ret;
+ }
+
+ @GuardedBy("this")
+ private boolean haveSubtree(final DOMDataTreeIdentifier subtree) {
+ for (DOMDataTreeIdentifier i : idToShard.keySet()) {
+ if (i.contains(subtree)) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ @GuardedBy("this")
+ private DOMDataTreeProducer lookupChild(final DOMDataTreeIdentifier s) {
+ for (Entry<DOMDataTreeIdentifier, DOMDataTreeProducer> e : children.entrySet()) {
+ if (e.getKey().contains(s)) {
+ return e.getValue();
+ }
+ }
+
+ return null;
+ }
+
+ @Override
+ public synchronized DOMDataTreeProducer createProducer(final Collection<DOMDataTreeIdentifier> subtrees) {
+ Preconditions.checkState(!closed, "Producer is already closed");
+ Preconditions.checkState(openTx == null, "Transaction %s is still open", openTx);
+
+ for (DOMDataTreeIdentifier s : subtrees) {
+ // Check if the subtree was visible at any time
+ if (!haveSubtree(s)) {
+ throw new IllegalArgumentException(String.format("Subtree %s was never available in producer %s", s, this));
+ }
+
+ // Check if the subtree has not been delegated to a child
+ final DOMDataTreeProducer child = lookupChild(s);
+ Preconditions.checkArgument(child == null, "Subtree %s is delegated to child producer %s", s, child);
+
+ // Check if part of the requested subtree is not delegated to a child.
+ for (DOMDataTreeIdentifier c : children.keySet()) {
+ if (s.contains(c)) {
+ throw new IllegalArgumentException(String.format("Subtree %s cannot be delegated as it is superset of already-delegated %s", s, c));
+ }
+ }
+ }
+
+ final DOMDataTreeProducer ret = dataTree.createProducer(this, subtrees);
+ final ImmutableMap.Builder<DOMDataTreeIdentifier, DOMDataTreeProducer> cb = ImmutableMap.builder();
+ cb.putAll(children);
+ for (DOMDataTreeIdentifier s : subtrees) {
+ cb.put(s, ret);
+ }
+
+ children = cb.build();
+ return ret;
+ }
+
+ @Override
+ public synchronized void close() throws DOMDataTreeProducerException {
+ if (!closed) {
+ if (openTx != null) {
+ throw new DOMDataTreeProducerBusyException(String.format("Transaction %s is still open", openTx));
+ }
+
+ closed = true;
+ dataTree.destroyProducer(this);
+ }
+ }
+
+ static DOMDataTreeProducer create(final ShardedDOMDataTree dataTree, final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap) {
+ /*
+ * FIXME: we do not allow multiple shards in a producer because we have not implemented the
+ * necessary synchronization primitives yet.
+ */
+ final Set<DOMDataTreeShard> shards = ImmutableSet.copyOf(shardMap.values());
+ if (shards.size() > 1) {
+ throw new UnsupportedOperationException("Cross-shard producers are not supported yet");
+ }
+
+ return new ShardedDOMDataTreeProducer(dataTree, shardMap, shards);
+ }
+
+ Set<DOMDataTreeIdentifier> getSubtrees() {
+ return idToShard.keySet();
+ }
+
+ synchronized void cancelTransaction(final ShardedDOMDataWriteTransaction transaction) {
+ if (!openTx.equals(transaction)) {
+ LOG.warn("Transaction {} is not open in producer {}", transaction, this);
+ return;
+ }
+
+ LOG.debug("Transaction {} cancelled", transaction);
+ openTx = null;
+ }
+}
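
For orientation, the following is a minimal usage sketch of the producer introduced above; it is not part of this patch, and the DOMDataTreeService variable, path and data are assumptions. It illustrates the contract enforced by the code: only one transaction may be open per producer, and close() throws DOMDataTreeProducerBusyException while a transaction is still outstanding.

    // Hedged sketch; 'dataTreeService', 'path' and 'data' are hypothetical inputs.
    void writeSample(final DOMDataTreeService dataTreeService, final YangInstanceIdentifier path,
            final NormalizedNode<?, ?> data) throws Exception {
        // Bind the producer to a single subtree in the operational datastore.
        final DOMDataTreeProducer producer = dataTreeService.createProducer(
                Collections.singleton(new DOMDataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, path)));

        final DOMDataWriteTransaction tx = producer.createTransaction(false);
        tx.put(LogicalDatastoreType.OPERATIONAL, path, data);
        tx.submit().checkedGet();   // createTransaction() would fail while this transaction is still open
    }
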
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.concurrent.GuardedBy;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@NotThreadSafe
+final class ShardedDOMDataWriteTransaction implements DOMDataWriteTransaction {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardedDOMDataWriteTransaction.class);
+ private static final AtomicLong COUNTER = new AtomicLong();
+ private final Map<DOMDataTreeIdentifier, DOMStoreWriteTransaction> idToTransaction;
+ private final ShardedDOMDataTreeProducer producer;
+ private final String identifier;
+ @GuardedBy("this")
+ private boolean closed = false;
+
+ ShardedDOMDataWriteTransaction(final ShardedDOMDataTreeProducer producer, final Map<DOMDataTreeIdentifier, DOMStoreWriteTransaction> idToTransaction) {
+ this.producer = Preconditions.checkNotNull(producer);
+ this.idToTransaction = Preconditions.checkNotNull(idToTransaction);
+ this.identifier = "SHARDED-DOM-" + COUNTER.getAndIncrement();
+ }
+
+ // FIXME: use atomic operations
+ @GuardedBy("this")
+ private DOMStoreWriteTransaction lookup(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ final DOMDataTreeIdentifier id = new DOMDataTreeIdentifier(store, path);
+
+ for (Entry<DOMDataTreeIdentifier, DOMStoreWriteTransaction> e : idToTransaction.entrySet()) {
+ if (e.getKey().contains(id)) {
+ return e.getValue();
+ }
+ }
+
+ throw new IllegalArgumentException(String.format("Path %s is not acessible from transaction %s", id, this));
+ }
+
+ @Override
+ public String getIdentifier() {
+ return identifier;
+ }
+
+ @Override
+ public synchronized boolean cancel() {
+ if (closed) {
+ return false;
+ }
+
+ LOG.debug("Cancelling transaction {}", identifier);
+ for (DOMStoreWriteTransaction tx : ImmutableSet.copyOf(idToTransaction.values())) {
+ tx.close();
+ }
+
+ closed = true;
+ producer.cancelTransaction(this);
+ return true;
+ }
+
+ @Override
+ public synchronized CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ Preconditions.checkState(!closed, "Transaction %s is already closed", identifier);
+
+ final Set<DOMStoreWriteTransaction> txns = ImmutableSet.copyOf(idToTransaction.values());
+ final List<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
+ for (DOMStoreWriteTransaction tx : txns) {
+ cohorts.add(tx.ready());
+ }
+
+ try {
+ return Futures.immediateCheckedFuture(new CommitCoordinationTask(this, cohorts, null).call());
+ } catch (TransactionCommitFailedException e) {
+ return Futures.immediateFailedCheckedFuture(e);
+ }
+ }
+
+ @Override
+ @Deprecated
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
+ }
+
+ @Override
+ public synchronized void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ lookup(store, path).delete(path);
+ }
+
+ @Override
+ public synchronized void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ lookup(store, path).write(path, data);
+ }
+
+ @Override
+ public synchronized void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ lookup(store, path).merge(path, data);
+ }
+}
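
As a side note, the routing in lookup() above relies solely on DOMDataTreeIdentifier.contains(): every put/merge/delete is dispatched to the backing DOMStoreWriteTransaction whose subtree identifier is a prefix of the written path. A hedged illustration, with hypothetical QName constants:

    // TOPOLOGIES and TOPOLOGY are hypothetical QName constants used only for illustration.
    final DOMDataTreeIdentifier shardSubtree = new DOMDataTreeIdentifier(
            LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(TOPOLOGIES));
    final DOMDataTreeIdentifier target = new DOMDataTreeIdentifier(
            LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(TOPOLOGIES).node(TOPOLOGY));

    // true: the write lands on the shard transaction registered for the containing subtree
    final boolean routedToShard = shardSubtree.contains(target);
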
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class ShardingTableEntry implements Identifiable<PathArgument> {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardingTableEntry.class);
+ private final Map<PathArgument, ShardingTableEntry> children = Collections.emptyMap();
+ private final PathArgument identifier;
+ private ShardRegistration<?> registration;
+
+ ShardingTableEntry() {
+ identifier = null;
+ }
+
+ ShardingTableEntry(final PathArgument identifier) {
+ this.identifier = Preconditions.checkNotNull(identifier);
+ }
+
+ @Override
+ public PathArgument getIdentifier() {
+ return identifier;
+ }
+
+ public ShardRegistration<?> getRegistration() {
+ return registration;
+ }
+
+ ShardingTableEntry lookup(final YangInstanceIdentifier id) {
+ final Iterator<PathArgument> it = id.getPathArguments().iterator();
+ ShardingTableEntry entry = this;
+
+ while (it.hasNext()) {
+ final PathArgument a = it.next();
+ final ShardingTableEntry child = entry.children.get(a);
+ if (child == null) {
+ LOG.debug("Lookup of {} stopped at {}", id, a);
+ break;
+ }
+
+ entry = child;
+ }
+
+ return entry;
+ }
+
+ void store(final YangInstanceIdentifier id, final ShardRegistration<?> reg) {
+ final Iterator<PathArgument> it = id.getPathArguments().iterator();
+ ShardingTableEntry entry = this;
+
+ while (it.hasNext()) {
+ final PathArgument a = it.next();
+ ShardingTableEntry child = entry.children.get(a);
+ if (child == null) {
+ child = new ShardingTableEntry(a);
+ entry.children.put(a, child);
+ }
+ // descend so the registration is attached to the entry for the full path
+ entry = child;
+ }
+
+ Preconditions.checkState(entry.registration == null);
+ entry.registration = reg;
+ }
+
+ private boolean remove(final Iterator<PathArgument> it) {
+ if (it.hasNext()) {
+ final PathArgument arg = it.next();
+ final ShardingTableEntry child = children.get(arg);
+ if (child != null) {
+ if (child.remove(it)) {
+ children.remove(arg);
+ }
+ } else {
+ LOG.warn("Cannot remove non-existent child {}", arg);
+ }
+ }
+
+ return registration == null && children.isEmpty();
+ }
+
+ void remove(final YangInstanceIdentifier id) {
+ this.remove(id.getPathArguments().iterator());
+ }
+}
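
A short, hypothetical sketch of the store()/lookup() contract implemented by this prefix trie: a registration is stored at its exact path, and lookup() returns the deepest stored entry along the queried path, i.e. the closest registered ancestor.

    // 'topologiesPath', 'NODE_QNAME' and 'topologiesShardRegistration' are assumptions for illustration.
    final ShardingTableEntry root = new ShardingTableEntry();
    root.store(topologiesPath, topologiesShardRegistration);

    // A lookup for a descendant path resolves to the entry registered for its closest ancestor.
    final ShardingTableEntry entry = root.lookup(topologiesPath.node(NODE_QNAME));
    final ShardRegistration<?> registration = entry.getRegistration();   // the registration stored above
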
request.future.set( RpcResultBuilder.<NetconfMessage>failed()
.withRpcError( NetconfMessageTransformUtil.toRpcError( e ) ).build() );
+
+ // recursively process the message to eventually find the matching request
+ processMessage(message);
+
return;
}
return new NetconfMessage( doc );
}
+ // Test scenario verifying that responses are matched to requests even when earlier response messages are missing
+ @Test
+ public void testOnMissingResponseMessage() throws Exception {
+
+ setupSession();
+
+ String messageID1 = UUID.randomUUID().toString();
+ ListenableFuture<RpcResult<NetconfMessage>> resultFuture1 = sendRequest( messageID1 );
+
+ String messageID2 = UUID.randomUUID().toString();
+ ListenableFuture<RpcResult<NetconfMessage>> resultFuture2 = sendRequest( messageID2 );
+
+ String messageID3 = UUID.randomUUID().toString();
+ ListenableFuture<RpcResult<NetconfMessage>> resultFuture3 = sendRequest( messageID3 );
+
+ //response messages 1,2 are omitted
+ communicator.onMessage( mockSession, createSuccessResponseMessage( messageID3 ) );
+
+ verifyResponseMessage( resultFuture3.get(), messageID3 );
+ }
+
@Test
public void testOnSuccessfulResponseMessage() throws Exception {
setupSession();
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-codec-gson</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-export</artifactId>
+ <!-- FIXME: remove explicit version, once model export package is part of yangtools-artefacts -->
+ <version>0.7.0-SNAPSHOT</version>
+ </dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<Private-Package>org.opendaylight.controller.sal.rest.*,
org.opendaylight.controller.sal.restconf.rpc.*,
org.opendaylight.controller.sal.restconf.impl,
+ org.opendaylight.controller.md.sal.rest.common.*,
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.rest.connector.rev140724.*,
</Private-Package>
<Import-Package>*,
--- /dev/null
+/**
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.sal.rest.common;
+
+import org.opendaylight.controller.sal.restconf.impl.RestconfDocumentedException;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorTag;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorType;
+
+/**
+ * sal-rest-connector
+ * org.opendaylight.controller.md.sal.rest.common
+ *
+ * Utility class centralizing the validation functionality needed by the Restconf OSGi module.
+ * All methods throw {@link RestconfDocumentedException} only, which represents the error
+ * situations defined by the RESTCONF specification.
+ * @see <a href="https://tools.ietf.org/html/draft-bierman-netconf-restconf-02">draft-bierman-netconf-restconf-02</a>
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Feb 24, 2015
+ */
+public class RestconfValidationUtils {
+
+ private RestconfValidationUtils () {
+ throw new UnsupportedOperationException("Utility class");
+ }
+
+ /**
+ * Throws {@link RestconfDocumentedException} if the condition is false.
+ *
+ * @param condition condition which, when false, causes a {@link RestconfDocumentedException} to be thrown
+ * @param type {@link ErrorType} used to create the {@link RestconfDocumentedException}
+ * @param tag {@link ErrorTag} used to create the {@link RestconfDocumentedException}
+ * @param message error message used to create the {@link RestconfDocumentedException}
+ */
+ public static void checkDocumentedError(final boolean condition, final ErrorType type,
+ final ErrorTag tag, final String message) {
+ if(!condition) {
+ throw new RestconfDocumentedException(message, type, tag);
+ }
+ }
+
+ /**
+ * Throws {@link RestconfDocumentedException} if the value is null, otherwise returns the value unchanged.
+ * The exception uses {@link ErrorType#APPLICATION} (server application layer) and the
+ * 404 data-missing {@link ErrorTag}.
+ * @see <a href="https://tools.ietf.org/html/draft-bierman-netconf-restconf-02">draft-bierman-netconf-restconf-02</a>
+ *
+ * @param value value resolved from a {@link org.opendaylight.yangtools.yang.model.api.Module}
+ * @param moduleName name of the {@link org.opendaylight.yangtools.yang.model.api.Module}
+ * @return the input value (never null)
+ */
+ public static <T> T checkNotNullDocumented(final T value, final String moduleName) {
+ if(value == null) {
+ final String errMsg = "Module " + moduleName + "was not found.";
+ throw new RestconfDocumentedException(errMsg, ErrorType.APPLICATION, ErrorTag.DATA_MISSING);
+ }
+ return value;
+ }
+}
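
A minimal, hypothetical usage sketch of these helpers inside a Restconf handler; the local variables (identifier, schemaContext, moduleName, revision) are assumptions:

    // Reject a malformed request with a RESTCONF protocol error instead of a generic exception.
    RestconfValidationUtils.checkDocumentedError(identifier != null && !identifier.isEmpty(),
            ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE, "Module name must be supplied.");

    // Map a missing module to a 404 data-missing error rather than a NullPointerException.
    final Module module = RestconfValidationUtils.checkNotNullDocumented(
            schemaContext.findModuleByName(moduleName, revision), moduleName);
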
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+@Provider
+@Produces(SchemaRetrievalService.YANG_MEDIA_TYPE)
+public class SchemaExportContentYangBodyWriter implements MessageBodyWriter<SchemaExportContext> {
+
+ @Override
+ public boolean isWriteable(final Class<?> type, final Type genericType, final Annotation[] annotations,
+ final MediaType mediaType) {
+ return type.equals(SchemaExportContext.class);
+ }
+
+ @Override
+ public long getSize(final SchemaExportContext t, final Class<?> type, final Type genericType,
+ final Annotation[] annotations, final MediaType mediaType) {
+ return -1;
+ }
+
+ @Override
+ public void writeTo(final SchemaExportContext t, final Class<?> type, final Type genericType,
+ final Annotation[] annotations, final MediaType mediaType,
+ final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream) throws IOException,
+ WebApplicationException {
+ final PrintWriter writer = new PrintWriter(entityStream);
+ writer.write(t.getModule().getSource());
+ // flush the PrintWriter so the buffered YANG source actually reaches the entity stream
+ writer.flush();
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+import javax.xml.stream.XMLStreamException;
+import org.opendaylight.yangtools.yang.model.export.YinExportUtils;
+
+@Provider
+@Produces(SchemaRetrievalService.YIN_MEDIA_TYPE)
+public class SchemaExportContentYinBodyWriter implements MessageBodyWriter<SchemaExportContext> {
+
+ @Override
+ public boolean isWriteable(final Class<?> type, final Type genericType, final Annotation[] annotations,
+ final MediaType mediaType) {
+ return type.equals(SchemaExportContext.class);
+ }
+
+ @Override
+ public long getSize(final SchemaExportContext t, final Class<?> type, final Type genericType,
+ final Annotation[] annotations, final MediaType mediaType) {
+ return -1;
+ }
+
+ @Override
+ public void writeTo(final SchemaExportContext t, final Class<?> type, final Type genericType,
+ final Annotation[] annotations, final MediaType mediaType,
+ final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream) throws IOException,
+ WebApplicationException {
+ try {
+ YinExportUtils.writeModuleToOutputStream(t.getSchemaContext(), t.getModule(), entityStream);
+ } catch (final XMLStreamException e) {
+ throw new IllegalStateException(e);
+ }
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class SchemaExportContext {
+
+ private final SchemaContext schemaContext;
+ private final Module module;
+
+ public SchemaExportContext(final SchemaContext ctx, final Module module) {
+ schemaContext = ctx;
+ this.module = module;
+ }
+
+ public SchemaContext getSchemaContext() {
+ return schemaContext;
+ }
+
+ public Module getModule() {
+ return module;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import com.google.common.annotations.Beta;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+
+@Beta
+public interface SchemaRetrievalService {
+
+ public static final String YANG_MEDIA_TYPE = "application/yang";
+ public static final String YIN_MEDIA_TYPE = "application/yin+xml";
+
+ @GET
+ @Produces({YIN_MEDIA_TYPE,YANG_MEDIA_TYPE})
+ @Path("/modules/module/{identifier:.+}/schema")
+ SchemaExportContext getSchema(@PathParam("identifier") String mountAndModuleId);
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+import java.text.ParseException;
+import java.util.Date;
+import java.util.Iterator;
+import org.opendaylight.controller.md.sal.rest.common.RestconfValidationUtils;
+import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
+import org.opendaylight.controller.sal.restconf.impl.InstanceIdentifierContext;
+import org.opendaylight.controller.sal.restconf.impl.RestconfDocumentedException;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorTag;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorType;
+import org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class SchemaRetrievalServiceImpl implements SchemaRetrievalService {
+
+ private final ControllerContext salContext;
+
+ private static final Splitter SLASH_SPLITTER = Splitter.on("/");
+ private static final Splitter AT_SPLITTER = Splitter.on("@");
+ private static final String MOUNT_ARG = ControllerContext.MOUNT;
+
+ public SchemaRetrievalServiceImpl(final ControllerContext controllerContext) {
+ salContext = controllerContext;
+ }
+
+
+ @Override
+ public SchemaExportContext getSchema(final String mountAndModule) {
+ final SchemaContext schemaContext;
+ final Iterable<String> pathComponents = SLASH_SPLITTER.split(mountAndModule);
+ final Iterator<String> componentIter = pathComponents.iterator();
+ if(!Iterables.contains(pathComponents, MOUNT_ARG)) {
+ schemaContext = salContext.getGlobalSchema();
+ } else {
+ final StringBuilder pathBuilder = new StringBuilder();
+ while(componentIter.hasNext()) {
+ final String current = componentIter.next();
+ // Append a separator between path components (but not before the first one).
+ if(pathBuilder.length() != 0) {
+ pathBuilder.append("/");
+ }
+ pathBuilder.append(current);
+ if(MOUNT_ARG.equals(current)) {
+ // We stop right at mountpoint, last two arguments should
+ // be module name and revision
+ break;
+ }
+ }
+ schemaContext = getMountSchemaContext(pathBuilder.toString());
+
+ }
+
+ RestconfValidationUtils.checkDocumentedError(componentIter.hasNext(),
+ ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE, "Module name must be supplied.");
+ final String moduleName = componentIter.next();
+ RestconfValidationUtils.checkDocumentedError(componentIter.hasNext(),
+ ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE, "Revision date must be supplied.");
+ final String revisionString = componentIter.next();
+ return getExportUsingNameAndRevision(schemaContext, moduleName, revisionString);
+ }
+
+ private SchemaExportContext getExportUsingNameAndRevision(final SchemaContext schemaContext, final String moduleName,
+ final String revisionStr) {
+ try {
+ final Date revision = SimpleDateFormatUtil.getRevisionFormat().parse(revisionStr);
+ final Module module = schemaContext.findModuleByName(moduleName, revision);
+ return new SchemaExportContext(schemaContext, RestconfValidationUtils.checkNotNullDocumented(module, moduleName));
+ } catch (final ParseException e) {
+ throw new RestconfDocumentedException("Supplied revision is not in expected date format YYYY-mm-dd", e);
+ }
+ }
+
+ private SchemaContext getMountSchemaContext(final String identifier) {
+ final InstanceIdentifierContext mountContext = salContext.toMountPointIdentifier(identifier);
+ return mountContext.getSchemaContext();
+ }
+
+
+
+}
+
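
For clarity, the two identifier shapes getSchema() accepts look roughly as follows; the module name, revision and node path are illustrative, and ControllerContext.MOUNT corresponds to the yang-ext:mount path segment:

    // Global schema: <module-name>/<revision>
    final SchemaExportContext global = schemaRetrievalService.getSchema("ietf-interfaces/2014-05-08");

    // Schema behind a mount point: <path-to-mount-point>/yang-ext:mount/<module-name>/<revision>
    final SchemaExportContext mounted = schemaRetrievalService.getSchema(
            "opendaylight-inventory:nodes/node/device-1/yang-ext:mount/ietf-interfaces/2014-05-08");
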
@Path("/modules")
@Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
- public StructuredData getModules(@Context UriInfo uriInfo);
+ public NormalizedNodeContext getModules(@Context UriInfo uriInfo);
@GET
@Path("/modules/{identifier:.+}")
@Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
- public StructuredData getModules(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
+ public NormalizedNodeContext getModules(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/modules/module/{identifier:.+}")
@Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
- public StructuredData getModule(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
+ public NormalizedNodeContext getModule(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/operations")
import java.io.IOException;
/**
+ * @deprecated class will be removed in Lithium release
+ *
* This class parses JSON elements from a gson JsonReader. It disallows multiple elements of the same name unlike the
* default gson JsonParser."
*/
+@Deprecated
public class JsonParser {
- public JsonElement parse(JsonReader reader) throws JsonIOException, JsonSyntaxException {
+ public JsonElement parse(final JsonReader reader) throws JsonIOException, JsonSyntaxException {
// code copied from gson's JsonParser and Stream classes
- boolean lenient = reader.isLenient();
+ final boolean lenient = reader.isLenient();
reader.setLenient(true);
boolean isEmpty = true;
try {
reader.peek();
isEmpty = false;
return read(reader);
- } catch (EOFException e) {
+ } catch (final EOFException e) {
if (isEmpty) {
return JsonNull.INSTANCE;
}
// The stream ended prematurely so it is likely a syntax error.
throw new JsonSyntaxException(e);
- } catch (MalformedJsonException e) {
+ } catch (final MalformedJsonException e) {
throw new JsonSyntaxException(e);
- } catch (IOException e) {
+ } catch (final IOException e) {
throw new JsonIOException(e);
- } catch (NumberFormatException e) {
+ } catch (final NumberFormatException e) {
throw new JsonSyntaxException(e);
} catch (StackOverflowError | OutOfMemoryError e) {
throw new JsonParseException("Failed parsing JSON source: " + reader + " to Json", e);
}
}
- public JsonElement read(JsonReader in) throws IOException {
+ public JsonElement read(final JsonReader in) throws IOException {
switch (in.peek()) {
case STRING:
return new JsonPrimitive(in.nextString());
case NUMBER:
- String number = in.nextString();
+ final String number = in.nextString();
return new JsonPrimitive(new LazilyParsedNumber(number));
case BOOLEAN:
return new JsonPrimitive(in.nextBoolean());
in.nextNull();
return JsonNull.INSTANCE;
case BEGIN_ARRAY:
- JsonArray array = new JsonArray();
+ final JsonArray array = new JsonArray();
in.beginArray();
while (in.hasNext()) {
array.add(read(in));
in.endArray();
return array;
case BEGIN_OBJECT:
- JsonObject object = new JsonObject();
+ final JsonObject object = new JsonObject();
in.beginObject();
while (in.hasNext()) {
final String childName = in.nextName();
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Provider
@Consumes({ Draft02.MediaTypes.DATA + RestconfService.JSON, Draft02.MediaTypes.OPERATION + RestconfService.JSON,
MediaType.APPLICATION_JSON })
WebApplicationException {
try {
return JsonToCompositeNodeReader.read(entityStream);
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.debug("Error parsing json input", e);
throw new RestconfDocumentedException("Error parsing input: " + e.getMessage(), ErrorType.PROTOCOL,
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
class JsonToCompositeNodeReader {
private static final Logger LOG = LoggerFactory.getLogger(JsonToCompositeNodeReader.class);
private static final Splitter COLON_SPLITTER = Splitter.on(':');
}
public static CompositeNodeWrapper read(final InputStream entityStream) throws UnsupportedFormatException {
- JsonParser parser = new JsonParser();
+ final JsonParser parser = new JsonParser();
- JsonElement rootElement = parser.parse(new JsonReader(new InputStreamReader(entityStream)));
+ final JsonElement rootElement = parser.parse(new JsonReader(new InputStreamReader(entityStream)));
if (rootElement.isJsonNull()) {
// no content, so return null to indicate no input
return null;
throw new UnsupportedFormatException("Root element of Json has to be Object");
}
- Set<Entry<String, JsonElement>> entrySetsOfRootJsonObject = rootElement.getAsJsonObject().entrySet();
+ final Set<Entry<String, JsonElement>> entrySetsOfRootJsonObject = rootElement.getAsJsonObject().entrySet();
if (entrySetsOfRootJsonObject.size() != 1) {
throw new UnsupportedFormatException("Json Object should contain one element");
}
- Entry<String, JsonElement> childEntry = entrySetsOfRootJsonObject.iterator().next();
- String firstElementName = childEntry.getKey();
- JsonElement firstElementType = childEntry.getValue();
+ final Entry<String, JsonElement> childEntry = entrySetsOfRootJsonObject.iterator().next();
+ final String firstElementName = childEntry.getKey();
+ final JsonElement firstElementType = childEntry.getValue();
if (firstElementType.isJsonObject()) {
// container in yang
return createStructureWithRoot(firstElementName, firstElementType.getAsJsonObject());
if (firstElementType.isJsonArray()) {
// list in yang
if (firstElementType.getAsJsonArray().size() == 1) {
- JsonElement firstElementInArray = firstElementType.getAsJsonArray().get(0);
+ final JsonElement firstElementInArray = firstElementType.getAsJsonArray().get(0);
if (firstElementInArray.isJsonObject()) {
return createStructureWithRoot(firstElementName, firstElementInArray.getAsJsonObject());
}
}
private static CompositeNodeWrapper createStructureWithRoot(final String rootObjectName, final JsonObject rootObject) {
- CompositeNodeWrapper firstNode = new CompositeNodeWrapper(getNamespaceFor(rootObjectName),
+ final CompositeNodeWrapper firstNode = new CompositeNodeWrapper(getNamespaceFor(rootObjectName),
getLocalNameFor(rootObjectName));
- for (Entry<String, JsonElement> childOfFirstNode : rootObject.entrySet()) {
+ for (final Entry<String, JsonElement> childOfFirstNode : rootObject.entrySet()) {
addChildToParent(childOfFirstNode.getKey(), childOfFirstNode.getValue(), firstNode);
}
return firstNode;
private static void addChildToParent(final String childName, final JsonElement childType,
final CompositeNodeWrapper parent) {
if (childType.isJsonObject()) {
- CompositeNodeWrapper child = new CompositeNodeWrapper(getNamespaceFor(childName),
+ final CompositeNodeWrapper child = new CompositeNodeWrapper(getNamespaceFor(childName),
getLocalNameFor(childName));
parent.addValue(child);
- for (Entry<String, JsonElement> childOfChild : childType.getAsJsonObject().entrySet()) {
+ for (final Entry<String, JsonElement> childOfChild : childType.getAsJsonObject().entrySet()) {
addChildToParent(childOfChild.getKey(), childOfChild.getValue(), child);
}
} else if (childType.isJsonArray()) {
parent.addValue(new EmptyNodeWrapper(getNamespaceFor(childName), getLocalNameFor(childName)));
} else {
- for (JsonElement childOfChildType : childType.getAsJsonArray()) {
+ for (final JsonElement childOfChildType : childType.getAsJsonArray()) {
addChildToParent(childName, childOfChildType, parent);
}
}
} else if (childType.isJsonPrimitive()) {
- JsonPrimitive childPrimitive = childType.getAsJsonPrimitive();
- String value = childPrimitive.getAsString().trim();
+ final JsonPrimitive childPrimitive = childType.getAsJsonPrimitive();
+ final String value = childPrimitive.getAsString().trim();
parent.addValue(new SimpleNodeWrapper(getNamespaceFor(childName), getLocalNameFor(childName),
resolveValueOfElement(value)));
} else {
if (Iterators.size(it) == 1) {
try {
return URI.create(maybeURI);
- } catch (IllegalArgumentException e) {
+ } catch (final IllegalArgumentException e) {
LOG.debug("Value {} couldn't be interpreted as URI.", maybeURI);
}
}
private static Object resolveValueOfElement(final String value) {
// it could be instance-identifier Built-In Type
if (!value.isEmpty() && value.charAt(0) == '/') {
- IdentityValuesDTO resolvedValue = RestUtil.asInstanceIdentifier(value, new PrefixMapingFromJson());
+ final IdentityValuesDTO resolvedValue = RestUtil.asInstanceIdentifier(value, new PrefixMapingFromJson());
if (resolvedValue != null) {
return resolvedValue;
}
}
// it could be identityref Built-In Type therefore it is necessary to look at value as module_name:local_name
- URI namespace = getNamespaceFor(value);
+ final URI namespace = getNamespaceFor(value);
if (namespace != null) {
return new IdentityValuesDTO(namespace.toString(), getLocalNameFor(value), null, value);
}
final MediaType mediaType, final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream)
throws IOException, WebApplicationException {
NormalizedNode<?, ?> data = t.getData();
- InstanceIdentifierContext context = t.getInstanceIdentifierContext();
- DataSchemaNode schema = context.getSchemaNode();
+ final InstanceIdentifierContext context = t.getInstanceIdentifierContext();
+ final DataSchemaNode schema = context.getSchemaNode();
SchemaPath path = context.getSchemaNode().getPath();
- OutputStreamWriter outputWriter = new OutputStreamWriter(entityStream, Charsets.UTF_8);
+ final OutputStreamWriter outputWriter = new OutputStreamWriter(entityStream, Charsets.UTF_8);
if (data == null) {
throw new RestconfDocumentedException(Response.Status.NOT_FOUND);
}
boolean isDataRoot = false;
URI initialNs = null;
- outputWriter.write('{');
if (SchemaPath.ROOT.equals(path)) {
isDataRoot = true;
} else {
if(!schema.isAugmenting() && !(schema instanceof SchemaContext)) {
initialNs = schema.getQName().getNamespace();
}
- NormalizedNodeStreamWriter jsonWriter = JSONNormalizedNodeStreamWriter.create(context.getSchemaContext(),path,initialNs,outputWriter);
- NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter(jsonWriter);
+ final NormalizedNodeStreamWriter jsonWriter = JSONNormalizedNodeStreamWriter.create(context.getSchemaContext(),path,initialNs,outputWriter);
+ final NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter(jsonWriter);
if(isDataRoot) {
writeDataRoot(outputWriter,nnWriter,(ContainerNode) data);
} else {
nnWriter.write(data);
}
nnWriter.flush();
- outputWriter.write('}');
outputWriter.flush();
}
- private void writeDataRoot(OutputStreamWriter outputWriter, NormalizedNodeWriter nnWriter, ContainerNode data) throws IOException {
- Iterator<DataContainerChild<? extends PathArgument, ?>> iterator = data.getValue().iterator();
+ private void writeDataRoot(final OutputStreamWriter outputWriter, final NormalizedNodeWriter nnWriter, final ContainerNode data) throws IOException {
+ final Iterator<DataContainerChild<? extends PathArgument, ?>> iterator = data.getValue().iterator();
while(iterator.hasNext()) {
- DataContainerChild<? extends PathArgument, ?> child = iterator.next();
+ final DataContainerChild<? extends PathArgument, ?> child = iterator.next();
nnWriter.write(child);
nnWriter.flush();
}
import java.util.HashSet;
import java.util.Set;
import javax.ws.rs.core.Application;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaExportContentYangBodyWriter;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaExportContentYinBodyWriter;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaRetrievalServiceImpl;
import org.opendaylight.controller.sal.restconf.impl.BrokerFacade;
import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
import org.opendaylight.controller.sal.restconf.impl.RestconfImpl;
.add(JsonNormalizedNodeBodyReader.class)
.add(NormalizedNodeJsonBodyWriter.class)
.add(NormalizedNodeXmlBodyWriter.class)
+ .add(SchemaExportContentYinBodyWriter.class)
+ .add(SchemaExportContentYangBodyWriter.class)
.build();
}
@Override
public Set<Object> getSingletons() {
- Set<Object> singletons = new HashSet<>();
- ControllerContext controllerContext = ControllerContext.getInstance();
- BrokerFacade brokerFacade = BrokerFacade.getInstance();
- RestconfImpl restconfImpl = RestconfImpl.getInstance();
+ final Set<Object> singletons = new HashSet<>();
+ final ControllerContext controllerContext = ControllerContext.getInstance();
+ final BrokerFacade brokerFacade = BrokerFacade.getInstance();
+ final RestconfImpl restconfImpl = RestconfImpl.getInstance();
+ final SchemaRetrievalServiceImpl schemaRetrieval = new SchemaRetrievalServiceImpl(controllerContext);
restconfImpl.setBroker(brokerFacade);
restconfImpl.setControllerContext(controllerContext);
singletons.add(controllerContext);
singletons.add(brokerFacade);
- singletons.add(StatisticsRestconfServiceWrapper.getInstance());
+ singletons.add(schemaRetrieval);
+ singletons.add(new RestconfCompositeWrapper(StatisticsRestconfServiceWrapper.getInstance(), schemaRetrieval));
singletons.add(StructuredDataToXmlProvider.INSTANCE);
singletons.add(StructuredDataToJsonProvider.INSTANCE);
singletons.add(JsonToCompositeNodeProvider.INSTANCE);
--- /dev/null
+package org.opendaylight.controller.sal.rest.impl;
+
+import com.google.common.base.Preconditions;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaExportContext;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaRetrievalService;
+import org.opendaylight.controller.sal.rest.api.RestconfService;
+import org.opendaylight.controller.sal.restconf.impl.NormalizedNodeContext;
+import org.opendaylight.controller.sal.restconf.impl.StructuredData;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.Node;
+
+public class RestconfCompositeWrapper implements RestconfService, SchemaRetrievalService {
+
+ private final RestconfService restconf;
+ private final SchemaRetrievalService schema;
+
+ public RestconfCompositeWrapper(final RestconfService restconf, final SchemaRetrievalService schema) {
+ this.restconf = Preconditions.checkNotNull(restconf);
+ this.schema = Preconditions.checkNotNull(schema);
+ }
+
+ @Override
+ public Object getRoot() {
+ return restconf.getRoot();
+ }
+
+ @Override
+ public NormalizedNodeContext getModules(final UriInfo uriInfo) {
+ return restconf.getModules(uriInfo);
+ }
+
+ @Override
+ public NormalizedNodeContext getModules(final String identifier, final UriInfo uriInfo) {
+ return restconf.getModules(identifier, uriInfo);
+ }
+
+ @Override
+ public NormalizedNodeContext getModule(final String identifier, final UriInfo uriInfo) {
+ return restconf.getModule(identifier, uriInfo);
+ }
+
+ @Override
+ public StructuredData getOperations(final UriInfo uriInfo) {
+ return restconf.getOperations(uriInfo);
+ }
+
+ @Override
+ public StructuredData getOperations(final String identifier, final UriInfo uriInfo) {
+ return restconf.getOperations(identifier, uriInfo);
+ }
+
+ @Override
+ public StructuredData invokeRpc(final String identifier, final CompositeNode payload, final UriInfo uriInfo) {
+ return restconf.invokeRpc(identifier, payload, uriInfo);
+ }
+
+ @Override
+ public StructuredData invokeRpc(final String identifier, final String noPayload, final UriInfo uriInfo) {
+ return restconf.invokeRpc(identifier, noPayload, uriInfo);
+ }
+
+ @Override
+ public NormalizedNodeContext readConfigurationData(final String identifier, final UriInfo uriInfo) {
+ return restconf.readConfigurationData(identifier, uriInfo);
+ }
+
+ @Override
+ public NormalizedNodeContext readOperationalData(final String identifier, final UriInfo uriInfo) {
+ return restconf.readOperationalData(identifier, uriInfo);
+ }
+
+ @Override
+ public Response updateConfigurationData(final String identifier, final Node<?> payload) {
+ return restconf.updateConfigurationData(identifier, payload);
+ }
+
+ @Override
+ public Response createConfigurationData(final String identifier, final Node<?> payload) {
+ return restconf.createConfigurationData(identifier, payload);
+ }
+
+ @Override
+ public Response createConfigurationData(final Node<?> payload) {
+ return restconf.createConfigurationData(payload);
+ }
+
+ @Override
+ public Response deleteConfigurationData(final String identifier) {
+ return restconf.deleteConfigurationData(identifier);
+ }
+
+ @Override
+ public Response subscribeToStream(final String identifier, final UriInfo uriInfo) {
+ return restconf.subscribeToStream(identifier, uriInfo);
+ }
+
+ @Override
+ public StructuredData getAvailableStreams(final UriInfo uriInfo) {
+ return restconf.getAvailableStreams(uriInfo);
+ }
+
+ @Override
+ public SchemaExportContext getSchema(final String mountId) {
+ return schema.getSchema(mountId);
+ }
+}
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Provider
@Produces({ Draft02.MediaTypes.API + RestconfService.JSON, Draft02.MediaTypes.DATA + RestconfService.JSON,
Draft02.MediaTypes.OPERATION + RestconfService.JSON, MediaType.APPLICATION_JSON })
public void writeTo(final StructuredData t, final Class<?> type, final Type genericType, final Annotation[] annotations,
final MediaType mediaType, final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream)
throws IOException, WebApplicationException {
- CompositeNode data = t.getData();
+ final CompositeNode data = t.getData();
if (data == null) {
throw new RestconfDocumentedException(Response.Status.NOT_FOUND);
}
- JsonWriter writer = new JsonWriter(new OutputStreamWriter(entityStream, Charsets.UTF_8));
+ final JsonWriter writer = new JsonWriter(new OutputStreamWriter(entityStream, Charsets.UTF_8));
if (t.isPrettyPrintMode()) {
writer.setIndent(" ");
} else {
writer.setIndent("");
}
- JsonMapper jsonMapper = new JsonMapper(t.getMountPoint());
+ final JsonMapper jsonMapper = new JsonMapper(t.getMountPoint());
jsonMapper.write(writer, data, (DataNodeContainer) t.getSchema());
writer.flush();
}
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Provider
@Produces({ Draft02.MediaTypes.API + RestconfService.XML, Draft02.MediaTypes.DATA + RestconfService.XML,
Draft02.MediaTypes.OPERATION + RestconfService.XML, MediaType.APPLICATION_XML, MediaType.TEXT_XML })
final Transformer ret;
try {
ret = FACTORY.newTransformer();
- } catch (TransformerConfigurationException e) {
+ } catch (final TransformerConfigurationException e) {
LOG.error("Failed to instantiate XML transformer", e);
throw new IllegalStateException("XML encoding currently unavailable", e);
}
final Annotation[] annotations, final MediaType mediaType,
final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream) throws IOException,
WebApplicationException {
- CompositeNode data = t.getData();
+ final CompositeNode data = t.getData();
if (data == null) {
throw new RestconfDocumentedException(Response.Status.NOT_FOUND);
}
} else {
trans.setOutputProperty(OutputKeys.INDENT, "no");
}
- } catch (RuntimeException e) {
+ } catch (final RuntimeException e) {
throw new RestconfDocumentedException(e.getMessage(), ErrorType.TRANSPORT, ErrorTag.OPERATION_FAILED);
}
final Document domTree = new XmlMapper().write(data, (DataNodeContainer) t.getSchema());
try {
trans.transform(new DOMSource(domTree), new StreamResult(entityStream));
- } catch (TransformerException e) {
+ } catch (final TransformerException e) {
LOG.error("Error during translation of Document to OutputStream", e);
throw new RestconfDocumentedException(e.getMessage(), ErrorType.TRANSPORT, ErrorTag.OPERATION_FAILED);
}
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Provider
@Consumes({ Draft02.MediaTypes.DATA + RestconfService.XML, Draft02.MediaTypes.OPERATION + RestconfService.XML,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
@Override
public Node<?> readFrom(final Class<Node<?>> type, final Type genericType, final Annotation[] annotations,
- MediaType mediaType, MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
+ final MediaType mediaType, final MultivaluedMap<String, String> httpHeaders, final InputStream entityStream)
throws IOException, WebApplicationException {
- XmlToCompositeNodeReader xmlReader = new XmlToCompositeNodeReader();
+ final XmlToCompositeNodeReader xmlReader = new XmlToCompositeNodeReader();
try {
return xmlReader.read(entityStream);
} catch (XMLStreamException | UnsupportedFormatException e) {
package org.opendaylight.controller.sal.rest.impl;
import static com.google.common.base.Preconditions.checkArgument;
-
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.opendaylight.controller.sal.restconf.impl.SimpleNodeWrapper;
import org.opendaylight.yangtools.yang.data.api.Node;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Deprecated
public class XmlToCompositeNodeReader {
eventReader = xmlInputFactory.createXMLEventReader(entityStream);
if (eventReader.hasNext()) {
- XMLEvent element = eventReader.peek();
+ final XMLEvent element = eventReader.peek();
if (element.isStartDocument()) {
eventReader.nextEvent();
}
private boolean isSimpleNodeEvent(final XMLEvent event) throws XMLStreamException {
checkArgument(event != null, "XML Event cannot be NULL!");
if (event.isStartElement()) {
- XMLEvent innerEvent = skipCommentsAndWhitespace();
+ final XMLEvent innerEvent = skipCommentsAndWhitespace();
if (innerEvent != null && (innerEvent.isCharacters() || innerEvent.isEndElement())) {
return true;
}
private boolean isCompositeNodeEvent(final XMLEvent event) throws XMLStreamException {
checkArgument(event != null, "XML Event cannot be NULL!");
if (event.isStartElement()) {
- XMLEvent innerEvent = skipCommentsAndWhitespace();
+ final XMLEvent innerEvent = skipCommentsAndWhitespace();
if (innerEvent != null) {
if (innerEvent.isStartElement()) {
return true;
private XMLEvent skipCommentsAndWhitespace() throws XMLStreamException {
while (eventReader.hasNext()) {
- XMLEvent event = eventReader.peek();
+ final XMLEvent event = eventReader.peek();
if (event.getEventType() == XMLStreamConstants.COMMENT) {
eventReader.nextEvent();
continue;
}
if (event.isCharacters()) {
- Characters chars = event.asCharacters();
+ final Characters chars = event.asCharacters();
if (chars.isWhiteSpace()) {
eventReader.nextEvent();
continue;
private NodeWrapper<? extends Node<?>> resolveSimpleNodeFromStartElement(final StartElement startElement)
throws XMLStreamException {
checkArgument(startElement != null, "Start Element cannot be NULL!");
- String data = getValueOf(startElement);
+ final String data = getValueOf(startElement);
if (data == null) {
return new EmptyNodeWrapper(getNamespaceFor(startElement), getLocalNameFor(startElement));
}
}
private URI getNamespaceFor(final StartElement startElement) {
- String namespaceURI = startElement.getName().getNamespaceURI();
+ final String namespaceURI = startElement.getName().getNamespaceURI();
return namespaceURI.isEmpty() ? null : URI.create(namespaceURI);
}
private Object resolveValueOfElement(final String value, final StartElement startElement) {
// it could be instance-identifier Built-In Type
if (value.startsWith("/")) {
- IdentityValuesDTO iiValue = RestUtil.asInstanceIdentifier(value, new RestUtil.PrefixMapingFromXml(
+ final IdentityValuesDTO iiValue = RestUtil.asInstanceIdentifier(value, new RestUtil.PrefixMapingFromXml(
startElement));
if (iiValue != null) {
return iiValue;
}
}
// it could be identityref Built-In Type
- String[] namespaceAndValue = value.split(":");
+ final String[] namespaceAndValue = value.split(":");
if (namespaceAndValue.length == 2) {
- String namespace = startElement.getNamespaceContext().getNamespaceURI(namespaceAndValue[0]);
+ final String namespace = startElement.getNamespaceContext().getNamespaceURI(namespaceAndValue[0]);
if (namespace != null && !namespace.isEmpty()) {
return new IdentityValuesDTO(namespace, namespaceAndValue[1], namespaceAndValue[0], value);
}
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public final class CompositeNodeWrapper implements NodeWrapper<CompositeNode>, CompositeNode {
private MutableCompositeNode compositeNode;
name = new QName(namespace, localName);
}
- List<Node<?>> nodeValues = new ArrayList<>(values.size());
- for (NodeWrapper<?> nodeWrapper : values) {
+ final List<Node<?>> nodeValues = new ArrayList<>(values.size());
+ for (final NodeWrapper<?> nodeWrapper : values) {
nodeValues.add(nodeWrapper.unwrap());
}
compositeNode = NodeFactory.createMutableCompositeNode(name, null, nodeValues, null, null);
import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public final class EmptyNodeWrapper implements NodeWrapper<Node<?>>, Node<Void> {
private Node<?> unwrapped;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.Node;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public interface NodeWrapper<T extends Node<?>> {
void setQname(QName name);
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.InstanceIdentifierBuilder;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModifiedNodeDoesNotExistException;
import org.opendaylight.yangtools.yang.data.composite.node.schema.cnsn.parser.CnSnToNormalizedNodeParserFactory;
import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.AnyXmlSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
try {
EVENT_SUBSCRIPTION_AUGMENT_REVISION = new SimpleDateFormat("yyyy-MM-dd").parse("2014-07-08");
NETCONF_BASE_QNAME = QName.create(QNameModule.create(new URI(NETCONF_BASE), null), NETCONF_BASE_PAYLOAD_NAME );
- } catch (ParseException e) {
+ } catch (final ParseException e) {
throw new RestconfDocumentedException(
"It wasn't possible to convert revision date of sal-remote-augment to date", ErrorType.APPLICATION,
ErrorTag.OPERATION_FAILED);
- } catch (URISyntaxException e) {
+ } catch (final URISyntaxException e) {
throw new RestconfDocumentedException(
"It wasn't possible to create instance of URI class with "+NETCONF_BASE+" URI", ErrorType.APPLICATION,
ErrorTag.OPERATION_FAILED);
}
@Override
- public StructuredData getModules(final UriInfo uriInfo) {
- final Module restconfModule = this.getRestconfModule();
+ public NormalizedNodeContext getModules(final UriInfo uriInfo) {
+ final Set<Module> allModules = controllerContext.getAllModules();
+ final MapNode allModuleMap = makeModuleMapNode(allModules);
- final List<Node<?>> modulesAsData = new ArrayList<Node<?>>();
- final DataSchemaNode moduleSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
- Draft02.RestConfModule.MODULE_LIST_SCHEMA_NODE);
+ final SchemaContext schemaContext = controllerContext.getGlobalSchema();
- Set<Module> allModules = this.controllerContext.getAllModules();
- for (final Module module : allModules) {
- CompositeNode moduleCompositeNode = this.toModuleCompositeNode(module, moduleSchemaNode);
- modulesAsData.add(moduleCompositeNode);
- }
+ final Module restconfModule = getRestconfModule();
+ final DataSchemaNode modulesSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(
+ restconfModule, Draft02.RestConfModule.MODULES_CONTAINER_SCHEMA_NODE);
+ Preconditions.checkState(modulesSchemaNode instanceof ContainerSchemaNode);
+
+ final DataContainerNodeAttrBuilder<NodeIdentifier, ContainerNode> moduleContainerBuilder =
+ Builders.containerBuilder((ContainerSchemaNode) modulesSchemaNode);
+ moduleContainerBuilder.withChild(allModuleMap);
- final DataSchemaNode modulesSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
- Draft02.RestConfModule.MODULES_CONTAINER_SCHEMA_NODE);
- QName qName = modulesSchemaNode.getQName();
- final CompositeNode modulesNode = NodeFactory.createImmutableCompositeNode(qName, null, modulesAsData);
- return new StructuredData(modulesNode, modulesSchemaNode, null, parsePrettyPrintParameter(uriInfo));
+ return new NormalizedNodeContext(new InstanceIdentifierContext(null, modulesSchemaNode,
+ null, schemaContext), moduleContainerBuilder.build());
}
+ /**
+ * Valid only for mount point
+ */
@Override
- public StructuredData getAvailableStreams(final UriInfo uriInfo) {
- Set<String> availableStreams = Notificator.getStreamNames();
-
- final List<Node<?>> streamsAsData = new ArrayList<Node<?>>();
- Module restconfModule = this.getRestconfModule();
- final DataSchemaNode streamSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
- Draft02.RestConfModule.STREAM_LIST_SCHEMA_NODE);
- for (final String streamName : availableStreams) {
- streamsAsData.add(this.toStreamCompositeNode(streamName, streamSchemaNode));
+ public NormalizedNodeContext getModules(final String identifier, final UriInfo uriInfo) {
+ Preconditions.checkNotNull(identifier);
+ if ( ! identifier.contains(ControllerContext.MOUNT)) {
+ final String errMsg = "URI has bad format. If modules behind mount point should be showed,"
+ + " URI has to end with " + ControllerContext.MOUNT;
+ throw new RestconfDocumentedException(errMsg, ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
}
- final DataSchemaNode streamsSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
- Draft02.RestConfModule.STREAMS_CONTAINER_SCHEMA_NODE);
- QName qName = streamsSchemaNode.getQName();
- final CompositeNode streamsNode = NodeFactory.createImmutableCompositeNode(qName, null, streamsAsData);
- return new StructuredData(streamsNode, streamsSchemaNode, null, parsePrettyPrintParameter(uriInfo));
+ final InstanceIdentifierContext mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
+ final DOMMountPoint mountPoint = mountPointIdentifier.getMountPoint();
+ final Set<Module> modules = controllerContext.getAllModules(mountPoint);
+ final SchemaContext schemaContext = mountPoint.getSchemaContext();
+ final MapNode mountPointModulesMap = makeModuleMapNode(modules);
+
+ final Module restconfModule = getRestconfModule();
+ final DataSchemaNode modulesSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(
+ restconfModule, Draft02.RestConfModule.MODULES_CONTAINER_SCHEMA_NODE);
+ Preconditions.checkState(modulesSchemaNode instanceof ContainerSchemaNode);
+
+ final DataContainerNodeAttrBuilder<NodeIdentifier, ContainerNode> moduleContainerBuilder =
+ Builders.containerBuilder((ContainerSchemaNode) modulesSchemaNode);
+ moduleContainerBuilder.withChild(mountPointModulesMap);
+
+ return new NormalizedNodeContext(new InstanceIdentifierContext(null, modulesSchemaNode,
+ mountPoint, schemaContext), moduleContainerBuilder.build());
}
@Override
- public StructuredData getModules(final String identifier, final UriInfo uriInfo) {
- Set<Module> modules = null;
+ public NormalizedNodeContext getModule(final String identifier, final UriInfo uriInfo) {
+ Preconditions.checkNotNull(identifier);
+ final QName moduleNameAndRevision = getModuleNameAndRevision(identifier);
+ Module module = null;
DOMMountPoint mountPoint = null;
+ final SchemaContext schemaContext;
if (identifier.contains(ControllerContext.MOUNT)) {
- InstanceIdentifierContext mountPointIdentifier = this.controllerContext.toMountPointIdentifier(identifier);
+ final InstanceIdentifierContext mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
mountPoint = mountPointIdentifier.getMountPoint();
- modules = this.controllerContext.getAllModules(mountPoint);
+ module = controllerContext.findModuleByNameAndRevision(mountPoint, moduleNameAndRevision);
+ schemaContext = mountPoint.getSchemaContext();
} else {
- throw new RestconfDocumentedException(
- "URI has bad format. If modules behind mount point should be showed, URI has to end with "
- + ControllerContext.MOUNT, ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
+ module = controllerContext.findModuleByNameAndRevision(moduleNameAndRevision);
+ schemaContext = controllerContext.getGlobalSchema();
}
- final List<Node<?>> modulesAsData = new ArrayList<Node<?>>();
- Module restconfModule = this.getRestconfModule();
- final DataSchemaNode moduleSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
- Draft02.RestConfModule.MODULE_LIST_SCHEMA_NODE);
-
- for (final Module module : modules) {
- modulesAsData.add(this.toModuleCompositeNode(module, moduleSchemaNode));
+ if (module == null) {
+ final String errMsg = "Module with name '" + moduleNameAndRevision.getLocalName()
+ + "' and revision '" + moduleNameAndRevision.getRevision() + "' was not found.";
+ throw new RestconfDocumentedException(errMsg, ErrorType.PROTOCOL, ErrorTag.UNKNOWN_ELEMENT);
}
- final DataSchemaNode modulesSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
- Draft02.RestConfModule.MODULES_CONTAINER_SCHEMA_NODE);
- QName qName = modulesSchemaNode.getQName();
- final CompositeNode modulesNode = NodeFactory.createImmutableCompositeNode(qName, null, modulesAsData);
- return new StructuredData(modulesNode, modulesSchemaNode, mountPoint, parsePrettyPrintParameter(uriInfo));
+ final Module restconfModule = getRestconfModule();
+ final Set<Module> modules = Collections.singleton(module);
+ final MapNode moduleMap = makeModuleMapNode(modules);
+
+ final DataSchemaNode moduleSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(
+ restconfModule, Draft02.RestConfModule.MODULE_LIST_SCHEMA_NODE);
+ Preconditions.checkState(moduleSchemaNode instanceof ListSchemaNode);
+
+ return new NormalizedNodeContext(new InstanceIdentifierContext(null, moduleSchemaNode, mountPoint,
+ schemaContext), moduleMap);
}
@Override
- public StructuredData getModule(final String identifier, final UriInfo uriInfo) {
- final QName moduleNameAndRevision = this.getModuleNameAndRevision(identifier);
- Module module = null;
- DOMMountPoint mountPoint = null;
- if (identifier.contains(ControllerContext.MOUNT)) {
- InstanceIdentifierContext mountPointIdentifier = this.controllerContext.toMountPointIdentifier(identifier);
- mountPoint = mountPointIdentifier.getMountPoint();
- module = this.controllerContext.findModuleByNameAndRevision(mountPoint, moduleNameAndRevision);
- } else {
- module = this.controllerContext.findModuleByNameAndRevision(moduleNameAndRevision);
- }
+ public StructuredData getAvailableStreams(final UriInfo uriInfo) {
+ final Set<String> availableStreams = Notificator.getStreamNames();
- if (module == null) {
- throw new RestconfDocumentedException("Module with name '" + moduleNameAndRevision.getLocalName()
- + "' and revision '" + moduleNameAndRevision.getRevision() + "' was not found.",
- ErrorType.PROTOCOL, ErrorTag.UNKNOWN_ELEMENT);
+ final List<Node<?>> streamsAsData = new ArrayList<Node<?>>();
+ final Module restconfModule = getRestconfModule();
+ final DataSchemaNode streamSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
+ Draft02.RestConfModule.STREAM_LIST_SCHEMA_NODE);
+ for (final String streamName : availableStreams) {
+ streamsAsData.add(toStreamCompositeNode(streamName, streamSchemaNode));
}
- Module restconfModule = this.getRestconfModule();
- final DataSchemaNode moduleSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
- Draft02.RestConfModule.MODULE_LIST_SCHEMA_NODE);
- final CompositeNode moduleNode = this.toModuleCompositeNode(module, moduleSchemaNode);
- return new StructuredData(moduleNode, moduleSchemaNode, mountPoint, parsePrettyPrintParameter(uriInfo));
+ final DataSchemaNode streamsSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(restconfModule,
+ Draft02.RestConfModule.STREAMS_CONTAINER_SCHEMA_NODE);
+ final QName qName = streamsSchemaNode.getQName();
+ final CompositeNode streamsNode = NodeFactory.createImmutableCompositeNode(qName, null, streamsAsData);
+ return new StructuredData(streamsNode, streamsSchemaNode, null, parsePrettyPrintParameter(uriInfo));
}
@Override
public StructuredData getOperations(final UriInfo uriInfo) {
- Set<Module> allModules = this.controllerContext.getAllModules();
- return this.operationsFromModulesToStructuredData(allModules, null, parsePrettyPrintParameter(uriInfo));
+ final Set<Module> allModules = controllerContext.getAllModules();
+ return operationsFromModulesToStructuredData(allModules, null, parsePrettyPrintParameter(uriInfo));
}
@Override
Set<Module> modules = null;
DOMMountPoint mountPoint = null;
if (identifier.contains(ControllerContext.MOUNT)) {
- InstanceIdentifierContext mountPointIdentifier = this.controllerContext.toMountPointIdentifier(identifier);
+ final InstanceIdentifierContext mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
mountPoint = mountPointIdentifier.getMountPoint();
- modules = this.controllerContext.getAllModules(mountPoint);
+ modules = controllerContext.getAllModules(mountPoint);
} else {
throw new RestconfDocumentedException(
"URI has bad format. If operations behind mount point should be showed, URI has to end with "
+ ControllerContext.MOUNT, ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
}
- return this.operationsFromModulesToStructuredData(modules, mountPoint, parsePrettyPrintParameter(uriInfo));
+ return operationsFromModulesToStructuredData(modules, mountPoint, parsePrettyPrintParameter(uriInfo));
}
private StructuredData operationsFromModulesToStructuredData(final Set<Module> modules,
final DOMMountPoint mountPoint, final boolean prettyPrint) {
final List<Node<?>> operationsAsData = new ArrayList<Node<?>>();
- Module restconfModule = this.getRestconfModule();
+ final Module restconfModule = getRestconfModule();
final DataSchemaNode operationsSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(
restconfModule, Draft02.RestConfModule.OPERATIONS_CONTAINER_SCHEMA_NODE);
- QName qName = operationsSchemaNode.getQName();
- SchemaPath path = operationsSchemaNode.getPath();
- ContainerSchemaNodeBuilder containerSchemaNodeBuilder = new ContainerSchemaNodeBuilder(
+ final QName qName = operationsSchemaNode.getQName();
+ final SchemaPath path = operationsSchemaNode.getPath();
+ final ContainerSchemaNodeBuilder containerSchemaNodeBuilder = new ContainerSchemaNodeBuilder(
Draft02.RestConfModule.NAME, 0, qName, path);
final ContainerSchemaNodeBuilder fakeOperationsSchemaNode = containerSchemaNodeBuilder;
for (final Module module : modules) {
- Set<RpcDefinition> rpcs = module.getRpcs();
+ final Set<RpcDefinition> rpcs = module.getRpcs();
for (final RpcDefinition rpc : rpcs) {
- QName rpcQName = rpc.getQName();
- SimpleNode<Object> immutableSimpleNode = NodeFactory.<Object> createImmutableSimpleNode(rpcQName, null,
+ final QName rpcQName = rpc.getQName();
+ final SimpleNode<Object> immutableSimpleNode = NodeFactory.<Object> createImmutableSimpleNode(rpcQName, null,
null);
operationsAsData.add(immutableSimpleNode);
- String name = module.getName();
- LeafSchemaNodeBuilder leafSchemaNodeBuilder = new LeafSchemaNodeBuilder(name, 0, rpcQName,
+ final String name = module.getName();
+ final LeafSchemaNodeBuilder leafSchemaNodeBuilder = new LeafSchemaNodeBuilder(name, 0, rpcQName,
SchemaPath.create(true, QName.create("dummy")));
final LeafSchemaNodeBuilder fakeRpcSchemaNode = leafSchemaNodeBuilder;
fakeRpcSchemaNode.setAugmenting(true);
- EmptyType instance = EmptyType.getInstance();
+ final EmptyType instance = EmptyType.getInstance();
fakeRpcSchemaNode.setType(instance);
fakeOperationsSchemaNode.addChildNode(fakeRpcSchemaNode.build());
}
}
final CompositeNode operationsNode = NodeFactory.createImmutableCompositeNode(qName, null, operationsAsData);
- ContainerSchemaNode schemaNode = fakeOperationsSchemaNode.build();
+ final ContainerSchemaNode schemaNode = fakeOperationsSchemaNode.build();
return new StructuredData(operationsNode, schemaNode, mountPoint, prettyPrint);
}
private Module getRestconfModule() {
- Module restconfModule = controllerContext.getRestconfModule();
+ final Module restconfModule = controllerContext.getRestconfModule();
if (restconfModule == null) {
throw new RestconfDocumentedException("ietf-restconf module was not found.", ErrorType.APPLICATION,
ErrorTag.OPERATION_NOT_SUPPORTED);
moduleNameAndRevision = identifier;
}
- Splitter splitter = Splitter.on("/").omitEmptyStrings();
- Iterable<String> split = splitter.split(moduleNameAndRevision);
+ final Splitter splitter = Splitter.on("/").omitEmptyStrings();
+ final Iterable<String> split = splitter.split(moduleNameAndRevision);
final List<String> pathArgs = Lists.<String> newArrayList(split);
if (pathArgs.size() < 2) {
throw new RestconfDocumentedException(
try {
final String moduleName = pathArgs.get(0);
- String revision = pathArgs.get(1);
+ final String revision = pathArgs.get(1);
final Date moduleRevision = REVISION_FORMAT.parse(revision);
return QName.create(null, moduleRevision, moduleName);
- } catch (ParseException e) {
+ } catch (final ParseException e) {
throw new RestconfDocumentedException("URI has bad format. It should be \'moduleName/yyyy-MM-dd\'",
ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
}
instanceDataChildrenByName = ControllerContext.findInstanceDataChildrenByName(
((DataNodeContainer) moduleSchemaNode), "revision");
final DataSchemaNode revisionSchemaNode = Iterables.getFirst(instanceDataChildrenByName, null);
- Date _revision = module.getRevision();
+ final Date _revision = module.getRevision();
moduleNodeValues.add(NodeFactory.<String> createImmutableSimpleNode(revisionSchemaNode.getQName(), null,
REVISION_FORMAT.format(_revision)));
@Override
public StructuredData invokeRpc(final String identifier, final CompositeNode payload, final UriInfo uriInfo) {
- final RpcExecutor rpc = this.resolveIdentifierInInvokeRpc(identifier);
- QName rpcName = rpc.getRpcDefinition().getQName();
- URI rpcNamespace = rpcName.getNamespace();
+ final RpcExecutor rpc = resolveIdentifierInInvokeRpc(identifier);
+ final QName rpcName = rpc.getRpcDefinition().getQName();
+ final URI rpcNamespace = rpcName.getNamespace();
if (Objects.equal(rpcNamespace.toString(), SAL_REMOTE_NAMESPACE)
&& Objects.equal(rpcName.getLocalName(), SAL_REMOTE_RPC_SUBSRCIBE)) {
return invokeSalRemoteRpcSubscribeRPC(payload, rpc.getRpcDefinition(), parsePrettyPrintParameter(uriInfo));
final YangInstanceIdentifier pathIdentifier = ((YangInstanceIdentifier) pathValue);
String streamName = null;
if (!Iterables.isEmpty(pathIdentifier.getPathArguments())) {
- String fullRestconfIdentifier = this.controllerContext.toFullRestconfIdentifier(pathIdentifier);
+ final String fullRestconfIdentifier = controllerContext.toFullRestconfIdentifier(pathIdentifier);
LogicalDatastoreType datastore = parseEnumTypeParameter(value, LogicalDatastoreType.class,
DATASTORE_PARAM_NAME);
DOMMountPoint mountPoint = null;
if (identifier.contains(ControllerContext.MOUNT)) {
// mounted RPC call - look up mount instance.
- InstanceIdentifierContext mountPointId = controllerContext.toMountPointIdentifier(identifier);
+ final InstanceIdentifierContext mountPointId = controllerContext.toMountPointIdentifier(identifier);
mountPoint = mountPointId.getMountPoint();
- int startOfRemoteRpcName = identifier.lastIndexOf(ControllerContext.MOUNT)
+ final int startOfRemoteRpcName = identifier.lastIndexOf(ControllerContext.MOUNT)
+ ControllerContext.MOUNT.length() + 1;
- String remoteRpcName = identifier.substring(startOfRemoteRpcName);
+ final String remoteRpcName = identifier.substring(startOfRemoteRpcName);
identifierEncoded = remoteRpcName;
} else if (identifier.indexOf("/") != CHAR_NOT_FOUND) {
+ " couldn't be splitted to 2 parts (module:rpc name)", ErrorType.APPLICATION,
ErrorTag.INVALID_VALUE);
}
- for (Module module : schemaContext.getModules()) {
+ for (final Module module : schemaContext.getModules()) {
if (module.getName().equals(splittedIdentifier[0])) {
- for (RpcDefinition rpcDefinition : module.getRpcs()) {
+ for (final RpcDefinition rpcDefinition : module.getRpcs()) {
if (rpcDefinition.getQName().getLocalName().equals(splittedIdentifier[1])) {
return rpcDefinition;
}
}
CompositeNode rpcRequest = null;
- RpcDefinition rpc = rpcExecutor.getRpcDefinition();
- QName rpcName = rpc.getQName();
+ final RpcDefinition rpc = rpcExecutor.getRpcDefinition();
+ final QName rpcName = rpc.getQName();
if (payload == null) {
rpcRequest = NodeFactory.createMutableCompositeNode(rpcName, null, null, null, null);
} else {
final CompositeNode value = this.normalizeNode(payload, rpc.getInput(), null);
- List<Node<?>> input = Collections.<Node<?>> singletonList(value);
+ final List<Node<?>> input = Collections.<Node<?>> singletonList(value);
rpcRequest = NodeFactory.createMutableCompositeNode(rpcName, null, input, null, null);
}
- RpcResult<CompositeNode> rpcResult = rpcExecutor.invokeRpc(rpcRequest);
+ final RpcResult<CompositeNode> rpcResult = rpcExecutor.invokeRpc(rpcRequest);
checkRpcSuccessAndThrowException(rpcResult);
@Override
public NormalizedNodeContext readConfigurationData(final String identifier, final UriInfo uriInfo) {
final InstanceIdentifierContext iiWithData = controllerContext.toInstanceIdentifier(identifier);
- DOMMountPoint mountPoint = iiWithData.getMountPoint();
+ final DOMMountPoint mountPoint = iiWithData.getMountPoint();
NormalizedNode<?, ?> data = null;
YangInstanceIdentifier normalizedII;
if (mountPoint != null) {
}
if (node instanceof CompositeNode) {
- ImmutableList.Builder<Node<?>> newChildNodes = ImmutableList.<Node<?>> builder();
+ final ImmutableList.Builder<Node<?>> newChildNodes = ImmutableList.<Node<?>> builder();
if (depth > 1) {
- for (Node<?> childNode : ((CompositeNode) node).getValue()) {
+ for (final Node<?> childNode : ((CompositeNode) node).getValue()) {
newChildNodes.add(pruneDataAtDepth(childNode, depth - 1));
}
}
}
private Integer parseDepthParameter(final UriInfo info) {
- String param = info.getQueryParameters(false).getFirst(UriParameters.DEPTH.toString());
+ final String param = info.getQueryParameters(false).getFirst(UriParameters.DEPTH.toString());
if (Strings.isNullOrEmpty(param) || "unbounded".equals(param)) {
return null;
}
try {
- Integer depth = Integer.valueOf(param);
+ final Integer depth = Integer.valueOf(param);
if (depth < 1) {
throw new RestconfDocumentedException(new RestconfError(ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE,
"Invalid depth parameter: " + depth, null,
}
return depth;
- } catch (NumberFormatException e) {
+ } catch (final NumberFormatException e) {
throw new RestconfDocumentedException(new RestconfError(ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE,
"Invalid depth parameter: " + e.getMessage(), null,
"The depth parameter must be an integer > 1 or \"unbounded\""));
@Override
public NormalizedNodeContext readOperationalData(final String identifier, final UriInfo info) {
final InstanceIdentifierContext iiWithData = controllerContext.toInstanceIdentifier(identifier);
- DOMMountPoint mountPoint = iiWithData.getMountPoint();
+ final DOMMountPoint mountPoint = iiWithData.getMountPoint();
NormalizedNode<?, ?> data = null;
YangInstanceIdentifier normalizedII;
if (mountPoint != null) {
}
private boolean parsePrettyPrintParameter(final UriInfo info) {
- String param = info.getQueryParameters(false).getFirst(UriParameters.PRETTY_PRINT.toString());
+ final String param = info.getQueryParameters(false).getFirst(UriParameters.PRETTY_PRINT.toString());
return Boolean.parseBoolean(param);
}
@Override
public Response updateConfigurationData(final String identifier, final Node<?> payload) {
- final InstanceIdentifierContext iiWithData = this.controllerContext.toInstanceIdentifier(identifier);
+ final InstanceIdentifierContext iiWithData = controllerContext.toInstanceIdentifier(identifier);
validateInput(iiWithData.getSchemaNode(), payload);
- DOMMountPoint mountPoint = iiWithData.getMountPoint();
+ final DOMMountPoint mountPoint = iiWithData.getMountPoint();
validateTopLevelNodeName(payload, iiWithData.getInstanceIdentifier());
final CompositeNode value = this.normalizeNode(payload, iiWithData.getSchemaNode(), mountPoint);
validateListKeysEqualityInPayloadAndUri(iiWithData, value);
}
break;
- } catch (TransactionCommitFailedException e) {
+ } catch (final TransactionCommitFailedException e) {
if(e instanceof OptimisticLockFailedException) {
if(--tries <= 0) {
LOG.debug("Got OptimisticLockFailedException on last try - failing");
private void isEqualUriAndPayloadKeyValues(final Map<QName, Object> uriKeyValues, final CompositeNode payload,
final List<QName> keyDefinitions) {
- for (QName keyDefinition : keyDefinitions) {
+ for (final QName keyDefinition : keyDefinitions) {
final Object uriKeyValue = uriKeyValues.get(keyDefinition);
// should be caught during parsing URI to InstanceIdentifier
if (uriKeyValue == null) {
+ " in the message body.", ErrorType.PROTOCOL, ErrorTag.DATA_MISSING);
}
- Object payloadKeyValue = payloadKeyValues.iterator().next().getValue();
+ final Object payloadKeyValue = payloadKeyValues.iterator().next().getValue();
if (!uriKeyValue.equals(payloadKeyValue)) {
throw new RestconfDocumentedException("The value '" + uriKeyValue + "' for key '"
+ keyDefinition.getLocalName() + "' specified in the URI doesn't match the value '"
throw new RestconfDocumentedException("Input is required.", ErrorType.PROTOCOL, ErrorTag.MALFORMED_MESSAGE);
}
- URI payloadNS = this.namespace(payload);
+ final URI payloadNS = namespace(payload);
if (payloadNS == null) {
throw new RestconfDocumentedException(
"Data has bad format. Root element node must have namespace (XML format) or module name(JSON format)",
InstanceIdentifierContext iiWithData = null;
CompositeNode value = null;
- if (this.representsMountPointRootData(payload)) {
+ if (representsMountPointRootData(payload)) {
// payload represents mount point data and URI represents path to the mount point
- if (this.endsWithMountPoint(identifier)) {
+ if (endsWithMountPoint(identifier)) {
throw new RestconfDocumentedException("URI has bad format. URI should be without \""
+ ControllerContext.MOUNT + "\" for POST operation.", ErrorType.PROTOCOL,
ErrorTag.INVALID_VALUE);
}
- final String completeIdentifier = this.addMountPointIdentifier(identifier);
- iiWithData = this.controllerContext.toInstanceIdentifier(completeIdentifier);
+ final String completeIdentifier = addMountPointIdentifier(identifier);
+ iiWithData = controllerContext.toInstanceIdentifier(completeIdentifier);
value = this.normalizeNode(payload, iiWithData.getSchemaNode(), iiWithData.getMountPoint());
} else {
- final InstanceIdentifierContext incompleteInstIdWithData = this.controllerContext
+ final InstanceIdentifierContext incompleteInstIdWithData = controllerContext
.toInstanceIdentifier(identifier);
final DataNodeContainer parentSchema = (DataNodeContainer) incompleteInstIdWithData.getSchemaNode();
- DOMMountPoint mountPoint = incompleteInstIdWithData.getMountPoint();
+ final DOMMountPoint mountPoint = incompleteInstIdWithData.getMountPoint();
final Module module = findModule(mountPoint, payload);
- String payloadName = this.getName(payload);
+ final String payloadName = getName(payload);
final DataSchemaNode schemaNode = ControllerContext.findInstanceDataChildByNameAndNamespace(
parentSchema, payloadName, module.getNamespace());
value = this.normalizeNode(payload, schemaNode, mountPoint);
final NormalizedNode<?, ?> datastoreNormalizedData = compositeNodeToDatastoreNormalizedNode(value,
iiWithData.getSchemaNode());
- DOMMountPoint mountPoint = iiWithData.getMountPoint();
+ final DOMMountPoint mountPoint = iiWithData.getMountPoint();
YangInstanceIdentifier normalizedII;
try {
normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
broker.commitConfigurationDataPost(normalizedII, datastoreNormalizedData);
}
- } catch(RestconfDocumentedException e) {
+ } catch(final RestconfDocumentedException e) {
throw e;
- } catch (Exception e) {
+ } catch (final Exception e) {
throw new RestconfDocumentedException("Error creating data", e);
}
throw new RestconfDocumentedException("Input is required.", ErrorType.PROTOCOL, ErrorTag.MALFORMED_MESSAGE);
}
- URI payloadNS = this.namespace(payload);
+ final URI payloadNS = namespace(payload);
if (payloadNS == null) {
throw new RestconfDocumentedException(
"Data has bad format. Root element node must have namespace (XML format) or module name(JSON format)",
final Module module = this.findModule(null, payload);
- String payloadName = this.getName(payload);
+ final String payloadName = getName(payload);
final DataSchemaNode schemaNode = ControllerContext.findInstanceDataChildByNameAndNamespace(module,
payloadName, module.getNamespace());
final CompositeNode value = this.normalizeNode(payload, schemaNode, null);
- final InstanceIdentifierContext iiWithData = this.addLastIdentifierFromData(null, value, schemaNode,ControllerContext.getInstance().getGlobalSchema());
+ final InstanceIdentifierContext iiWithData = addLastIdentifierFromData(null, value, schemaNode,ControllerContext.getInstance().getGlobalSchema());
final NormalizedNode<?, ?> datastoreNormalizedData = compositeNodeToDatastoreNormalizedNode(value, schemaNode);
- DOMMountPoint mountPoint = iiWithData.getMountPoint();
+ final DOMMountPoint mountPoint = iiWithData.getMountPoint();
YangInstanceIdentifier normalizedII;
try {
normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
broker.commitConfigurationDataPost(normalizedII, datastoreNormalizedData);
}
- } catch(RestconfDocumentedException e) {
+ } catch(final RestconfDocumentedException e) {
throw e;
- } catch (Exception e) {
+ } catch (final Exception e) {
throw new RestconfDocumentedException("Error creating data", e);
}
@Override
public Response deleteConfigurationData(final String identifier) {
final InstanceIdentifierContext iiWithData = controllerContext.toInstanceIdentifier(identifier);
- DOMMountPoint mountPoint = iiWithData.getMountPoint();
+ final DOMMountPoint mountPoint = iiWithData.getMountPoint();
YangInstanceIdentifier normalizedII;
try {
normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
broker.commitConfigurationDataDelete(normalizedII).get();
}
- } catch (Exception e) {
+ } catch (final Exception e) {
final Optional<Throwable> searchedException = Iterables.tryFind(Throwables.getCausalChain(e),
Predicates.instanceOf(ModifiedNodeDoesNotExistException.class));
if (searchedException.isPresent()) {
throw new RestconfDocumentedException("Stream was not found.", ErrorType.PROTOCOL, ErrorTag.UNKNOWN_ELEMENT);
}
- Map<String, String> paramToValues = resolveValuesFromUri(identifier);
- LogicalDatastoreType datastore = parserURIEnumParameter(LogicalDatastoreType.class,
+ final Map<String, String> paramToValues = resolveValuesFromUri(identifier);
+ final LogicalDatastoreType datastore = parserURIEnumParameter(LogicalDatastoreType.class,
paramToValues.get(DATASTORE_PARAM_NAME));
if (datastore == null) {
throw new RestconfDocumentedException("Stream name doesn't contains datastore value (pattern /datastore=)",
ErrorType.APPLICATION, ErrorTag.MISSING_ATTRIBUTE);
}
- DataChangeScope scope = parserURIEnumParameter(DataChangeScope.class, paramToValues.get(SCOPE_PARAM_NAME));
+ final DataChangeScope scope = parserURIEnumParameter(DataChangeScope.class, paramToValues.get(SCOPE_PARAM_NAME));
if (scope == null) {
throw new RestconfDocumentedException("Stream name doesn't contains datastore value (pattern /scope=)",
ErrorType.APPLICATION, ErrorTag.MISSING_ATTRIBUTE);
final UriBuilder uriBuilder = uriInfo.getAbsolutePathBuilder();
int notificationPort = NOTIFICATION_PORT;
try {
- WebSocketServer webSocketServerInstance = WebSocketServer.getInstance();
+ final WebSocketServer webSocketServerInstance = WebSocketServer.getInstance();
notificationPort = webSocketServerInstance.getPort();
- } catch (NullPointerException e) {
+ } catch (final NullPointerException e) {
WebSocketServer.createInstance(NOTIFICATION_PORT);
}
- UriBuilder port = uriBuilder.port(notificationPort);
+ final UriBuilder port = uriBuilder.port(notificationPort);
final URI uriToWebsocketServer = port.replacePath(streamName).build();
return Response.status(Status.OK).location(uriToWebsocketServer).build();
*/
private <T> T parseEnumTypeParameter(final CompositeNode compNode, final Class<T> classDescriptor,
final String paramName) {
- QNameModule salRemoteAugment = QNameModule.create(NAMESPACE_EVENT_SUBSCRIPTION_AUGMENT,
+ final QNameModule salRemoteAugment = QNameModule.create(NAMESPACE_EVENT_SUBSCRIPTION_AUGMENT,
EVENT_SUBSCRIPTION_AUGMENT_REVISION);
- SimpleNode<?> simpleNode = compNode.getFirstSimpleByName(QName.create(salRemoteAugment, paramName));
+ final SimpleNode<?> simpleNode = compNode.getFirstSimpleByName(QName.create(salRemoteAugment, paramName));
if (simpleNode == null) {
return null;
}
- Object rawValue = simpleNode.getValue();
+ final Object rawValue = simpleNode.getValue();
if (!(rawValue instanceof String)) {
return null;
}
}
private <T> T resolveAsEnum(final Class<T> classDescriptor, final String value) {
- T[] enumConstants = classDescriptor.getEnumConstants();
+ final T[] enumConstants = classDescriptor.getEnumConstants();
if (enumConstants != null) {
- for (T enm : classDescriptor.getEnumConstants()) {
+ for (final T enm : classDescriptor.getEnumConstants()) {
if (((Enum<?>) enm).name().equals(value)) {
return enm;
}
}
private Map<String, String> resolveValuesFromUri(final String uri) {
- Map<String, String> result = new HashMap<>();
- String[] tokens = uri.split("/");
+ final Map<String, String> result = new HashMap<>();
+ final String[] tokens = uri.split("/");
for (int i = 1; i < tokens.length; i++) {
- String[] parameterTokens = tokens[i].split("=");
+ final String[] parameterTokens = tokens[i].split("=");
if (parameterTokens.length == 2) {
result.put(parameterTokens[0], parameterTokens[1]);
}
if (data instanceof NodeWrapper) {
module = findModule(mountPoint, (NodeWrapper<?>) data);
} else if (data != null) {
- URI namespace = data.getNodeType().getNamespace();
+ final URI namespace = data.getNodeType().getNamespace();
if (mountPoint != null) {
- module = this.controllerContext.findModuleByNamespace(mountPoint, namespace);
+ module = controllerContext.findModuleByNamespace(mountPoint, namespace);
} else {
- module = this.controllerContext.findModuleByNamespace(namespace);
+ module = controllerContext.findModuleByNamespace(namespace);
}
}
if (module != null) {
}
private Module findModule(final DOMMountPoint mountPoint, final NodeWrapper<?> data) {
- URI namespace = data.getNamespace();
+ final URI namespace = data.getNamespace();
Preconditions.<URI> checkNotNull(namespace);
Module module = null;
if (mountPoint != null) {
- module = this.controllerContext.findModuleByNamespace(mountPoint, namespace);
+ module = controllerContext.findModuleByNamespace(mountPoint, namespace);
if (module == null) {
- module = this.controllerContext.findModuleByName(mountPoint, namespace.toString());
+ module = controllerContext.findModuleByName(mountPoint, namespace.toString());
}
} else {
- module = this.controllerContext.findModuleByNamespace(namespace);
+ module = controllerContext.findModuleByNamespace(namespace);
if (module == null) {
- module = this.controllerContext.findModuleByName(namespace.toString());
+ module = controllerContext.findModuleByName(namespace.toString());
}
}
}
private InstanceIdentifierContext addLastIdentifierFromData(final InstanceIdentifierContext identifierWithSchemaNode,
- final CompositeNode data, final DataSchemaNode schemaOfData, SchemaContext schemaContext) {
+ final CompositeNode data, final DataSchemaNode schemaOfData, final SchemaContext schemaContext) {
YangInstanceIdentifier instanceIdentifier = null;
if (identifierWithSchemaNode != null) {
instanceIdentifier = identifierWithSchemaNode.getInstanceIdentifier();
}
if ((schemaOfData instanceof ListSchemaNode)) {
- HashMap<QName, Object> keys = this.resolveKeysFromData(((ListSchemaNode) schemaOfData), data);
+ final HashMap<QName, Object> keys = resolveKeysFromData(((ListSchemaNode) schemaOfData), data);
iiBuilder.nodeWithKey(schemaOfData.getQName(), keys);
} else {
iiBuilder.node(schemaOfData.getQName());
}
- YangInstanceIdentifier instance = iiBuilder.toInstance();
+ final YangInstanceIdentifier instance = iiBuilder.toInstance();
DOMMountPoint mountPoint = null;
- SchemaContext schemaCtx = null;
+ final SchemaContext schemaCtx = null;
if (identifierWithSchemaNode != null) {
mountPoint = identifierWithSchemaNode.getMountPoint();
}
private HashMap<QName, Object> resolveKeysFromData(final ListSchemaNode listNode, final CompositeNode dataNode) {
final HashMap<QName, Object> keyValues = new HashMap<QName, Object>();
- List<QName> _keyDefinition = listNode.getKeyDefinition();
+ final List<QName> _keyDefinition = listNode.getKeyDefinition();
for (final QName key : _keyDefinition) {
SimpleNode<? extends Object> head = null;
- String localName = key.getLocalName();
- List<SimpleNode<? extends Object>> simpleNodesByName = dataNode.getSimpleNodesByName(localName);
+ final String localName = key.getLocalName();
+ final List<SimpleNode<? extends Object>> simpleNodesByName = dataNode.getSimpleNodesByName(localName);
if (simpleNodesByName != null) {
head = Iterables.getFirst(simpleNodesByName, null);
}
}
private boolean representsMountPointRootData(final Node<?> data) {
- URI namespace = this.namespace(data);
+ final URI namespace = namespace(data);
return (SchemaContext.NAME.getNamespace().equals(namespace) /*
* || MOUNT_POINT_MODULE_NAME .equals( namespace .
* toString( ) )
*/)
- && SchemaContext.NAME.getLocalName().equals(this.localName(data));
+ && SchemaContext.NAME.getLocalName().equals(localName(data));
}
private String addMountPointIdentifier(final String identifier) {
- boolean endsWith = identifier.endsWith("/");
+ final boolean endsWith = identifier.endsWith("/");
if (endsWith) {
return (identifier + ControllerContext.MOUNT);
}
private CompositeNode normalizeNode(final Node<?> node, final DataSchemaNode schema, final DOMMountPoint mountPoint) {
if (schema == null) {
- String localName = node == null ? null :
+ final String localName = node == null ? null :
node instanceof NodeWrapper ? ((NodeWrapper<?>)node).getLocalName() :
node.getNodeType().getLocalName();
if ((node instanceof NodeWrapper<?>)) {
NodeWrapper<?> nodeWrap = (NodeWrapper<?>) node;
- boolean isChangeAllowed = ((NodeWrapper<?>) node).isChangeAllowed();
+ final boolean isChangeAllowed = ((NodeWrapper<?>) node).isChangeAllowed();
if (isChangeAllowed) {
nodeWrap = topLevelElementAsCompositeNodeWrapper((NodeWrapper<?>) node, schema);
try {
this.normalizeNode(nodeWrap, schema, null, mountPoint);
- } catch (IllegalArgumentException e) {
- RestconfDocumentedException restconfDocumentedException = new RestconfDocumentedException(e.getMessage(), ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
+ } catch (final IllegalArgumentException e) {
+ final RestconfDocumentedException restconfDocumentedException = new RestconfDocumentedException(e.getMessage(), ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
restconfDocumentedException.addSuppressed(e);
throw restconfDocumentedException;
}
if (nodeBuilder.getQname() != null) {
currentAugment = previousAugment;
} else {
- currentAugment = this.normalizeNodeName(nodeBuilder, schema, previousAugment, mountPoint);
+ currentAugment = normalizeNodeName(nodeBuilder, schema, previousAugment, mountPoint);
if (nodeBuilder.getQname() == null) {
throw new RestconfDocumentedException(
"Data has bad format.\nIf data is in XML format then namespace for \""
}
private void normalizeAnyXmlNode(final CompositeNodeWrapper compositeNode, final AnyXmlSchemaNode schema) {
- List<NodeWrapper<?>> children = compositeNode.getValues();
- for (NodeWrapper<? extends Object> child : children) {
+ final List<NodeWrapper<?>> children = compositeNode.getValues();
+ for (final NodeWrapper<? extends Object> child : children) {
child.setNamespace(schema.getQName().getNamespace());
if (child instanceof CompositeNodeWrapper) {
normalizeAnyXmlNode((CompositeNodeWrapper) child, schema);
final DOMMountPoint mountPoint) {
final Object value = simpleNode.getValue();
Object inputValue = value;
- TypeDef typeDef = this.typeDefinition(schema);
+ final TypeDef typeDef = this.typeDefinition(schema);
TypeDefinition<? extends Object> typeDefinition = typeDef != null ? typeDef.typedef : null;
// For leafrefs, extract the type it is pointing to
if(typeDefinition instanceof LeafrefTypeDefinition) {
if (schema.getQName().equals(typeDef.qName)) {
- typeDefinition = SchemaContextUtil.getBaseTypeForLeafRef(((LeafrefTypeDefinition) typeDefinition), mountPoint == null ? this.controllerContext.getGlobalSchema() : mountPoint.getSchemaContext(), schema);
+ typeDefinition = SchemaContextUtil.getBaseTypeForLeafRef(((LeafrefTypeDefinition) typeDefinition), mountPoint == null ? controllerContext.getGlobalSchema() : mountPoint.getSchemaContext(), schema);
} else {
- typeDefinition = SchemaContextUtil.getBaseTypeForLeafRef(((LeafrefTypeDefinition) typeDefinition), mountPoint == null ? this.controllerContext.getGlobalSchema() : mountPoint.getSchemaContext(), typeDef.qName);
+ typeDefinition = SchemaContextUtil.getBaseTypeForLeafRef(((LeafrefTypeDefinition) typeDefinition), mountPoint == null ? controllerContext.getGlobalSchema() : mountPoint.getSchemaContext(), typeDef.qName);
}
}
Object outputValue = inputValue;
if (typeDefinition != null) {
- Codec<Object, Object> codec = RestCodec.from(typeDefinition, mountPoint);
+ final Codec<Object, Object> codec = RestCodec.from(typeDefinition, mountPoint);
outputValue = codec == null ? null : codec.deserialize(inputValue);
}
schema, child.getLocalName());
if (potentialSchemaNodes.size() > 1 && child.getNamespace() == null) {
- StringBuilder builder = new StringBuilder();
+ final StringBuilder builder = new StringBuilder();
for (final DataSchemaNode potentialSchemaNode : potentialSchemaNodes) {
builder.append(" ").append(potentialSchemaNode.getQName().getNamespace().toString()).append("\n");
}
boolean rightNodeSchemaFound = false;
for (final DataSchemaNode potentialSchemaNode : potentialSchemaNodes) {
if (!rightNodeSchemaFound) {
- final QName potentialCurrentAugment = this.normalizeNodeName(child, potentialSchemaNode,
+ final QName potentialCurrentAugment = normalizeNodeName(child, potentialSchemaNode,
currentAugment, mountPoint);
if (child.getQname() != null) {
this.normalizeNode(child, potentialSchemaNode, potentialCurrentAugment, mountPoint);
}
if ((schema instanceof ListSchemaNode)) {
- ListSchemaNode listSchemaNode = (ListSchemaNode) schema;
+ final ListSchemaNode listSchemaNode = (ListSchemaNode) schema;
final List<QName> listKeys = listSchemaNode.getKeyDefinition();
for (final QName listKey : listKeys) {
boolean foundKey = false;
private void checkNodeMultiplicityAccordingToSchema(final DataNodeContainer dataNodeContainer,
final List<NodeWrapper<?>> nodes) {
- Map<String, Integer> equalNodeNamesToCounts = new HashMap<String, Integer>();
- for (NodeWrapper<?> child : nodes) {
+ final Map<String, Integer> equalNodeNamesToCounts = new HashMap<String, Integer>();
+ for (final NodeWrapper<?> child : nodes) {
Integer count = equalNodeNamesToCounts.get(child.getLocalName());
equalNodeNamesToCounts.put(child.getLocalName(), count == null ? 1 : ++count);
}
- for (DataSchemaNode childSchemaNode : dataNodeContainer.getChildNodes()) {
+ for (final DataSchemaNode childSchemaNode : dataNodeContainer.getChildNodes()) {
if (childSchemaNode instanceof ContainerSchemaNode || childSchemaNode instanceof LeafSchemaNode) {
- String localName = childSchemaNode.getQName().getLocalName();
- Integer count = equalNodeNamesToCounts.get(localName);
+ final String localName = childSchemaNode.getQName().getLocalName();
+ final Integer count = equalNodeNamesToCounts.get(localName);
if (count != null && count > 1) {
throw new RestconfDocumentedException("Multiple input data elements were specified for '"
+ childSchemaNode.getQName().getLocalName()
private NormalizedNode<?, ?> compositeNodeToDatastoreNormalizedNode(final CompositeNode compNode,
final DataSchemaNode schema) {
- List<Node<?>> lst = new ArrayList<Node<?>>();
+ final List<Node<?>> lst = new ArrayList<Node<?>>();
lst.add(compNode);
if (schema instanceof ContainerSchemaNode) {
return CnSnToNormalizedNodeParserFactory.getInstance().getContainerNodeParser()
// TODO Auto-generated method stub
return null;
}
+
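+ /**
+  * Builds the restconf "module" list as an immutable MapNode, with one entry per supplied module.
+  */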
+ private MapNode makeModuleMapNode(final Set<Module> modules) {
+ Preconditions.checkNotNull(modules);
+ final Module restconfModule = getRestconfModule();
+ final DataSchemaNode moduleSchemaNode = controllerContext.getRestconfModuleRestConfSchemaNode(
+ restconfModule, Draft02.RestConfModule.MODULE_LIST_SCHEMA_NODE);
+ Preconditions.checkState(moduleSchemaNode instanceof ListSchemaNode);
+
+ final CollectionNodeBuilder<MapEntryNode, MapNode> listModuleBuilder = Builders
+ .mapBuilder((ListSchemaNode) moduleSchemaNode);
+
+ for (final Module module : modules) {
+ listModuleBuilder.withChild(toModuleEntryNode(module, moduleSchemaNode));
+ }
+ return listModuleBuilder.build();
+ }
+
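+ /**
+  * Builds a single "module" list entry containing the module's name, revision, namespace and
+  * feature leaf-list, validated against the restconf module list schema.
+  */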
+ protected MapEntryNode toModuleEntryNode(final Module module, final DataSchemaNode moduleSchemaNode) {
+ Preconditions.checkArgument(moduleSchemaNode instanceof ListSchemaNode,
+ "moduleSchemaNode has to be of type ListSchemaNode");\r final ListSchemaNode listModuleSchemaNode = (ListSchemaNode) moduleSchemaNode;\r final DataContainerNodeAttrBuilder<NodeIdentifierWithPredicates, MapEntryNode> moduleNodeValues = Builders\r .mapEntryBuilder(listModuleSchemaNode);\r\r List<DataSchemaNode> instanceDataChildrenByName = ControllerContext.findInstanceDataChildrenByName(\r (listModuleSchemaNode), "name");\r final DataSchemaNode nameSchemaNode = Iterables.getFirst(instanceDataChildrenByName, null);\r Preconditions.checkState(nameSchemaNode instanceof LeafSchemaNode);\r moduleNodeValues.withChild(Builders.leafBuilder((LeafSchemaNode) nameSchemaNode).withValue(module.getName())\r .build());\r\r instanceDataChildrenByName = ControllerContext.findInstanceDataChildrenByName(\r (listModuleSchemaNode), "revision");\r final DataSchemaNode revisionSchemaNode = Iterables.getFirst(instanceDataChildrenByName, null);\r Preconditions.checkState(revisionSchemaNode instanceof LeafSchemaNode);\r final String revision = REVISION_FORMAT.format(module.getRevision());\r moduleNodeValues.withChild(Builders.leafBuilder((LeafSchemaNode) revisionSchemaNode).withValue(revision)\r .build());\r\r instanceDataChildrenByName = ControllerContext.findInstanceDataChildrenByName(\r (listModuleSchemaNode), "namespace");\r final DataSchemaNode namespaceSchemaNode = Iterables.getFirst(instanceDataChildrenByName, null);\r Preconditions.checkState(namespaceSchemaNode instanceof LeafSchemaNode);\r moduleNodeValues.withChild(Builders.leafBuilder((LeafSchemaNode) namespaceSchemaNode)\r .withValue(module.getNamespace().toString()).build());\r\r instanceDataChildrenByName = ControllerContext.findInstanceDataChildrenByName(\r (listModuleSchemaNode), "feature");\r final DataSchemaNode featureSchemaNode = Iterables.getFirst(instanceDataChildrenByName, null);\r Preconditions.checkState(featureSchemaNode instanceof LeafListSchemaNode);\r final ListNodeBuilder<Object, LeafSetEntryNode<Object>> featuresBuilder = Builders\r .leafSetBuilder((LeafListSchemaNode) featureSchemaNode);\r for (final FeatureDefinition feature : module.getFeatures()) {\r featuresBuilder.withChild(Builders.leafSetEntryBuilder(((LeafListSchemaNode) featureSchemaNode))\r .withValue(feature.getQName().getLocalName()).build());\r }\r moduleNodeValues.withChild(featuresBuilder.build());
+\r return moduleNodeValues.build();\r }
}
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public final class SimpleNodeWrapper implements NodeWrapper<SimpleNode<?>>, SimpleNode<Object> {
private SimpleNode<Object> simpleNode;
final RestconfService delegate;
- private StatisticsRestconfServiceWrapper(RestconfService delegate) {
+ private StatisticsRestconfServiceWrapper(final RestconfService delegate) {
this.delegate = delegate;
}
}
@Override
- public StructuredData getModules(UriInfo uriInfo) {
+ public NormalizedNodeContext getModules(final UriInfo uriInfo) {
return delegate.getModules(uriInfo);
}
@Override
- public StructuredData getModules(String identifier, UriInfo uriInfo) {
+ public NormalizedNodeContext getModules(final String identifier, final UriInfo uriInfo) {
return delegate.getModules(identifier, uriInfo);
}
@Override
- public StructuredData getModule(String identifier, UriInfo uriInfo) {
+ public NormalizedNodeContext getModule(final String identifier, final UriInfo uriInfo) {
return delegate.getModule(identifier, uriInfo);
}
@Override
- public StructuredData getOperations(UriInfo uriInfo) {
+ public StructuredData getOperations(final UriInfo uriInfo) {
return delegate.getOperations(uriInfo);
}
@Override
- public StructuredData getOperations(String identifier, UriInfo uriInfo) {
+ public StructuredData getOperations(final String identifier, final UriInfo uriInfo) {
return delegate.getOperations(identifier, uriInfo);
}
@Override
- public StructuredData invokeRpc(String identifier, CompositeNode payload, UriInfo uriInfo) {
+ public StructuredData invokeRpc(final String identifier, final CompositeNode payload, final UriInfo uriInfo) {
rpc.incrementAndGet();
return delegate.invokeRpc(identifier, payload, uriInfo);
}
@Override
- public StructuredData invokeRpc(String identifier, String noPayload, UriInfo uriInfo) {
+ public StructuredData invokeRpc(final String identifier, final String noPayload, final UriInfo uriInfo) {
rpc.incrementAndGet();
return delegate.invokeRpc(identifier, noPayload, uriInfo);
}
@Override
- public NormalizedNodeContext readConfigurationData(String identifier, UriInfo uriInfo) {
+ public NormalizedNodeContext readConfigurationData(final String identifier, final UriInfo uriInfo) {
configGet.incrementAndGet();
return delegate.readConfigurationData(identifier, uriInfo);
}
@Override
- public NormalizedNodeContext readOperationalData(String identifier, UriInfo uriInfo) {
+ public NormalizedNodeContext readOperationalData(final String identifier, final UriInfo uriInfo) {
operationalGet.incrementAndGet();
return delegate.readOperationalData(identifier, uriInfo);
}
@Override
- public Response updateConfigurationData(String identifier, Node<?> payload) {
+ public Response updateConfigurationData(final String identifier, final Node<?> payload) {
configPut.incrementAndGet();
return delegate.updateConfigurationData(identifier, payload);
}
@Override
- public Response createConfigurationData(String identifier, Node<?> payload) {
+ public Response createConfigurationData(final String identifier, final Node<?> payload) {
configPost.incrementAndGet();
return delegate.createConfigurationData(identifier, payload);
}
@Override
- public Response createConfigurationData(Node<?> payload) {
+ public Response createConfigurationData(final Node<?> payload) {
configPost.incrementAndGet();
return delegate.createConfigurationData(payload);
}
@Override
- public Response deleteConfigurationData(String identifier) {
+ public Response deleteConfigurationData(final String identifier) {
return delegate.deleteConfigurationData(identifier);
}
@Override
- public Response subscribeToStream(String identifier, UriInfo uriInfo) {
+ public Response subscribeToStream(final String identifier, final UriInfo uriInfo) {
return delegate.subscribeToStream(identifier, uriInfo);
}
@Override
- public StructuredData getAvailableStreams(UriInfo uriInfo) {
+ public StructuredData getAvailableStreams(final UriInfo uriInfo) {
return delegate.getAvailableStreams(uriInfo);
}
public BigInteger getRpc() {
return BigInteger.valueOf(rpc.get());
}
-
}
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public class StructuredData {
private final CompositeNode data;
this.data = data;
this.schema = schema;
this.mountPoint = mountPoint;
- this.prettyPrintMode = preattyPrintMode;
+ prettyPrintMode = preattyPrintMode;
}
public CompositeNode getData() {
--- /dev/null
+module iana-if-type {
+ namespace "urn:ietf:params:xml:ns:yang:iana-if-type";
+ prefix ianaift;
+
+ organization "IANA";
+ contact
+ " Internet Assigned Numbers Authority
+
+ Postal: ICANN
+ 4676 Admiralty Way, Suite 330
+ Marina del Rey, CA 90292
+
+ Tel: +1 310 823 9358
+ E-Mail: iana&iana.org";
+ description
+ "This YANG module defines the iana-if-type typedef, which
+ contains YANG definitions for IANA-registered interface types.
+
+ This YANG module is maintained by IANA, and reflects the
+ 'ifType definitions' registry.
+
+ The latest revision of this YANG module can be obtained from
+ the IANA web site.
+
+ Copyright (c) 2011 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, is permitted pursuant to, and subject
+ to the license terms contained in, the Simplified BSD License
+ set forth in Section 4.c of the IETF Trust's Legal Provisions
+ Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC XXXX; see
+ the RFC itself for full legal notices.";
+ // RFC Ed.: replace XXXX with actual RFC number and remove this
+ // note.
+
+ // RFC Ed.: update the date below with the date of RFC publication
+ // and remove this note.
+ revision 2013-07-04 {
+ description
+ "Initial revision.";
+ reference
+ "RFC XXXX: IANA Interface Type YANG Module";
+ }
+
+ typedef iana-if-type {
+ type enumeration {
+ enum "other" {
+ value 1;
+ description
+ "None of the following";
+ }
+ enum "regular1822" {
+ value 2;
+ }
+ enum "hdh1822" {
+ value 3;
+ }
+ enum "ddnX25" {
+ value 4;
+ }
+ enum "rfc877x25" {
+ value 5;
+ reference
+ "RFC 1382 - SNMP MIB Extension for the X.25 Packet Layer";
+ }
+ enum "ethernetCsmacd" {
+ value 6;
+ description
+ "For all ethernet-like interfaces, regardless of speed,
+ as per RFC3635.";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "iso88023Csmacd" {
+ value 7;
+ status deprecated;
+ description
+ "Deprecated via RFC3635.
+ Use ethernetCsmacd(6) instead.";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "iso88024TokenBus" {
+ value 8;
+ }
+ enum "iso88025TokenRing" {
+ value 9;
+ }
+ enum "iso88026Man" {
+ value 10;
+ }
+ enum "starLan" {
+ value 11;
+ status deprecated;
+ description
+ "Deprecated via RFC3635.
+ Use ethernetCsmacd(6) instead.";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "proteon10Mbit" {
+ value 12;
+ }
+ enum "proteon80Mbit" {
+ value 13;
+ }
+ enum "hyperchannel" {
+ value 14;
+ }
+ enum "fddi" {
+ value 15;
+ reference
+ "RFC 1512 - FDDI Management Information Base";
+ }
+ enum "lapb" {
+ value 16;
+ reference
+ "RFC 1381 - SNMP MIB Extension for X.25 LAPB";
+ }
+ enum "sdlc" {
+ value 17;
+ }
+ enum "ds1" {
+ value 18;
+ description
+ "DS1-MIB";
+ reference
+ "RFC 4805 - Definitions of Managed Objects for the
+ DS1, J1, E1, DS2, and E2 Interface Types";
+ }
+ enum "e1" {
+ value 19;
+ status obsolete;
+ description
+ "Obsolete see DS1-MIB";
+ reference
+ "RFC 4805 - Definitions of Managed Objects for the
+ DS1, J1, E1, DS2, and E2 Interface Types";
+ }
+ enum "basicISDN" {
+ value 20;
+ description
+ "see also RFC2127";
+ }
+ enum "primaryISDN" {
+ value 21;
+ }
+ enum "propPointToPointSerial" {
+ value 22;
+ description
+ "proprietary serial";
+ }
+ enum "ppp" {
+ value 23;
+ }
+ enum "softwareLoopback" {
+ value 24;
+ }
+ enum "eon" {
+ value 25;
+ description
+ "CLNP over IP";
+ }
+ enum "ethernet3Mbit" {
+ value 26;
+ }
+ enum "nsip" {
+ value 27;
+ description
+ "XNS over IP";
+ }
+ enum "slip" {
+ value 28;
+ description
+ "generic SLIP";
+ }
+ enum "ultra" {
+ value 29;
+ description
+ "ULTRA technologies";
+ }
+ enum "ds3" {
+ value 30;
+ description
+ "DS3-MIB";
+ reference
+ "RFC 3896 - Definitions of Managed Objects for the
+ DS3/E3 Interface Type";
+ }
+ enum "sip" {
+ value 31;
+ description
+ "SMDS, coffee";
+ reference
+ "RFC 1694 - Definitions of Managed Objects for SMDS
+ Interfaces using SMIv2";
+ }
+ enum "frameRelay" {
+ value 32;
+ description
+ "DTE only.";
+ reference
+ "RFC 2115 - Management Information Base for Frame Relay
+ DTEs Using SMIv2";
+ }
+ enum "rs232" {
+ value 33;
+ reference
+ "RFC 1659 - Definitions of Managed Objects for RS-232-like
+ Hardware Devices using SMIv2";
+ }
+ enum "para" {
+ value 34;
+ description
+ "parallel-port";
+ reference
+ "RFC 1660 - Definitions of Managed Objects for
+ Parallel-printer-like Hardware Devices using
+ SMIv2";
+ }
+ enum "arcnet" {
+ value 35;
+ description
+ "arcnet";
+ }
+ enum "arcnetPlus" {
+ value 36;
+ description
+ "arcnet plus";
+ }
+ enum "atm" {
+ value 37;
+ description
+ "ATM cells";
+ }
+ enum "miox25" {
+ value 38;
+ reference
+ "RFC 1461 - SNMP MIB extension for Multiprotocol
+ Interconnect over X.25";
+ }
+ enum "sonet" {
+ value 39;
+ description
+ "SONET or SDH";
+ }
+ enum "x25ple" {
+ value 40;
+ reference
+ "RFC 2127 - ISDN Management Information Base using SMIv2";
+ }
+ enum "iso88022llc" {
+ value 41;
+ }
+ enum "localTalk" {
+ value 42;
+ }
+ enum "smdsDxi" {
+ value 43;
+ }
+ enum "frameRelayService" {
+ value 44;
+ description
+ "FRNETSERV-MIB";
+ reference
+ "RFC 2954 - Definitions of Managed Objects for Frame
+ Relay Service";
+ }
+ enum "v35" {
+ value 45;
+ }
+ enum "hssi" {
+ value 46;
+ }
+ enum "hippi" {
+ value 47;
+ }
+ enum "modem" {
+ value 48;
+ description
+ "Generic modem";
+ }
+ enum "aal5" {
+ value 49;
+ description
+ "AAL5 over ATM";
+ }
+ enum "sonetPath" {
+ value 50;
+ }
+ enum "sonetVT" {
+ value 51;
+ }
+ enum "smdsIcip" {
+ value 52;
+ description
+ "SMDS InterCarrier Interface";
+ }
+ enum "propVirtual" {
+ value 53;
+ description
+ "proprietary virtual/internal";
+ reference
+ "RFC 2863 - The Interfaces Group MIB";
+ }
+ enum "propMultiplexor" {
+ value 54;
+ description
+ "proprietary multiplexing";
+ reference
+ "RFC 2863 - The Interfaces Group MIB";
+ }
+ enum "ieee80212" {
+ value 55;
+ description
+ "100BaseVG";
+ }
+ enum "fibreChannel" {
+ value 56;
+ description
+ "Fibre Channel";
+ }
+ enum "hippiInterface" {
+ value 57;
+ description
+ "HIPPI interfaces";
+ }
+ enum "frameRelayInterconnect" {
+ value 58;
+ status obsolete;
+ description
+ "Obsolete use either
+ frameRelay(32) or frameRelayService(44).";
+ }
+ enum "aflane8023" {
+ value 59;
+ description
+ "ATM Emulated LAN for 802.3";
+ }
+ enum "aflane8025" {
+ value 60;
+ description
+ "ATM Emulated LAN for 802.5";
+ }
+ enum "cctEmul" {
+ value 61;
+ description
+ "ATM Emulated circuit";
+ }
+ enum "fastEther" {
+ value 62;
+ status deprecated;
+ description
+ "Obsoleted via RFC3635.
+ ethernetCsmacd(6) should be used instead";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "isdn" {
+ value 63;
+ description
+ "ISDN and X.25";
+ reference
+ "RFC 1356 - Multiprotocol Interconnect on X.25 and ISDN
+ in the Packet Mode";
+ }
+ enum "v11" {
+ value 64;
+ description
+ "CCITT V.11/X.21";
+ }
+ enum "v36" {
+ value 65;
+ description
+ "CCITT V.36";
+ }
+ enum "g703at64k" {
+ value 66;
+ description
+ "CCITT G703 at 64Kbps";
+ }
+ enum "g703at2mb" {
+ value 67;
+ status obsolete;
+ description
+ "Obsolete see DS1-MIB";
+ }
+ enum "qllc" {
+ value 68;
+ description
+ "SNA QLLC";
+ }
+ enum "fastEtherFX" {
+ value 69;
+ status deprecated;
+ description
+ "Obsoleted via RFC3635
+ ethernetCsmacd(6) should be used instead";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "channel" {
+ value 70;
+ description
+ "channel";
+ }
+ enum "ieee80211" {
+ value 71;
+ description
+ "radio spread spectrum";
+ }
+ enum "ibm370parChan" {
+ value 72;
+ description
+ "IBM System 360/370 OEMI Channel";
+ }
+ enum "escon" {
+ value 73;
+ description
+ "IBM Enterprise Systems Connection";
+ }
+ enum "dlsw" {
+ value 74;
+ description
+ "Data Link Switching";
+ }
+ enum "isdns" {
+ value 75;
+ description
+ "ISDN S/T interface";
+ }
+ enum "isdnu" {
+ value 76;
+ description
+ "ISDN U interface";
+ }
+ enum "lapd" {
+ value 77;
+ description
+ "Link Access Protocol D";
+ }
+ enum "ipSwitch" {
+ value 78;
+ description
+ "IP Switching Objects";
+ }
+ enum "rsrb" {
+ value 79;
+ description
+ "Remote Source Route Bridging";
+ }
+ enum "atmLogical" {
+ value 80;
+ description
+ "ATM Logical Port";
+ reference
+ "RFC 3606 - Definitions of Supplemental Managed Objects
+ for ATM Interface";
+ }
+ enum "ds0" {
+ value 81;
+ description
+ "Digital Signal Level 0";
+ reference
+ "RFC 2494 - Definitions of Managed Objects for the DS0
+ and DS0 Bundle Interface Type";
+ }
+ enum "ds0Bundle" {
+ value 82;
+ description
+ "group of ds0s on the same ds1";
+ reference
+ "RFC 2494 - Definitions of Managed Objects for the DS0
+ and DS0 Bundle Interface Type";
+ }
+ enum "bsc" {
+ value 83;
+ description
+ "Bisynchronous Protocol";
+ }
+ enum "async" {
+ value 84;
+ description
+ "Asynchronous Protocol";
+ }
+ enum "cnr" {
+ value 85;
+ description
+ "Combat Net Radio";
+ }
+ enum "iso88025Dtr" {
+ value 86;
+ description
+ "ISO 802.5r DTR";
+ }
+ enum "eplrs" {
+ value 87;
+ description
+ "Ext Pos Loc Report Sys";
+ }
+ enum "arap" {
+ value 88;
+ description
+ "Appletalk Remote Access Protocol";
+ }
+ enum "propCnls" {
+ value 89;
+ description
+ "Proprietary Connectionless Protocol";
+ }
+ enum "hostPad" {
+ value 90;
+ description
+ "CCITT-ITU X.29 PAD Protocol";
+ }
+ enum "termPad" {
+ value 91;
+ description
+ "CCITT-ITU X.3 PAD Facility";
+ }
+ enum "frameRelayMPI" {
+ value 92;
+ description
+ "Multiproto Interconnect over FR";
+ }
+ enum "x213" {
+ value 93;
+ description
+ "CCITT-ITU X213";
+ }
+ enum "adsl" {
+ value 94;
+ description
+ "Asymmetric Digital Subscriber Loop";
+ }
+ enum "radsl" {
+ value 95;
+ description
+ "Rate-Adapt. Digital Subscriber Loop";
+ }
+ enum "sdsl" {
+ value 96;
+ description
+ "Symmetric Digital Subscriber Loop";
+ }
+ enum "vdsl" {
+ value 97;
+ description
+ "Very H-Speed Digital Subscrib. Loop";
+ }
+ enum "iso88025CRFPInt" {
+ value 98;
+ description
+ "ISO 802.5 CRFP";
+ }
+ enum "myrinet" {
+ value 99;
+ description
+ "Myricom Myrinet";
+ }
+ enum "voiceEM" {
+ value 100;
+ description
+ "voice recEive and transMit";
+ }
+ enum "voiceFXO" {
+ value 101;
+ description
+ "voice Foreign Exchange Office";
+ }
+ enum "voiceFXS" {
+ value 102;
+ description
+ "voice Foreign Exchange Station";
+ }
+ enum "voiceEncap" {
+ value 103;
+ description
+ "voice encapsulation";
+ }
+ enum "voiceOverIp" {
+ value 104;
+ description
+ "voice over IP encapsulation";
+ }
+ enum "atmDxi" {
+ value 105;
+ description
+ "ATM DXI";
+ }
+ enum "atmFuni" {
+ value 106;
+ description
+ "ATM FUNI";
+ }
+ enum "atmIma" {
+ value 107;
+ description
+ "ATM IMA";
+ }
+ enum "pppMultilinkBundle" {
+ value 108;
+ description
+ "PPP Multilink Bundle";
+ }
+ enum "ipOverCdlc" {
+ value 109;
+ description
+ "IBM ipOverCdlc";
+ }
+ enum "ipOverClaw" {
+ value 110;
+ description
+ "IBM Common Link Access to Workstn";
+ }
+ enum "stackToStack" {
+ value 111;
+ description
+ "IBM stackToStack";
+ }
+ enum "virtualIpAddress" {
+ value 112;
+ description
+ "IBM VIPA";
+ }
+ enum "mpc" {
+ value 113;
+ description
+ "IBM multi-protocol channel support";
+ }
+ enum "ipOverAtm" {
+ value 114;
+ description
+ "IBM ipOverAtm";
+ reference
+ "RFC 2320 - Definitions of Managed Objects for Classical IP
+ and ARP Over ATM Using SMIv2 (IPOA-MIB)";
+ }
+ enum "iso88025Fiber" {
+ value 115;
+ description
+ "ISO 802.5j Fiber Token Ring";
+ }
+ enum "tdlc" {
+ value 116;
+ description
+ "IBM twinaxial data link control";
+ }
+ enum "gigabitEthernet" {
+ value 117;
+ status deprecated;
+ description
+ "Obsoleted via RFC3635
+ ethernetCsmacd(6) should be used instead";
+ reference
+ "RFC 3635 - Definitions of Managed Objects for the
+ Ethernet-like Interface Types.";
+ }
+ enum "hdlc" {
+ value 118;
+ description
+ "HDLC";
+ }
+ enum "lapf" {
+ value 119;
+ description
+ "LAP F";
+ }
+ enum "v37" {
+ value 120;
+ description
+ "V.37";
+ }
+ enum "x25mlp" {
+ value 121;
+ description
+ "Multi-Link Protocol";
+ }
+ enum "x25huntGroup" {
+ value 122;
+ description
+ "X25 Hunt Group";
+ }
+ enum "transpHdlc" {
+ value 123;
+ description
+ "Transp HDLC";
+ }
+ enum "interleave" {
+ value 124;
+ description
+ "Interleave channel";
+ }
+ enum "fast" {
+ value 125;
+ description
+ "Fast channel";
+ }
+ enum "ip" {
+ value 126;
+ description
+ "IP (for APPN HPR in IP networks)";
+ }
+ enum "docsCableMaclayer" {
+ value 127;
+ description
+ "CATV Mac Layer";
+ }
+ enum "docsCableDownstream" {
+ value 128;
+ description
+ "CATV Downstream interface";
+ }
+ enum "docsCableUpstream" {
+ value 129;
+ description
+ "CATV Upstream interface";
+ }
+ enum "a12MppSwitch" {
+ value 130;
+ description
+ "Avalon Parallel Processor";
+ }
+ enum "tunnel" {
+ value 131;
+ description
+ "Encapsulation interface";
+ }
+ enum "coffee" {
+ value 132;
+ description
+ "coffee pot";
+ reference
+ "RFC 2325 - Coffee MIB";
+ }
+ enum "ces" {
+ value 133;
+ description
+ "Circuit Emulation Service";
+ }
+ enum "atmSubInterface" {
+ value 134;
+ description
+ "ATM Sub Interface";
+ }
+ enum "l2vlan" {
+ value 135;
+ description
+ "Layer 2 Virtual LAN using 802.1Q";
+ }
+ enum "l3ipvlan" {
+ value 136;
+ description
+ "Layer 3 Virtual LAN using IP";
+ }
+ enum "l3ipxvlan" {
+ value 137;
+ description
+ "Layer 3 Virtual LAN using IPX";
+ }
+ enum "digitalPowerline" {
+ value 138;
+ description
+ "IP over Power Lines";
+ }
+ enum "mediaMailOverIp" {
+ value 139;
+ description
+ "Multimedia Mail over IP";
+ }
+ enum "dtm" {
+ value 140;
+ description
+ "Dynamic syncronous Transfer Mode";
+ }
+ enum "dcn" {
+ value 141;
+ description
+ "Data Communications Network";
+ }
+ enum "ipForward" {
+ value 142;
+ description
+ "IP Forwarding Interface";
+ }
+ enum "msdsl" {
+ value 143;
+ description
+ "Multi-rate Symmetric DSL";
+ }
+ enum "ieee1394" {
+ value 144;
+ description
+ "IEEE1394 High Performance Serial Bus";
+ }
+ enum "if-gsn" {
+ value 145;
+ description
+ "HIPPI-6400";
+ }
+ enum "dvbRccMacLayer" {
+ value 146;
+ description
+ "DVB-RCC MAC Layer";
+ }
+ enum "dvbRccDownstream" {
+ value 147;
+ description
+ "DVB-RCC Downstream Channel";
+ }
+ enum "dvbRccUpstream" {
+ value 148;
+ description
+ "DVB-RCC Upstream Channel";
+ }
+ enum "atmVirtual" {
+ value 149;
+ description
+ "ATM Virtual Interface";
+ }
+ enum "mplsTunnel" {
+ value 150;
+ description
+ "MPLS Tunnel Virtual Interface";
+ }
+ enum "srp" {
+ value 151;
+ description
+ "Spatial Reuse Protocol";
+ }
+ enum "voiceOverAtm" {
+ value 152;
+ description
+ "Voice Over ATM";
+ }
+ enum "voiceOverFrameRelay" {
+ value 153;
+ description
+ "Voice Over Frame Relay";
+ }
+ enum "idsl" {
+ value 154;
+ description
+ "Digital Subscriber Loop over ISDN";
+ }
+ enum "compositeLink" {
+ value 155;
+ description
+ "Avici Composite Link Interface";
+ }
+ enum "ss7SigLink" {
+ value 156;
+ description
+ "SS7 Signaling Link";
+ }
+ enum "propWirelessP2P" {
+ value 157;
+ description
+ "Prop. P2P wireless interface";
+ }
+ enum "frForward" {
+ value 158;
+ description
+ "Frame Forward Interface";
+ }
+ enum "rfc1483" {
+ value 159;
+ description
+ "Multiprotocol over ATM AAL5";
+ reference
+ "RFC 1483 - Multiprotocol Encapsulation over ATM
+ Adaptation Layer 5";
+ }
+ enum "usb" {
+ value 160;
+ description
+ "USB Interface";
+ }
+ enum "ieee8023adLag" {
+ value 161;
+ description
+ "IEEE 802.3ad Link Aggregate";
+ }
+ enum "bgppolicyaccounting" {
+ value 162;
+ description
+ "BGP Policy Accounting";
+ }
+ enum "frf16MfrBundle" {
+ value 163;
+ description
+ "FRF .16 Multilink Frame Relay";
+ }
+ enum "h323Gatekeeper" {
+ value 164;
+ description
+ "H323 Gatekeeper";
+ }
+ enum "h323Proxy" {
+ value 165;
+ description
+ "H323 Voice and Video Proxy";
+ }
+ enum "mpls" {
+ value 166;
+ description
+ "MPLS";
+ }
+ enum "mfSigLink" {
+ value 167;
+ description
+ "Multi-frequency signaling link";
+ }
+ enum "hdsl2" {
+ value 168;
+ description
+ "High Bit-Rate DSL - 2nd generation";
+ }
+ enum "shdsl" {
+ value 169;
+ description
+ "Multirate HDSL2";
+ }
+ enum "ds1FDL" {
+ value 170;
+ description
+ "Facility Data Link 4Kbps on a DS1";
+ }
+ enum "pos" {
+ value 171;
+ description
+ "Packet over SONET/SDH Interface";
+ }
+ enum "dvbAsiIn" {
+ value 172;
+ description
+ "DVB-ASI Input";
+ }
+ enum "dvbAsiOut" {
+ value 173;
+ description
+ "DVB-ASI Output";
+ }
+ enum "plc" {
+ value 174;
+ description
+ "Power Line Communtications";
+ }
+ enum "nfas" {
+ value 175;
+ description
+ "Non Facility Associated Signaling";
+ }
+ enum "tr008" {
+ value 176;
+ description
+ "TR008";
+ }
+ enum "gr303RDT" {
+ value 177;
+ description
+ "Remote Digital Terminal";
+ }
+ enum "gr303IDT" {
+ value 178;
+ description
+ "Integrated Digital Terminal";
+ }
+ enum "isup" {
+ value 179;
+ description
+ "ISUP";
+ }
+ enum "propDocsWirelessMaclayer" {
+ value 180;
+ description
+ "Cisco proprietary Maclayer";
+ }
+ enum "propDocsWirelessDownstream" {
+ value 181;
+ description
+ "Cisco proprietary Downstream";
+ }
+ enum "propDocsWirelessUpstream" {
+ value 182;
+ description
+ "Cisco proprietary Upstream";
+ }
+ enum "hiperlan2" {
+ value 183;
+ description
+ "HIPERLAN Type 2 Radio Interface";
+ }
+ enum "propBWAp2Mp" {
+ value 184;
+ description
+ "PropBroadbandWirelessAccesspt2multipt use of this value
+ for IEEE 802.16 WMAN interfaces as per IEEE Std 802.16f
+ is deprecated and ieee80216WMAN(237) should be used
+ instead.";
+ }
+ enum "sonetOverheadChannel" {
+ value 185;
+ description
+ "SONET Overhead Channel";
+ }
+ enum "digitalWrapperOverheadChannel" {
+ value 186;
+ description
+ "Digital Wrapper";
+ }
+ enum "aal2" {
+ value 187;
+ description
+ "ATM adaptation layer 2";
+ }
+ enum "radioMAC" {
+ value 188;
+ description
+ "MAC layer over radio links";
+ }
+ enum "atmRadio" {
+ value 189;
+ description
+ "ATM over radio links";
+ }
+ enum "imt" {
+ value 190;
+ description
+ "Inter Machine Trunks";
+ }
+ enum "mvl" {
+ value 191;
+ description
+ "Multiple Virtual Lines DSL";
+ }
+ enum "reachDSL" {
+ value 192;
+ description
+ "Long Reach DSL";
+ }
+ enum "frDlciEndPt" {
+ value 193;
+ description
+ "Frame Relay DLCI End Point";
+ }
+ enum "atmVciEndPt" {
+ value 194;
+ description
+ "ATM VCI End Point";
+ }
+ enum "opticalChannel" {
+ value 195;
+ description
+ "Optical Channel";
+ }
+ enum "opticalTransport" {
+ value 196;
+ description
+ "Optical Transport";
+ }
+ enum "propAtm" {
+ value 197;
+ description
+ "Proprietary ATM";
+ }
+ enum "voiceOverCable" {
+ value 198;
+ description
+ "Voice Over Cable Interface";
+ }
+ enum "infiniband" {
+ value 199;
+ description
+ "Infiniband";
+ }
+ enum "teLink" {
+ value 200;
+ description
+ "TE Link";
+ }
+ enum "q2931" {
+ value 201;
+ description
+ "Q.2931";
+ }
+ enum "virtualTg" {
+ value 202;
+ description
+ "Virtual Trunk Group";
+ }
+ enum "sipTg" {
+ value 203;
+ description
+ "SIP Trunk Group";
+ }
+ enum "sipSig" {
+ value 204;
+ description
+ "SIP Signaling";
+ }
+ enum "docsCableUpstreamChannel" {
+ value 205;
+ description
+ "CATV Upstream Channel";
+ }
+ enum "econet" {
+ value 206;
+ description
+ "Acorn Econet";
+ }
+ enum "pon155" {
+ value 207;
+ description
+ "FSAN 155Mb Symetrical PON interface";
+ }
+ enum "pon622" {
+ value 208;
+ description
+ "FSAN622Mb Symetrical PON interface";
+ }
+ enum "bridge" {
+ value 209;
+ description
+ "Transparent bridge interface";
+ }
+ enum "linegroup" {
+ value 210;
+ description
+ "Interface common to multiple lines";
+ }
+ enum "voiceEMFGD" {
+ value 211;
+ description
+ "voice E&M Feature Group D";
+ }
+ enum "voiceFGDEANA" {
+ value 212;
+ description
+ "voice FGD Exchange Access North American";
+ }
+ enum "voiceDID" {
+ value 213;
+ description
+ "voice Direct Inward Dialing";
+ }
+ enum "mpegTransport" {
+ value 214;
+ description
+ "MPEG transport interface";
+ }
+ enum "sixToFour" {
+ value 215;
+ status deprecated;
+ description
+ "6to4 interface (DEPRECATED)";
+ reference
+ "RFC 4087 - IP Tunnel MIB";
+ }
+ enum "gtp" {
+ value 216;
+ description
+ "GTP (GPRS Tunneling Protocol)";
+ }
+ enum "pdnEtherLoop1" {
+ value 217;
+ description
+ "Paradyne EtherLoop 1";
+ }
+ enum "pdnEtherLoop2" {
+ value 218;
+ description
+ "Paradyne EtherLoop 2";
+ }
+ enum "opticalChannelGroup" {
+ value 219;
+ description
+ "Optical Channel Group";
+ }
+ enum "homepna" {
+ value 220;
+ description
+ "HomePNA ITU-T G.989";
+ }
+ enum "gfp" {
+ value 221;
+ description
+ "Generic Framing Procedure (GFP)";
+ }
+ enum "ciscoISLvlan" {
+ value 222;
+ description
+ "Layer 2 Virtual LAN using Cisco ISL";
+ }
+ enum "actelisMetaLOOP" {
+ value 223;
+ description
+ "Acteleis proprietary MetaLOOP High Speed Link";
+ }
+ enum "fcipLink" {
+ value 224;
+ description
+ "FCIP Link";
+ }
+ enum "rpr" {
+ value 225;
+ description
+ "Resilient Packet Ring Interface Type";
+ }
+ enum "qam" {
+ value 226;
+ description
+ "RF Qam Interface";
+ }
+ enum "lmp" {
+ value 227;
+ description
+ "Link Management Protocol";
+ reference
+ "RFC 4327 - Link Management Protocol (LMP) Management
+ Information Base (MIB)";
+ }
+ enum "cblVectaStar" {
+ value 228;
+ description
+ "Cambridge Broadband Networks Limited VectaStar";
+ }
+ enum "docsCableMCmtsDownstream" {
+ value 229;
+ description
+ "CATV Modular CMTS Downstream Interface";
+ }
+ enum "adsl2" {
+ value 230;
+ status deprecated;
+ description
+ "Asymmetric Digital Subscriber Loop Version 2
+ (DEPRECATED/OBSOLETED - please use adsl2plus(238)
+ instead)";
+ reference
+ "RFC 4706 - Definitions of Managed Objects for Asymmetric
+ Digital Subscriber Line 2 (ADSL2)";
+ }
+ enum "macSecControlledIF" {
+ value 231;
+ description
+ "MACSecControlled";
+ }
+ enum "macSecUncontrolledIF" {
+ value 232;
+ description
+ "MACSecUncontrolled";
+ }
+ enum "aviciOpticalEther" {
+ value 233;
+ description
+ "Avici Optical Ethernet Aggregate";
+ }
+ enum "atmbond" {
+ value 234;
+ description
+ "atmbond";
+ }
+ enum "voiceFGDOS" {
+ value 235;
+ description
+ "voice FGD Operator Services";
+ }
+ enum "mocaVersion1" {
+ value 236;
+ description
+ "MultiMedia over Coax Alliance (MoCA) Interface
+ as documented in information provided privately to IANA";
+ }
+ enum "ieee80216WMAN" {
+ value 237;
+ description
+ "IEEE 802.16 WMAN interface";
+ }
+ enum "adsl2plus" {
+ value 238;
+ description
+ "Asymmetric Digital Subscriber Loop Version 2,
+ Version 2 Plus and all variants";
+ }
+ enum "dvbRcsMacLayer" {
+ value 239;
+ description
+ "DVB-RCS MAC Layer";
+ reference
+ "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+ }
+ enum "dvbTdm" {
+ value 240;
+ description
+ "DVB Satellite TDM";
+ reference
+ "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+ }
+ enum "dvbRcsTdma" {
+ value 241;
+ description
+ "DVB-RCS TDMA";
+ reference
+ "RFC 5728 - The SatLabs Group DVB-RCS MIB";
+ }
+ enum "x86Laps" {
+ value 242;
+ description
+ "LAPS based on ITU-T X.86/Y.1323";
+ }
+ enum "wwanPP" {
+ value 243;
+ description
+ "3GPP WWAN";
+ }
+ enum "wwanPP2" {
+ value 244;
+ description
+ "3GPP2 WWAN";
+ }
+ enum "voiceEBS" {
+ value 245;
+ description
+ "voice P-phone EBS physical interface";
+ }
+ enum "ifPwType" {
+ value 246;
+ description
+ "Pseudowire interface type";
+ reference
+ "RFC 5601 - Pseudowire (PW) Management Information Base";
+ }
+ enum "ilan" {
+ value 247;
+ description
+ "Internal LAN on a bridge per IEEE 802.1ap";
+ }
+ enum "pip" {
+ value 248;
+ description
+ "Provider Instance Port on a bridge per IEEE 802.1ah PBB";
+ }
+ enum "aluELP" {
+ value 249;
+ description
+ "Alcatel-Lucent Ethernet Link Protection";
+ }
+ enum "gpon" {
+ value 250;
+ description
+ "Gigabit-capable passive optical networks (G-PON) as per
+ ITU-T G.948";
+ }
+ enum "vdsl2" {
+ value 251;
+ description
+ "Very high speed digital subscriber line Version 2
+ (as per ITU-T Recommendation G.993.2)";
+ reference
+ "RFC 5650 - Definitions of Managed Objects for Very High
+ Speed Digital Subscriber Line 2 (VDSL2)";
+ }
+ enum "capwapDot11Profile" {
+ value 252;
+ description
+ "WLAN Profile Interface";
+ reference
+ "RFC 5834 - Control and Provisioning of Wireless Access
+ Points (CAPWAP) Protocol Binding MIB for
+ IEEE 802.11";
+ }
+ enum "capwapDot11Bss" {
+ value 253;
+ description
+ "WLAN BSS Interface";
+ reference
+ "RFC 5834 - Control and Provisioning of Wireless Access
+ Points (CAPWAP) Protocol Binding MIB for
+ IEEE 802.11";
+ }
+ enum "capwapWtpVirtualRadio" {
+ value 254;
+ description
+ "WTP Virtual Radio Interface";
+ reference
+ "RFC 5833 - Control and Provisioning of Wireless Access
+ Points (CAPWAP) Protocol Base MIB";
+ }
+ enum "bits" {
+ value 255;
+ description
+ "bitsport";
+ }
+ enum "docsCableUpstreamRfPort" {
+ value 256;
+ description
+ "DOCSIS CATV Upstream RF Port";
+ }
+ enum "cableDownstreamRfPort" {
+ value 257;
+ description
+ "CATV downstream RF port";
+ }
+ enum "vmwareVirtualNic" {
+ value 258;
+ description
+ "VMware Virtual Network Interface";
+ }
+ enum "ieee802154" {
+ value 259;
+ description
+ "IEEE 802.15.4 WPAN interface";
+ reference
+ "IEEE 802.15.4-2006";
+ }
+ enum "otnOdu" {
+ value 260;
+ description
+ "OTN Optical Data Unit";
+ }
+ enum "otnOtu" {
+ value 261;
+ description
+ "OTN Optical channel Transport Unit";
+ }
+ enum "ifVfiType" {
+ value 262;
+ description
+ "VPLS Forwarding Instance Interface Type";
+ }
+ enum "g9981" {
+ value 263;
+ description
+ "G.998.1 bonded interface";
+ }
+ enum "g9982" {
+ value 264;
+ description
+ "G.998.2 bonded interface";
+ }
+ enum "g9983" {
+ value 265;
+ description
+ "G.998.3 bonded interface";
+ }
+ enum "aluEpon" {
+ value 266;
+ description
+ "Ethernet Passive Optical Networks (E-PON)";
+ }
+ enum "aluEponOnu" {
+ value 267;
+ description
+ "EPON Optical Network Unit";
+ }
+ enum "aluEponPhysicalUni" {
+ value 268;
+ description
+ "EPON physical User to Network interface";
+ }
+ enum "aluEponLogicalLink" {
+ value 269;
+ description
+ "The emulation of a point-to-point link over the EPON
+ layer";
+ }
+ enum "aluGponOnu" {
+ value 270;
+ description
+ "GPON Optical Network Unit";
+ reference
+ "ITU-T G.984.2";
+ }
+ enum "aluGponPhysicalUni" {
+ value 271;
+ description
+ "GPON physical User to Network interface";
+ reference
+ "ITU-T G.984.2";
+ }
+ enum "vmwareNicTeam" {
+ value 272;
+ description
+ "VMware NIC Team";
+ }
+ // value 273 reserved by IANA
+ }
+ description
+ "This data type is used as the syntax of the 'type'
+ leaf in the 'interface' list in the YANG module
+ ietf-interfaces.
+
+ The definition of this typedef with the
+ addition of newly assigned values is published
+ periodically by the IANA, in either the Assigned
+ Numbers RFC, or some derivative of it specific to
+ Internet Network Management number assignments. (The
+ latest arrangements can be obtained by contacting the
+ IANA.)
+
+ Requests for new values should be made to IANA via
+ email (iana&iana.org).";
+ reference
+ "IANA ifType definitions registry.
+ <http://www.iana.org/assignments/smi-numbers>";
+ }
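+
+  // Illustrative note (editor's addition, not part of the IANA-maintained
+  // module): a data model imports this typedef and uses it as the syntax
+  // of an interface 'type' leaf, for example:
+  //
+  //   import iana-if-type { prefix ianaift; }
+  //   leaf type { type ianaift:iana-if-type; }
+  //
+  // This is exactly how the ietf-interfaces module later in this patch
+  // consumes it.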
+}
\ No newline at end of file
--- /dev/null
+ module ietf-inet-types {
+
+ namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types";
+ prefix "inet";
+
+ organization
+ "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netmod/>
+ WG List: <mailto:netmod@ietf.org>
+
+ WG Chair: David Partain
+ <mailto:david.partain@ericsson.com>
+
+ WG Chair: David Kessens
+ <mailto:david.kessens@nsn.com>
+
+ Editor: Juergen Schoenwaelder
+ <mailto:j.schoenwaelder@jacobs-university.de>";
+
+ description
+ "This module contains a collection of generally useful derived
+ YANG data types for Internet addresses and related things.
+
+ Copyright (c) 2010 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, is permitted pursuant to, and subject to the license
+ terms contained in, the Simplified BSD License set forth in Section
+ 4.c of the IETF Trust's Legal Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC 6021; see
+ the RFC itself for full legal notices.";
+
+ revision 2010-09-24 {
+ description
+ "Initial revision.";
+ reference
+ "RFC 6021: Common YANG Data Types";
+ }
+
+ /*** collection of protocol field related types ***/
+
+ typedef ip-version {
+ type enumeration {
+ enum unknown {
+ value "0";
+ description
+ "An unknown or unspecified version of the Internet protocol.";
+ }
+ enum ipv4 {
+ value "1";
+ description
+ "The IPv4 protocol as defined in RFC 791.";
+ }
+ enum ipv6 {
+ value "2";
+ description
+ "The IPv6 protocol as defined in RFC 2460.";
+ }
+ }
+ description
+ "This value represents the version of the IP protocol.
+
+ In the value set and its semantics, this type is equivalent
+ to the InetVersion textual convention of the SMIv2.";
+ reference
+ "RFC 791: Internet Protocol
+ RFC 2460: Internet Protocol, Version 6 (IPv6) Specification
+ RFC 4001: Textual Conventions for Internet Network Addresses";
+ }
+
+ typedef dscp {
+ type uint8 {
+ range "0..63";
+ }
+ description
+ "The dscp type represents a Differentiated Services Code-Point
+ that may be used for marking packets in a traffic stream.
+
+ In the value set and its semantics, this type is equivalent
+ to the Dscp textual convention of the SMIv2.";
+ reference
+ "RFC 3289: Management Information Base for the Differentiated
+ Services Architecture
+ RFC 2474: Definition of the Differentiated Services Field
+ (DS Field) in the IPv4 and IPv6 Headers
+ RFC 2780: IANA Allocation Guidelines For Values In
+ the Internet Protocol and Related Headers";
+ }
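+
+  // Illustrative note (editor's addition): with the 0..63 range above, a
+  // leaf of this type could carry 0 (the default, best-effort per-hop
+  // behavior) or 46 (the Expedited Forwarding PHB defined in RFC 3246).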
+
+ typedef ipv6-flow-label {
+ type uint32 {
+ range "0..1048575";
+ }
+ description
+ "The flow-label type represents flow identifier or Flow Label
+ in an IPv6 packet header that may be used to discriminate
+ traffic flows.
+
+ In the value set and its semantics, this type is equivalent
+ to the IPv6FlowLabel textual convention of the SMIv2.";
+ reference
+ "RFC 3595: Textual Conventions for IPv6 Flow Label
+ RFC 2460: Internet Protocol, Version 6 (IPv6) Specification";
+ }
+
+ typedef port-number {
+ type uint16 {
+ range "0..65535";
+ }
+ description
+ "The port-number type represents a 16-bit port number of an
+ Internet transport layer protocol such as UDP, TCP, DCCP, or
+ SCTP. Port numbers are assigned by IANA. A current list of
+ all assignments is available from <http://www.iana.org/>.
+
+ Note that the port number value zero is reserved by IANA. In
+ situations where the value zero does not make sense, it can
+ be excluded by subtyping the port-number type.
+
+ In the value set and its semantics, this type is equivalent
+ to the InetPortNumber textual convention of the SMIv2.";
+ reference
+ "RFC 768: User Datagram Protocol
+ RFC 793: Transmission Control Protocol
+ RFC 4960: Stream Control Transmission Protocol
+ RFC 4340: Datagram Congestion Control Protocol (DCCP)
+ RFC 4001: Textual Conventions for Internet Network Addresses";
+ }
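+
+  // Illustrative note (editor's addition): as the description suggests, a
+  // module importing these types (assuming the conventional 'inet' prefix)
+  // can exclude the reserved value zero by subtyping, for example:
+  //
+  //   leaf listen-port {
+  //     type inet:port-number { range "1..65535"; }
+  //   }
+  //
+  // The leaf name 'listen-port' is hypothetical.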
+
+ /*** collection of autonomous system related types ***/
+
+ typedef as-number {
+ type uint32;
+ description
+ "The as-number type represents autonomous system numbers
+ which identify an Autonomous System (AS). An AS is a set
+ of routers under a single technical administration, using
+ an interior gateway protocol and common metrics to route
+ packets within the AS, and using an exterior gateway
+ protocol to route packets to other ASs'. IANA maintains
+ the AS number space and has delegated large parts to the
+ regional registries.
+
+ Autonomous system numbers were originally limited to 16
+ bits. BGP extensions have enlarged the autonomous system
+ number space to 32 bits. This type therefore uses an uint32
+ base type without a range restriction in order to support
+ a larger autonomous system number space.
+
+ In the value set and its semantics, this type is equivalent
+ to the InetAutonomousSystemNumber textual convention of
+ the SMIv2.";
+ reference
+ "RFC 1930: Guidelines for creation, selection, and registration
+ of an Autonomous System (AS)
+ RFC 4271: A Border Gateway Protocol 4 (BGP-4)
+ RFC 4893: BGP Support for Four-octet AS Number Space
+ RFC 4001: Textual Conventions for Internet Network Addresses";
+ }
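+
+  // Illustrative note (editor's addition): because the base type is an
+  // unrestricted uint32, both a classic 16-bit AS number such as 64512 and
+  // a four-octet AS number such as 196608 (0x30000) are valid values.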
+
+ /*** collection of IP address and hostname related types ***/
+
+ typedef ip-address {
+ type union {
+ type inet:ipv4-address;
+ type inet:ipv6-address;
+ }
+ description
+ "The ip-address type represents an IP address and is IP
+ version neutral. The format of the textual representations
+ implies the IP version.";
+ }
+
+ typedef ipv4-address {
+ type string {
+ pattern
+ '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+ + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+ + '(%[\p{N}\p{L}]+)?';
+ }
+ description
+ "The ipv4-address type represents an IPv4 address in
+ dotted-quad notation. The IPv4 address may include a zone
+ index, separated by a % sign.
+
+ The zone index is used to disambiguate identical address
+ values. For link-local addresses, the zone index will
+ typically be the interface index number or the name of an
+ interface. If the zone index is not present, the default
+ zone of the device will be used.
+
+ The canonical format for the zone index is the numerical
+ format";
+ }
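+
+  // Illustrative note (editor's addition): '192.0.2.1' is a plain IPv4
+  // address, while '192.0.2.1%3' carries a zone index of 3 (typically an
+  // interface index), as described above.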
+
+ typedef ipv6-address {
+ type string {
+ pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+ + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+ + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+ + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+ + '(%[\p{N}\p{L}]+)?';
+ pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+ + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+ + '(%.+)?';
+ }
+ description
+ "The ipv6-address type represents an IPv6 address in full,
+ mixed, shortened, and shortened-mixed notation. The IPv6
+ address may include a zone index, separated by a % sign.
+
+ The zone index is used to disambiguate identical address
+ values. For link-local addresses, the zone index will
+ typically be the interface index number or the name of an
+ interface. If the zone index is not present, the default
+ zone of the device will be used.
+
+ The canonical format of IPv6 addresses uses the compressed
+ format described in RFC 4291, Section 2.2, item 2 with the
+ following additional rules: the :: substitution must be
+ applied to the longest sequence of all-zero 16-bit chunks
+ in an IPv6 address. If there is a tie, the first sequence
+ of all-zero 16-bit chunks is replaced by ::. Single
+ all-zero 16-bit chunks are not compressed. The canonical
+ format uses lowercase characters and leading zeros are
+ not allowed. The canonical format for the zone index is
+ the numerical format as described in RFC 4007, Section
+ 11.2.";
+ reference
+ "RFC 4291: IP Version 6 Addressing Architecture
+ RFC 4007: IPv6 Scoped Address Architecture
+ RFC 5952: A Recommendation for IPv6 Address Text Representation";
+ }
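+
+  // Illustrative note (editor's addition): '2001:DB8:0:0:0:0:0:1' and
+  // '2001:db8::1' denote the same address, but only the latter is
+  // canonical under the rules above (longest all-zero run compressed to
+  // '::', lowercase, no leading zeros). A link-local address with a
+  // numerical zone index might appear as 'fe80::1%1'.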
+
+ typedef ip-prefix {
+ type union {
+ type inet:ipv4-prefix;
+ type inet:ipv6-prefix;
+ }
+ description
+ "The ip-prefix type represents an IP prefix and is IP
+ version neutral. The format of the textual representations
+ implies the IP version.";
+ }
+
+ typedef ipv4-prefix {
+ type string {
+ pattern
+ '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}'
+ + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
+ + '/(([0-9])|([1-2][0-9])|(3[0-2]))';
+ }
+ description
+ "The ipv4-prefix type represents an IPv4 address prefix.
+ The prefix length is given by the number following the
+ slash character and must be less than or equal to 32.
+
+ A prefix length value of n corresponds to an IP address
+ mask that has n contiguous 1-bits from the most
+ significant bit (MSB) and all other bits set to 0.
+
+ The canonical format of an IPv4 prefix has all bits of
+ the IPv4 address set to zero that are not part of the
+ IPv4 prefix.";
+ }
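+
+  // Illustrative note (editor's addition): '192.0.2.17/24' is an accepted
+  // value, but its canonical form is '192.0.2.0/24', with all bits outside
+  // the 24-bit prefix cleared as described above.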
+
+ typedef ipv6-prefix {
+ type string {
+ pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}'
+ + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|'
+ + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}'
+ + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))'
+ + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))';
+ pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|'
+ + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)'
+ + '(/.+)';
+ }
+ description
+ "The ipv6-prefix type represents an IPv6 address prefix.
+ The prefix length is given by the number following the
+ slash character and must be less than or equal 128.
+
+ A prefix length value of n corresponds to an IP address
+ mask that has n contiguous 1-bits from the most
+ significant bit (MSB) and all other bits set to 0.
+
+ The IPv6 address should have all bits that do not belong
+ to the prefix set to zero.
+
+ The canonical format of an IPv6 prefix has all bits of
+ the IPv6 address set to zero that are not part of the
+ IPv6 prefix. Furthermore, IPv6 address is represented
+ in the compressed format described in RFC 4291, Section
+ 2.2, item 2 with the following additional rules: the ::
+ substitution must be applied to the longest sequence of
+ all-zero 16-bit chunks in an IPv6 address. If there is
+ a tie, the first sequence of all-zero 16-bit chunks is
+ replaced by ::. Single all-zero 16-bit chunks are not
+ compressed. The canonical format uses lowercase
+ characters and leading zeros are not allowed.";
+ reference
+ "RFC 4291: IP Version 6 Addressing Architecture";
+ }
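+
+  // Illustrative note (editor's addition): the canonical form of
+  // '2001:0DB8:0:0:0:0:0:0/32' is '2001:db8::/32'; the host bits are zero,
+  // the longest all-zero run is compressed to '::', and lowercase is used.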
+
+ /*** collection of domain name and URI types ***/
+
+ typedef domain-name {
+ type string {
+ pattern '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*'
+ + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)'
+ + '|\.';
+ length "1..253";
+ }
+ description
+ "The domain-name type represents a DNS domain name. The
+ name SHOULD be fully qualified whenever possible.
+
+ Internet domain names are only loosely specified. Section
+ 3.5 of RFC 1034 recommends a syntax (modified in Section
+ 2.1 of RFC 1123). The pattern above is intended to allow
+ for current practice in domain name use, and some possible
+ future expansion. It is designed to hold various types of
+ domain names, including names used for A or AAAA records
+ (host names) and other records, such as SRV records. Note
+ that Internet host names have a stricter syntax (described
+ in RFC 952) than the DNS recommendations in RFCs 1034 and
+ 1123, and that systems that want to store host names in
+ schema nodes using the domain-name type are recommended to
+ adhere to this stricter standard to ensure interoperability.
+
+ The encoding of DNS names in the DNS protocol is limited
+ to 255 characters. Since the encoding consists of labels
+ prefixed by a length bytes and there is a trailing NULL
+ byte, only 253 characters can appear in the textual dotted
+ notation.
+
+ The description clause of schema nodes using the domain-name
+ type MUST describe when and how these names are resolved to
+ IP addresses. Note that the resolution of a domain-name value
+ may require to query multiple DNS records (e.g., A for IPv4
+ and AAAA for IPv6). The order of the resolution process and
+ which DNS record takes precedence can either be defined
+ explicitly or it may depend on the configuration of the
+ resolver.
+
+ Domain-name values use the US-ASCII encoding. Their canonical
+ format uses lowercase US-ASCII characters. Internationalized
+ domain names MUST be encoded in punycode as described in RFC
+ 3492";
+ reference
+ "RFC 952: DoD Internet Host Table Specification
+ RFC 1034: Domain Names - Concepts and Facilities
+ RFC 1123: Requirements for Internet Hosts -- Application
+ and Support
+ RFC 2782: A DNS RR for specifying the location of services
+ (DNS SRV)
+ RFC 3492: Punycode: A Bootstring encoding of Unicode for
+ Internationalized Domain Names in Applications
+ (IDNA)
+ RFC 5891: Internationalizing Domain Names in Applications
+ (IDNA): Protocol";
+ }
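+
+  // Illustrative note (editor's addition): 'example.com.' is a fully
+  // qualified name in canonical (lowercase) form; an internationalized
+  // name such as 'bücher.example' must be stored in its Punycode form,
+  // 'xn--bcher-kva.example', per the description above.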
+
+ typedef host {
+ type union {
+ type inet:ip-address;
+ type inet:domain-name;
+ }
+ description
+ "The host type represents either an IP address or a DNS
+ domain name.";
+ }
+
+ typedef uri {
+ type string;
+ description
+ "The uri type represents a Uniform Resource Identifier
+ (URI) as defined by STD 66.
+
+ Objects using the uri type MUST be in US-ASCII encoding,
+ and MUST be normalized as described by RFC 3986 Sections
+ 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary
+ percent-encoding is removed, and all case-insensitive
+ characters are set to lowercase except for hexadecimal
+ digits, which are normalized to uppercase as described in
+ Section 6.2.2.1.
+
+ The purpose of this normalization is to help provide
+ unique URIs. Note that this normalization is not
+ sufficient to provide uniqueness. Two URIs that are
+ textually distinct after this normalization may still be
+ equivalent.
+
+ Objects using the uri type may restrict the schemes that
+ they permit. For example, 'data:' and 'urn:' schemes
+ might not be appropriate.
+
+ A zero-length URI is not a valid URI. This can be used to
+ express 'URI absent' where required.
+
+ In the value set and its semantics, this type is equivalent
+ to the Uri SMIv2 textual convention defined in RFC 5017.";
+ reference
+ "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax
+ RFC 3305: Report from the Joint W3C/IETF URI Planning Interest
+ Group: Uniform Resource Identifiers (URIs), URLs,
+ and Uniform Resource Names (URNs): Clarifications
+ and Recommendations
+ RFC 5017: MIB Textual Conventions for Uniform Resource
+ Identifiers (URIs)";
+ }
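+
+  // Illustrative note (editor's addition): under the RFC 3986
+  // normalization rules cited above, 'HTTP://WWW.Example.COM/%7euser' is
+  // stored as 'http://www.example.com/~user' (lowercase scheme and host;
+  // the unnecessary percent-encoding of the unreserved '~' is removed).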
+
+ }
--- /dev/null
+module ietf-interfaces {
+
+ namespace "urn:ietf:params:xml:ns:yang:ietf-interfaces";
+ prefix if;
+
+ import ietf-yang-types {
+ prefix yang;
+ }
+ import iana-if-type {
+ prefix ianaift;
+ }
+
+ organization
+ "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netmod/>
+ WG List: <mailto:netmod@ietf.org>
+
+ WG Chair: David Kessens
+ <mailto:david.kessens@nsn.com>
+
+ WG Chair: Juergen Schoenwaelder
+ <mailto:j.schoenwaelder@jacobs-university.de>
+
+ Editor: Martin Bjorklund
+ <mailto:mbj@tail-f.com>";
+
+ description
+ "This module contains a collection of YANG definitions for
+ managing network interfaces.
+
+ Copyright (c) 2013 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, is permitted pursuant to, and subject
+ to the license terms contained in, the Simplified BSD License
+ set forth in Section 4.c of the IETF Trust's Legal Provisions
+ Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC XXXX; see
+ the RFC itself for full legal notices.";
+
+ // RFC Ed.: replace XXXX with actual RFC number and remove this
+ // note.
+
+ // RFC Ed.: update the date below with the date of RFC publication
+ // and remove this note.
+ revision 2013-07-04 {
+ description
+ "Initial revision.";
+ reference
+ "RFC XXXX: A YANG Data Model for Interface Management";
+ }
+
+ /* Typedefs */
+
+ typedef interface-ref {
+ type leafref {
+ path "/if:interfaces/if:interface/if:name";
+ }
+ description
+ "This type is used by data models that need to reference
+ configured interfaces.";
+ }
+
+ typedef interface-state-ref {
+ type leafref {
+ path "/if:interfaces-state/if:interface/if:name";
+ }
+ description
+ "This type is used by data models that need to reference
+ the operationally present interfaces.";
+ }
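+
+  // Illustrative sketch (editor's addition): another module would
+  // reference a configured interface through these typedefs, for example:
+  //
+  //   import ietf-interfaces { prefix if; }
+  //   leaf outgoing-interface { type if:interface-ref; }
+  //
+  // The leaf name 'outgoing-interface' is hypothetical; the leafref path
+  // above constrains such a value to an existing
+  // /if:interfaces/if:interface/if:name entry.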
+
+ /* Features */
+
+ feature arbitrary-names {
+ description
+ "This feature indicates that the device allows user-controlled
+ interfaces to be named arbitrarily.";
+ }
+
+ feature pre-provisioning {
+ description
+ "This feature indicates that the device supports
+ pre-provisioning of interface configuration, i.e., it is
+ possible to configure an interface whose physical interface
+ hardware is not present on the device.";
+ }
+
+ feature if-mib {
+ description
+ "This feature indicates that the device implements IF-MIB.";
+ reference
+ "RFC 2863: The Interfaces Group MIB";
+ }
+
+ /* Data nodes */
+
+ container interfaces {
+ description
+ "Interface configuration parameters.";
+
+ list interface {
+ key "name";
+
+ description
+ "The list of configured interfaces on the device.
+
+ The operational state of an interface is available in the
+ /interfaces-state/interface list. If the configuration of a
+ system-controlled interface cannot be used by the system
+ (e.g., the interface hardware present does not match the
+ interface type), then the configuration is not applied to
+ the system-controlled interface shown in the
+ /interfaces-state/interface list. If the configuration
+ of a user-controlled interface cannot be used by the system,
+ the configured interface is not instantiated in the
+ /interfaces-state/interface list.";
+
+ leaf name {
+ type string;
+ description
+ "The name of the interface.
+
+ A device MAY restrict the allowed values for this leaf,
+ possibly depending on the type of the interface.
+
+ For system-controlled interfaces, this leaf is the
+ device-specific name of the interface. The 'config false'
+ list /interfaces-state/interface contains the currently
+ existing interfaces on the device.
+
+ If a client tries to create configuration for a
+ system-controlled interface that is not present in the
+ /interfaces-state/interface list, the server MAY reject
+ the request, if the implementation does not support
+ pre-provisioning of interfaces, or if the name refers to
+ an interface that can never exist in the system. A
+ NETCONF server MUST reply with an rpc-error with the
+ error-tag 'invalid-value' in this case.
+
+ If the device supports pre-provisioning of interface
+ configuration, the feature 'pre-provisioning' is
+ advertised.
+
+ If the device allows arbitrarily named user-controlled
+ interfaces, the feature 'arbitrary-names' is advertised.
+
+ When a configured user-controlled interface is created by
+ the system, it is instantiated with the same name in the
+ /interfaces-state/interface list. Since the name in that
+ list MAY be mapped to ifName by an implementation, such an
+ implementation MUST restrict the allowed values for this
+ leaf so that it matches the restrictions of ifName.
+
+ If a NETCONF server that implements this restriction is
+ sent a value that doesn't match the restriction, it MUST
+ reply with an rpc-error with the error-tag
+ 'invalid-value'.";
+ }
+
+ leaf description {
+ type string;
+ description
+ "A textual description of the interface.
+
+ This leaf MAY be mapped to ifAlias by an implementation.
+ Such an implementation MUST restrict the allowed values
+ for this leaf so that it matches the restrictions of
+ ifAlias.
+
+ If a NETCONF server that implements this restriction is
+ sent a value that doesn't match the restriction, it MUST
+ reply with an rpc-error with the error-tag
+ 'invalid-value'.
+
+ Since ifAlias is defined to be stored in non-volatile
+ storage, the MIB implementation MUST map ifAlias to the
+ value of 'description' in the persistently stored
+ datastore.
+
+ Specifically, if the device supports ':startup', when
+ ifAlias is read the device MUST return the value of
+ 'description' in the 'startup' datastore, and when it is
+ written, it MUST be written to the 'running' and 'startup'
+ datastores. Note that it is up to the implementation if
+ it modifies this single leaf in 'startup', or if it
+ performs an implicit copy-config from 'running' to
+ 'startup'.
+
+ If the device does not support ':startup', ifAlias MUST
+ be mapped to the 'description' leaf in the 'running'
+ datastore.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifAlias";
+ }
+
+ leaf type {
+ type ianaift:iana-if-type;
+ mandatory true;
+ description
+ "The type of the interface.
+
+ When an interface entry is created, a server MAY
+ initialize the type leaf with a valid value, e.g., if it
+ is possible to derive the type from the name of the
+ interface.
+
+ If a client tries to set the type of an interface to a
+ value that can never be used by the system, e.g., if the
+ type is not supported or if the type does not match the
+ name of the interface, the server MUST reject the request.
+ A NETCONF server MUST reply with an rpc-error with the
+ error-tag 'invalid-value' in this case.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifType";
+ }
+
+ leaf enabled {
+ type boolean;
+ default "true";
+ description
+ "This leaf contains the configured, desired state of the
+ interface.
+
+ Systems that implement the IF-MIB use the value of this
+ leaf in the 'running' datastore to set
+ IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry
+ has been initialized, as described in RFC 2863.
+
+ Changes in this leaf in the 'running' datastore are
+ reflected in ifAdminStatus, but if ifAdminStatus is
+ changed over SNMP, this leaf is not affected.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifAdminStatus";
+ }
+
+ leaf link-up-down-trap-enable {
+ if-feature if-mib;
+ type enumeration {
+ enum enabled {
+ value 1;
+ }
+ enum disabled {
+ value 2;
+ }
+ }
+ description
+ "Controls whether linkUp/linkDown SNMP notifications
+ should be generated for this interface.
+
+ If this node is not configured, the value 'enabled' is
+ operationally used by the server for interfaces which do
+ not operate on top of any other interface (i.e., there are
+ no 'lower-layer-if' entries), and 'disabled' otherwise.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifLinkUpDownTrapEnable";
+ }
+ }
+ }
+
+ container interfaces-state {
+ config false;
+ description
+ "Data nodes for the operational state of interfaces.";
+
+ list interface {
+ key "name";
+
+ description
+ "The list of interfaces on the device.
+
+ System-controlled interfaces created by the system are
+ always present in this list, whether they are configured or
+ not.";
+
+ leaf name {
+ type string;
+ description
+ "The name of the interface.
+
+ This leaf MAY be mapped to ifName by an implementation.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifName";
+ }
+
+ leaf type {
+ type ianaift:iana-if-type;
+ mandatory true;
+ description
+ "The type of the interface.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifType";
+ }
+
+ leaf admin-status {
+ if-feature if-mib;
+ type enumeration {
+ enum up {
+ value 1;
+ description
+ "Ready to pass packets.";
+ }
+ enum down {
+ value 2;
+ description
+ "Not ready to pass packets and not in some test mode.";
+ }
+ enum testing {
+ value 3;
+ description
+ "In some test mode.";
+ }
+ }
+ mandatory true;
+ description
+ "The desired state of the interface.
+
+ This leaf has the same read semantics as ifAdminStatus.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifAdminStatus";
+ }
+
+ leaf oper-status {
+ type enumeration {
+ enum up {
+ value 1;
+ description
+ "Ready to pass packets.";
+ }
+ enum down {
+ value 2;
+ description
+ "The interface does not pass any packets.";
+ }
+ enum testing {
+ value 3;
+ description
+ "In some test mode. No operational packets can
+ be passed.";
+ }
+ enum unknown {
+ value 4;
+ description
+ "Status cannot be determined for some reason.";
+ }
+ enum dormant {
+ value 5;
+ description
+ "Waiting for some external event.";
+ }
+ enum not-present {
+ value 6;
+ description
+ "Some component (typically hardware) is missing.";
+ }
+ enum lower-layer-down {
+ value 7;
+ description
+ "Down due to state of lower-layer interface(s).";
+ }
+ }
+ mandatory true;
+ description
+ "The current operational state of the interface.
+
+ This leaf has the same semantics as ifOperStatus.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifOperStatus";
+ }
+
+ leaf last-change {
+ type yang:date-and-time;
+ description
+ "The time the interface entered its current operational
+ state. If the current state was entered prior to the
+ last re-initialization of the local network management
+ subsystem, then this node is not present.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifLastChange";
+ }
+
+ leaf if-index {
+ if-feature if-mib;
+ type int32 {
+ range "1..2147483647";
+ }
+ mandatory true;
+ description
+ "The ifIndex value for the ifEntry represented by this
+ interface.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifIndex";
+ }
+
+ leaf phys-address {
+ type yang:phys-address;
+ description
+ "The interface's address at its protocol sub-layer. For
+ example, for an 802.x interface, this object normally
+ contains a MAC address. The interface's media-specific
+ modules must define the bit and byte ordering and the
+ format of the value of this object. For interfaces that do
+ not have such an address (e.g., a serial line), this node
+ is not present.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifPhysAddress";
+ }
+
+ leaf-list higher-layer-if {
+ type interface-state-ref;
+ description
+ "A list of references to interfaces layered on top of this
+ interface.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifStackTable";
+ }
+
+ leaf-list lower-layer-if {
+ type interface-state-ref;
+ description
+ "A list of references to interfaces layered underneath this
+ interface.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifStackTable";
+ }
+
+ leaf speed {
+ type yang:gauge64;
+ units "bits / second";
+ description
+ "An estimate of the interface's current bandwidth in bits
+ per second. For interfaces that do not vary in
+ bandwidth or for those where no accurate estimation can
+ be made, this node should contain the nominal bandwidth.
+ For interfaces that have no concept of bandwidth, this
+ node is not present.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifSpeed, ifHighSpeed";
+ }
+
+ container statistics {
+ description
+ "A collection of interface-related statistics objects.";
+
+ leaf discontinuity-time {
+ type yang:date-and-time;
+ mandatory true;
+ description
+ "The time on the most recent occasion at which any one or
+ more of this interface's counters suffered a
+ discontinuity. If no such discontinuities have occurred
+ since the last re-initialization of the local management
+ subsystem, then this node contains the time the local
+ management subsystem re-initialized itself.";
+ }
+
+ leaf in-octets {
+ type yang:counter64;
+ description
+ "The total number of octets received on the interface,
+ including framing characters.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifHCInOctets";
+ }
+ leaf in-unicast-pkts {
+ type yang:counter64;
+ description
+ "The number of packets, delivered by this sub-layer to a
+ higher (sub-)layer, which were not addressed to a
+ multicast or broadcast address at this sub-layer.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts";
+ }
+ leaf in-broadcast-pkts {
+ type yang:counter64;
+ description
+ "The number of packets, delivered by this sub-layer to a
+ higher (sub-)layer, which were addressed to a broadcast
+ address at this sub-layer.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifHCInBroadcastPkts";
+ }
+ leaf in-multicast-pkts {
+ type yang:counter64;
+ description
+ "The number of packets, delivered by this sub-layer to a
+ higher (sub-)layer, which were addressed to a multicast
+ address at this sub-layer. For a MAC layer protocol,
+ this includes both Group and Functional addresses.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifHCInMulticastPkts";
+ }
+ leaf in-discards {
+ type yang:counter32;
+ description
+ "The number of inbound packets which were chosen to be
+ discarded even though no errors had been detected to
+ prevent their being deliverable to a higher-layer
+ protocol. One possible reason for discarding such a
+ packet could be to free up buffer space.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifInDiscards";
+ }
+ leaf in-errors {
+ type yang:counter32;
+ description
+ "For packet-oriented interfaces, the number of inbound
+ packets that contained errors preventing them from being
+ deliverable to a higher-layer protocol. For character-
+ oriented or fixed-length interfaces, the number of
+ inbound transmission units that contained errors
+ preventing them from being deliverable to a higher-layer
+ protocol.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifInErrors";
+ }
+ leaf in-unknown-protos {
+ type yang:counter32;
+ description
+ "For packet-oriented interfaces, the number of packets
+ received via the interface which were discarded because
+ of an unknown or unsupported protocol. For
+ character-oriented or fixed-length interfaces that
+ support protocol multiplexing the number of transmission
+ units received via the interface which were discarded
+ because of an unknown or unsupported protocol. For any
+ interface that does not support protocol multiplexing,
+ this counter is not present.
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos";
+ }
+
+ leaf out-octets {
+ type yang:counter64;
+ description
+ "The total number of octets transmitted out of the
+ interface, including framing characters.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifHCOutOctets";
+ }
+ leaf out-unicast-pkts {
+ type yang:counter64;
+ description
+ "The total number of packets that higher-level protocols
+ requested be transmitted, and which were not addressed
+ to a multicast or broadcast address at this sub-layer,
+ including those that were discarded or not sent.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts";
+ }
+ leaf out-broadcast-pkts {
+ type yang:counter64;
+ description
+ "The total number of packets that higher-level protocols
+ requested be transmitted, and which were addressed to a
+ broadcast address at this sub-layer, including those
+ that were discarded or not sent.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifHCOutBroadcastPkts";
+ }
+ leaf out-multicast-pkts {
+ type yang:counter64;
+ description
+ "The total number of packets that higher-level protocols
+ requested be transmitted, and which were addressed to a
+ multicast address at this sub-layer, including those
+ that were discarded or not sent. For a MAC layer
+ protocol, this includes both Group and Functional
+ addresses.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB -
+ ifHCOutMulticastPkts";
+ }
+ leaf out-discards {
+ type yang:counter32;
+ description
+ "The number of outbound packets which were chosen to be
+ discarded even though no errors had been detected to
+ prevent their being transmitted. One possible reason
+ for discarding such a packet could be to free up buffer
+ space.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifOutDiscards";
+ }
+ leaf out-errors {
+ type yang:counter32;
+ description
+ "For packet-oriented interfaces, the number of outbound
+ packets that could not be transmitted because of errors.
+ For character-oriented or fixed-length interfaces, the
+ number of outbound transmission units that could not be
+ transmitted because of errors.
+
+ Discontinuities in the value of this counter can occur
+ at re-initialization of the management system, and at
+ other times as indicated by the value of
+ 'discontinuity-time'.";
+ reference
+ "RFC 2863: The Interfaces Group MIB - ifOutErrors";
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module ietf-restconf {
+ namespace "urn:ietf:params:xml:ns:yang:ietf-restconf";
+ prefix "restconf";
+
+ import ietf-yang-types { prefix yang; }
+ import ietf-inet-types { prefix inet; }
+
+ organization
+ "IETF NETCONF (Network Configuration) Working Group";
+
+ contact
+ "Editor: Andy Bierman
+ <mailto:andy@yumaworks.com>
+
+ Editor: Martin Bjorklund
+ <mailto:mbj@tail-f.com>
+
+ Editor: Kent Watsen
+ <mailto:kwatsen@juniper.net>
+
+ Editor: Rex Fernando
+ <mailto:rex@cisco.com>";
+
+ description
+ "This module contains conceptual YANG specifications
+ for the YANG Patch and error content that is used in
+ RESTCONF protocol messages. A conceptual container
+ representing the RESTCONF API nodes (media type
+ application/yang.api).
+
+ Note that the YANG definitions within this module do not
+ represent configuration data of any kind.
+ The YANG grouping statements provide a normative syntax
+ for XML and JSON message encoding purposes.
+ Copyright (c) 2013 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, is permitted pursuant to, and subject
+ to the license terms contained in, the Simplified BSD License
+ set forth in Section 4.c of the IETF Trust's Legal Provisions
+ Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC XXXX; see
+ the RFC itself for full legal notices.";
+
+ // RFC Ed.: replace XXXX with actual RFC number and remove this
+ // note.
+
+ // RFC Ed.: remove this note
+ // Note: extracted from draft-bierman-netconf-restconf-02.txt
+
+ // RFC Ed.: update the date below with the date of RFC publication
+ // and remove this note.
+ revision 2013-10-19 {
+ description
+ "Initial revision.";
+ reference
+ "RFC XXXX: RESTCONF Protocol.";
+ }
+
+ typedef data-resource-identifier {
+ type string {
+ length "1 .. max";
+ }
+ description
+ "Contains a Data Resource Identifier formatted string
+ to identify a specific data node. The data node that
+ uses this data type SHOULD define the document root
+ for data resource identifiers. The default document
+ root is the target datastore conceptual root node.
+ Data resource identifiers are defined relative to
+ this document root.";
+ reference
+ "RFC XXXX: [sec. 5.3.1.1 ABNF For Data Resource Identifiers]";
+ }
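+
+  // Hypothetical example (editor's addition): a value of this type names a
+  // single data node relative to the datastore root, e.g. something like
+  // '/interfaces/interface/eth0'; the exact encoding is given by the ABNF
+  // referenced above.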
+
+ // this typedef is TBD; not currently used
+ typedef datastore-identifier {
+ type union {
+ type enumeration {
+ enum candidate {
+ description
+ "Identifies the NETCONF shared candidate datastore.";
+ reference
+ "RFC 6241, section 8.3";
+ }
+ enum running {
+ description
+ "Identifies the NETCONF running datastore.";
+ reference
+ "RFC 6241, section 5.1";
+ }
+ enum startup {
+ description
+ "Identifies the NETCONF startup datastore.";
+ reference
+ "RFC 6241, section 8.7";
+ }
+ }
+ type string;
+ }
+ description
+ "Contains a string to identify a specific datastore.
+ The enumerated datastore identifier values are
+ reserved for standard datastore names.";
+ }
+
+ typedef revision-identifier {
+ type string {
+ pattern '\d{4}-\d{2}-\d{2}';
+ }
+ description
+ "Represents a specific date in YYYY-MM-DD format.
+ TBD: make pattern more precise to exclude leading zeros.";
+ }
+
+ grouping yang-patch {
+ description
+ "A grouping that contains a YANG container
+ representing the syntax and semantics of a
+ YANG Patch edit request message.";
+
+ container yang-patch {
+ description
+ "Represents a conceptual sequence of datastore edits,
+ called a patch. Each patch is given a client-assigned
+ patch identifier. Each edit MUST be applied
+ in ascending order, and all edits MUST be applied.
+ If any errors occur, then the target datastore MUST NOT
+ be changed by the patch operation.
+
+ A patch MUST be validated by the server to be a
+ well-formed message before any of the patch edits
+ are validated or attempted.
+
+ YANG datastore validation (defined in RFC 6020, section
+ 8.3.3) is performed after all edits have been
+ individually validated.
+
+ It is possible for a datastore constraint violation to occur
+ due to any node in the datastore, including nodes not
+ included in the edit list. Any validation errors MUST
+ be reported in the reply message.";
+
+ reference
+ "RFC 6020, section 8.3.";
+
+ leaf patch-id {
+ type string;
+ description
+ "An arbitrary string provided by the client to identify
+ the entire patch. This value SHOULD be present in any
+ audit logging records generated by the server for the
+ patch. Error messages returned by the server pertaining
+ to this patch will be identified by this patch-id value.";
+ }
+
+ leaf comment {
+ type string {
+ length "0 .. 1024";
+ }
+ description
+ "An arbitrary string provided by the client to describe
+ the entire patch. This value SHOULD be present in any
+ audit logging records generated by the server for the
+ patch.";
+ }
+
+ list edit {
+ key edit-id;
+ ordered-by user;
+
+ description
+ "Represents one edit within the YANG Patch
+ request message.";
+ leaf edit-id {
+ type string;
+ description
+ "Arbitrary string index for the edit.
+ Error messages returned by the server pertaining
+ to a specific edit will be identified by this
+ value.";
+ }
+
+ leaf operation {
+ type enumeration {
+ enum create {
+ description
+ "The target data node is created using the
+ supplied value, only if it does not already
+ exist.";
+ }
+ enum delete {
+ description
+ "Delete the target node, only if the data resource
+ currently exists, otherwise return an error.";
+ }
+ enum insert {
+ description
+ "Insert the supplied value into a user-ordered
+ list or leaf-list entry. The target node must
+ represent a new data resource.";
+ }
+ enum merge {
+ description
+ "The supplied value is merged with the target data
+ node.";
+ }
+ enum move {
+ description
+ "Move the target node. Reorder a user-ordered
+ list or leaf-list. The target node must represent
+ an existing data resource.";
+ }
+ enum replace {
+ description
+ "The supplied value is used to replace the target
+ data node.";
+ }
+ enum remove {
+ description
+ "Delete the target node if it currently exists.";
+ }
+ }
+ mandatory true;
+ description
+ "The datastore operation requested for the associated
+ edit entry";
+ }
+
+ leaf target {
+ type data-resource-identifier;
+ mandatory true;
+ description
+ "Identifies the target data resource for the edit
+ operation.";
+ }
+
+ leaf point {
+ when "(../operation = 'insert' or " +
+ "../operation = 'move') and " +
+ "(../where = 'before' or ../where = 'after')" {
+ description
+ "Point leaf only applies for insert or move
+ operations, before or after an existing entry.";
+ }
+ type data-resource-identifier;
+ description
+ "The absolute URL path for the data node that is being
+ used as the insertion point or move point for the
+ target of this edit entry.";
+ }
+
+ leaf where {
+ when "../operation = 'insert' or ../operation = 'move'" {
+ description
+ "Where leaf only applies for insert or move
+ operations.";
+ }
+ type enumeration {
+ enum before {
+ description
+ "Insert or move a data node before the data resource
+ identified by the 'point' parameter.";
+ }
+ enum after {
+ description
+ "Insert or move a data node after the data resource
+ identified by the 'point' parameter.";
+ }
+ enum first {
+ description
+ "Insert or move a data node so it becomes ordered
+ as the first entry.";
+ }
+ enum last {
+ description
+ "Insert or move a data node so it becomes ordered
+ as the last entry.";
+ }
+
+ }
+ default last;
+ description
+ "Identifies where a data resource will be inserted or
+ moved. YANG only allows these operations for
+ list and leaf-list data nodes that are ordered-by
+ user.";
+ }
+
+ anyxml value {
+ when "(../operation = 'create' or " +
+ "../operation = 'merge' " +
+ "or ../operation = 'replace' or " +
+ "../operation = 'insert')" {
+ description
+ "Value node only used for create, merge,
+ replace, and insert operations";
+ }
+ description
+ "Value used for this edit operation.";
+ }
+ }
+ }
+
+ } // grouping yang-patch
+
+
+ grouping yang-patch-status {
+
+ description
+ "A grouping that contains a YANG container
+ representing the syntax and semantics of
+ YANG Patch status response message.";
+
+ container yang-patch-status {
+ description
+ "A container representing the response message
+ sent by the server after a YANG Patch edit
+ request message has been processed.";
+
+ leaf patch-id {
+ type string;
+ description
+ "The patch-id value used in the request";
+ }
+
+ choice global-status {
+ description
+ "Report global errors or complete success.
+ If there is no case selected then errors
+ are reported in the edit-status container.";
+
+ case global-errors {
+ uses errors;
+ description
+ "This container will be present if global
+ errors unrelated to a specific edit occurred.";
+ }
+ leaf ok {
+ type empty;
+ description
+ "This leaf will be present if the request succeeded
+ and there are no errors reported in the edit-status
+ container.";
+ }
+ }
+
+ container edit-status {
+ description
+ "This container will be present if there are
+ edit-specific status responses to report.";
+
+ list edit {
+ key edit-id;
+
+ description
+ "Represents a list of status responses,
+ corresponding to edits in the YANG Patch
+ request message. If an edit entry was
+ skipped or not reached by the server,
+ then this list will not contain a corresponding
+ entry for that edit.";
+
+ leaf edit-id {
+ type string;
+ description
+ "Response status is for the edit list entry
+ with this edit-id value.";
+ }
+ choice edit-status-choice {
+ description
+ "A choice between different types of status
+ responses for each edit entry.";
+ leaf ok {
+ type empty;
+ description
+ "This edit entry was invoked without any
+ errors detected by the server associated
+ with this edit.";
+ }
+ leaf location {
+ type inet:uri;
+ description
+ "Contains the Location header value that would be
+ returned if this edit causes a new resource to be
+ created. If the edit identified by the same edit-id
+ value was successfully invoked and a new resource
+ was created, then this field will be returned
+ instead of 'ok'.";
+ }
+ case errors {
+ uses errors;
+ description
+ "The server detected errors associated with the
+ edit identified by the same edit-id value.";
+ }
+ }
+ }
+ }
+ }
+ } // grouping yang-patch-status
+
+
+ grouping errors {
+
+ description
+ "A grouping that contains a YANG container
+ representing the syntax and semantics of a
+ YANG Patch errors report within a response message.";
+
+ container errors {
+ config false; // needed so list error does not need a key
+ description
+ "Represents an error report returned by the server if
+ a request results in an error.";
+
+ list error {
+ description
+ "An entry containing information about one
+ specific error that occurred while processing
+ a RESTCONF request.";
+ reference "RFC 6241, Section 4.3";
+
+ leaf error-type {
+ type enumeration {
+ enum transport {
+ description "The transport layer";
+ }
+ enum rpc {
+ description "The rpc or notification layer";
+ }
+ enum protocol {
+ description "The protocol operation layer";
+ }
+ enum application {
+ description "The server application layer";
+ }
+ }
+ mandatory true;
+ description
+ "The protocol layer where the error occurred.";
+ }
+
+ leaf error-tag {
+ type string;
+ mandatory true;
+ description
+ "The enumerated error tag.";
+ }
+
+ leaf error-app-tag {
+ type string;
+ description
+ "The application-specific error tag.";
+ }
+
+ leaf error-path {
+ type data-resource-identifier;
+ description
+ "The target data resource identifier associated
+ with the error, if any.";
+ }
+ leaf error-message {
+ type string;
+ description
+ "A message describing the error.";
+ }
+
+ container error-info {
+ description
+ "A container allowing additional information
+ to be included in the error report.";
+ // arbitrary anyxml content here
+ }
+ }
+ }
+ } // grouping errors
+
+
+ grouping restconf {
+
+ description
+ "A grouping that contains a YANG container
+ representing the syntax and semantics of
+ the RESTCONF API resource.";
+
+ container restconf {
+ description
+ "Conceptual container representing the
+ application/yang.api resource type.";
+
+ container config {
+ description
+ "Container representing the application/yang.datastore
+ resource type. Represents the conceptual root of the
+ unified configuration datastore containing YANG data
+ nodes. The child nodes of this container are
+ configuration data resources (application/yang.data)
+ defined as top-level YANG data nodes from the modules
+ advertised by the server in /restconf/modules.";
+ }
+
+ container operational {
+ description
+ "Container representing the application/yang.datastore
+ resource type. Represents the conceptual root of the
+ operational data supported by the server. The child
+ nodes of this container are operational data resources
+ (application/yang.data) defined as top-level
+ YANG data nodes from the modules advertised by
+ the server in /restconf/modules.";
+ }
+
+ container modules {
+ description
+ "Contains a list of module description entries.
+ These modules are currently loaded into the server.";
+
+ list module {
+ key "name revision";
+ description
+ "Each entry represents one module currently
+ supported by the server.";
+
+ leaf name {
+ type yang:yang-identifier;
+ description "The YANG module name.";
+ }
+ leaf revision {
+ type union {
+ type revision-identifier;
+ type string { length 0; }
+ }
+ description
+ "The YANG module revision date. An empty string is
+ used if no revision statement is present in the
+ YANG module.";
+ }
+ leaf namespace {
+ type inet:uri;
+ mandatory true;
+ description
+ "The XML namespace identifier for this module.";
+ }
+ leaf-list feature {
+ type yang:yang-identifier;
+ description
+ "List of YANG feature names from this module that are
+ supported by the server.";
+ }
+ leaf-list deviation {
+ type yang:yang-identifier;
+ description
+ "List of YANG deviation module names used by this
+ server to modify the conformance of the module
+ associated with this entry.";
+ }
+ }
+ }
+
+ container operations {
+ description
+ "Container for all operation resources
+ (application/yang.operation),
+
+ Each resource is represented as an empty leaf with the
+ name of the RPC operation from the YANG rpc statement.
+
+ E.g.;
+
+ POST /restconf/operations/show-log-errors
+
+ leaf show-log-errors {
+ type empty;
+ }
+ ";
+ }
+
+ container streams {
+ description
+ "Container representing the notification event streams
+ supported by the server.";
+ reference
+ "RFC 5277, Section 3.4, <streams> element.";
+
+ list stream {
+ key name;
+ description
+ "Each entry describes an event stream supported by
+ the server.";
+
+ leaf name {
+ type string;
+ description "The stream name";
+ reference "RFC 5277, Section 3.4, <name> element.";
+ }
+
+ leaf description {
+ type string;
+ description "Description of stream content";
+ reference
+ "RFC 5277, Section 3.4, <description> element.";
+ }
+
+ leaf replay-support {
+ type boolean;
+ description
+ "Indicates if replay buffer supported for this stream";
+ reference
+ "RFC 5277, Section 3.4, <replaySupport> element.";
+ }
+
+ leaf replay-log-creation-time {
+ type yang:date-and-time;
+ description
+ "Indicates the time the replay log for this stream
+ was created.";
+ reference
+ "RFC 5277, Section 3.4, <replayLogCreationTime>
+ element.";
+ }
+
+ leaf events {
+ type empty;
+ description
+ "Represents the entry point for establishing
+ notification delivery via server sent events.";
+ }
+ }
+ }
+
+ leaf version {
+ type enumeration {
+ enum "1.0" {
+ description
+ "Version 1.0 of the RESTCONF protocol.";
+ }
+ }
+ config false;
+ description
+ "Contains the RESTCONF protocol version.";
+ }
+ }
+ } // grouping restconf
+
+
+ grouping notification {
+ description
+ "Contains the notification message wrapper definition.";
+
+ container notification {
+ description
+ "RESTCONF notification message wrapper.";
+ leaf event-time {
+ type yang:date-and-time;
+ mandatory true;
+ description
+ "The time the event was generated by the
+ event source.";
+ reference
+ "RFC 5277, section 4, <eventTime> element.";
+ }
+
+ /* The YANG-specific notification container is encoded
+ * after the 'event-time' element. The format
+ * corresponds to the notificationContent element
+ * in RFC 5277, section 4. For example:
+ *
+ * module example-one {
+ * ...
+ * notification event1 { ... }
+ *
+ * }
+ *
+ * Encoded as element 'event1' in the namespace
+ * for module 'example-one'.
+ */
+ }
+ } // grouping notification
+
+ }
\ No newline at end of file
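
For orientation, a minimal sketch of the edit request message that the yang-patch grouping above describes, encoded in XML. The enclosing namespace is assumed from the module header (not shown in this excerpt), and the target path, payload namespace, and values are hypothetical placeholders; only the patch-id / comment / edit structure follows the grouping.

    <!-- Hypothetical example: paths, the payload namespace, and values are placeholders. -->
    <yang-patch xmlns="urn:ietf:params:xml:ns:yang:ietf-restconf">
      <patch-id>add-lo0-interface</patch-id>
      <comment>Create a single interface entry</comment>
      <edit>
        <edit-id>edit1</edit-id>
        <operation>create</operation>
        <target>/interfaces/interface/lo0</target>
        <value>
          <interface xmlns="urn:example:interfaces">
            <name>lo0</name>
          </interface>
        </value>
      </edit>
    </yang-patch>
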
--- /dev/null
+ module ietf-yang-types {
+
+ namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types";
+ prefix "yang";
+
+ organization
+ "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netmod/>
+ WG List: <mailto:netmod@ietf.org>
+
+ WG Chair: David Partain
+ <mailto:david.partain@ericsson.com>
+
+ WG Chair: David Kessens
+ <mailto:david.kessens@nsn.com>
+
+ Editor: Juergen Schoenwaelder
+ <mailto:j.schoenwaelder@jacobs-university.de>";
+
+ description
+ "This module contains a collection of generally useful derived
+ YANG data types.
+
+ Copyright (c) 2010 IETF Trust and the persons identified as
+ authors of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, is permitted pursuant to, and subject to the license
+ terms contained in, the Simplified BSD License set forth in Section
+ 4.c of the IETF Trust's Legal Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC 6021; see
+ the RFC itself for full legal notices.";
+
+ revision 2010-09-24 {
+ description
+ "Initial revision.";
+ reference
+ "RFC 6021: Common YANG Data Types";
+ }
+
+ /*** collection of counter and gauge types ***/
+
+ typedef counter32 {
+ type uint32;
+ description
+ "The counter32 type represents a non-negative integer
+ that monotonically increases until it reaches a
+ maximum value of 2^32-1 (4294967295 decimal), when it
+ wraps around and starts increasing again from zero.
+
+ Counters have no defined 'initial' value, and thus, a
+ single value of a counter has (in general) no information
+ content. Discontinuities in the monotonically increasing
+ value normally occur at re-initialization of the
+ management system, and at other times as specified in the
+ description of a schema node using this type. If such
+ other times can occur, for example, the creation of
+ a schema node of type counter32 at times other than
+ re-initialization, then a corresponding schema node
+ should be defined, with an appropriate type, to indicate
+ the last discontinuity.
+
+ The counter32 type should not be used for configuration
+ schema nodes. A default statement SHOULD NOT be used in
+ combination with the type counter32.
+
+ In the value set and its semantics, this type is equivalent
+ to the Counter32 type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ typedef zero-based-counter32 {
+ type yang:counter32;
+ default "0";
+ description
+ "The zero-based-counter32 type represents a counter32
+ that has the defined 'initial' value zero.
+
+ A schema node of this type will be set to zero (0) on creation
+ and will thereafter increase monotonically until it reaches
+ a maximum value of 2^32-1 (4294967295 decimal), when it
+ wraps around and starts increasing again from zero.
+
+ Provided that an application discovers a new schema node
+ of this type within the minimum time to wrap, it can use the
+ 'initial' value as a delta. It is important for a management
+ station to be aware of this minimum time and the actual time
+ between polls, and to discard data if the actual time is too
+ long or there is no defined minimum time.
+
+ In the value set and its semantics, this type is equivalent
+ to the ZeroBasedCounter32 textual convention of the SMIv2.";
+ reference
+ "RFC 4502: Remote Network Monitoring Management Information
+ Base Version 2";
+ }
+
+ typedef counter64 {
+ type uint64;
+ description
+ "The counter64 type represents a non-negative integer
+ that monotonically increases until it reaches a
+ maximum value of 2^64-1 (18446744073709551615 decimal),
+ when it wraps around and starts increasing again from zero.
+
+ Counters have no defined 'initial' value, and thus, a
+ single value of a counter has (in general) no information
+ content. Discontinuities in the monotonically increasing
+ value normally occur at re-initialization of the
+ management system, and at other times as specified in the
+ description of a schema node using this type. If such
+ other times can occur, for example, the creation of
+ a schema node of type counter64 at times other than
+ re-initialization, then a corresponding schema node
+ should be defined, with an appropriate type, to indicate
+ the last discontinuity.
+
+ The counter64 type should not be used for configuration
+ schema nodes. A default statement SHOULD NOT be used in
+ combination with the type counter64.
+
+ In the value set and its semantics, this type is equivalent
+ to the Counter64 type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ typedef zero-based-counter64 {
+ type yang:counter64;
+ default "0";
+ description
+ "The zero-based-counter64 type represents a counter64 that
+ has the defined 'initial' value zero.
+
+ A schema node of this type will be set to zero (0) on creation
+ and will thereafter increase monotonically until it reaches
+ a maximum value of 2^64-1 (18446744073709551615 decimal),
+ when it wraps around and starts increasing again from zero.
+
+ Provided that an application discovers a new schema node
+ of this type within the minimum time to wrap, it can use the
+ 'initial' value as a delta. It is important for a management
+ station to be aware of this minimum time and the actual time
+ between polls, and to discard data if the actual time is too
+ long or there is no defined minimum time.
+
+ In the value set and its semantics, this type is equivalent
+ to the ZeroBasedCounter64 textual convention of the SMIv2.";
+ reference
+ "RFC 2856: Textual Conventions for Additional High Capacity
+ Data Types";
+ }
+
+ typedef gauge32 {
+ type uint32;
+ description
+ "The gauge32 type represents a non-negative integer, which
+ may increase or decrease, but shall never exceed a maximum
+ value, nor fall below a minimum value. The maximum value
+ cannot be greater than 2^32-1 (4294967295 decimal), and
+ the minimum value cannot be smaller than 0. The value of
+ a gauge32 has its maximum value whenever the information
+ being modeled is greater than or equal to its maximum
+ value, and has its minimum value whenever the information
+ being modeled is smaller than or equal to its minimum value.
+ If the information being modeled subsequently decreases
+ below (increases above) the maximum (minimum) value, the
+ gauge32 also decreases (increases).
+
+ In the value set and its semantics, this type is equivalent
+ to the Gauge32 type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ typedef gauge64 {
+ type uint64;
+ description
+ "The gauge64 type represents a non-negative integer, which
+ may increase or decrease, but shall never exceed a maximum
+ value, nor fall below a minimum value. The maximum value
+ cannot be greater than 2^64-1 (18446744073709551615), and
+ the minimum value cannot be smaller than 0. The value of
+ a gauge64 has its maximum value whenever the information
+ being modeled is greater than or equal to its maximum
+ value, and has its minimum value whenever the information
+ being modeled is smaller than or equal to its minimum value.
+ If the information being modeled subsequently decreases
+ below (increases above) the maximum (minimum) value, the
+ gauge64 also decreases (increases).
+
+ In the value set and its semantics, this type is equivalent
+ to the CounterBasedGauge64 SMIv2 textual convention defined
+ in RFC 2856";
+ reference
+ "RFC 2856: Textual Conventions for Additional High Capacity
+ Data Types";
+ }
+
+ /*** collection of identifier related types ***/
+
+ typedef object-identifier {
+ type string {
+ pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))'
+ + '(\.(0|([1-9]\d*)))*';
+ }
+ description
+ "The object-identifier type represents administratively
+ assigned names in a registration-hierarchical-name tree.
+
+ Values of this type are denoted as a sequence of numerical
+ non-negative sub-identifier values. Each sub-identifier
+ value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers
+ are separated by single dots and without any intermediate
+ whitespace.
+
+ The ASN.1 standard restricts the value space of the first
+ sub-identifier to 0, 1, or 2. Furthermore, the value space
+ of the second sub-identifier is restricted to the range
+ 0 to 39 if the first sub-identifier is 0 or 1. Finally,
+ the ASN.1 standard requires that an object identifier
+ has always at least two sub-identifier. The pattern
+ captures these restrictions.
+
+ Although the number of sub-identifiers is not limited,
+ module designers should realize that there may be
+ implementations that stick with the SMIv2 limit of 128
+ sub-identifiers.
+
+ This type is a superset of the SMIv2 OBJECT IDENTIFIER type
+ since it is not restricted to 128 sub-identifiers. Hence,
+ this type SHOULD NOT be used to represent the SMIv2 OBJECT
+ IDENTIFIER type, the object-identifier-128 type SHOULD be
+ used instead.";
+ reference
+ "ISO9834-1: Information technology -- Open Systems
+ Interconnection -- Procedures for the operation of OSI
+ Registration Authorities: General procedures and top
+ arcs of the ASN.1 Object Identifier tree";
+ }
+
+
+
+
+ typedef object-identifier-128 {
+ type object-identifier {
+ pattern '\d*(\.\d*){1,127}';
+ }
+ description
+ "This type represents object-identifiers restricted to 128
+ sub-identifiers.
+
+ In the value set and its semantics, this type is equivalent
+ to the OBJECT IDENTIFIER type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ typedef yang-identifier {
+ type string {
+ length "1..max";
+ pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*';
+ pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*';
+ }
+ description
+ "A YANG identifier string as defined by the 'identifier'
+ rule in Section 12 of RFC 6020. An identifier must
+ start with an alphabetic character or an underscore
+ followed by an arbitrary sequence of alphabetic or
+ numeric characters, underscores, hyphens, or dots.
+
+ A YANG identifier MUST NOT start with any possible
+ combination of the lowercase or uppercase character
+ sequence 'xml'.";
+ reference
+ "RFC 6020: YANG - A Data Modeling Language for the Network
+ Configuration Protocol (NETCONF)";
+ }
+
+ /*** collection of date and time related types ***/
+
+ typedef date-and-time {
+ type string {
+ pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?'
+ + '(Z|[\+\-]\d{2}:\d{2})';
+ }
+ description
+ "The date-and-time type is a profile of the ISO 8601
+ standard for representation of dates and times using the
+ Gregorian calendar. The profile is defined by the
+ date-time production in Section 5.6 of RFC 3339.
+
+ The date-and-time type is compatible with the dateTime XML
+ schema type with the following notable exceptions:
+
+ (a) The date-and-time type does not allow negative years.
+
+ (b) The date-and-time time-offset -00:00 indicates an unknown
+ time zone (see RFC 3339) while -00:00 and +00:00 and Z all
+ represent the same time zone in dateTime.
+
+ (c) The canonical format (see below) of data-and-time values
+ differs from the canonical format used by the dateTime XML
+ schema type, which requires all times to be in UTC using the
+ time-offset 'Z'.
+
+ This type is not equivalent to the DateAndTime textual
+ convention of the SMIv2 since RFC 3339 uses a different
+ separator between full-date and full-time and provides
+ higher resolution of time-secfrac.
+
+ The canonical format for date-and-time values with a known time
+ zone uses a numeric time zone offset that is calculated using
+ the device's configured known offset to UTC time. A change of
+ the device's offset to UTC time will cause date-and-time values
+ to change accordingly. Such changes might happen periodically
+ in case a server follows automatically daylight saving time
+ (DST) time zone offset changes. The canonical format for
+ date-and-time values with an unknown time zone (usually referring
+ to the notion of local time) uses the time-offset -00:00.";
+ reference
+ "RFC 3339: Date and Time on the Internet: Timestamps
+ RFC 2579: Textual Conventions for SMIv2
+ XSD-TYPES: XML Schema Part 2: Datatypes Second Edition";
+ }
+
+ typedef timeticks {
+ type uint32;
+ description
+ "The timeticks type represents a non-negative integer that
+ represents the time, modulo 2^32 (4294967296 decimal), in
+ hundredths of a second between two epochs. When a schema
+ node is defined that uses this type, the description of
+ the schema node identifies both of the reference epochs.
+
+ In the value set and its semantics, this type is equivalent
+ to the TimeTicks type of the SMIv2.";
+ reference
+ "RFC 2578: Structure of Management Information Version 2 (SMIv2)";
+ }
+
+ typedef timestamp {
+ type yang:timeticks;
+ description
+ "The timestamp type represents the value of an associated
+ timeticks schema node at which a specific occurrence happened.
+ The specific occurrence must be defined in the description
+ of any schema node defined using this type. When the specific
+ occurrence occurred prior to the last time the associated
+ timeticks attribute was zero, then the timestamp value is
+ zero. Note that this requires all timestamp values to be
+ reset to zero when the value of the associated timeticks
+ attribute reaches 497+ days and wraps around to zero.
+
+ The associated timeticks schema node must be specified
+ in the description of any schema node using this type.
+
+ In the value set and its semantics, this type is equivalent
+ to the TimeStamp textual convention of the SMIv2.";
+ reference
+ "RFC 2579: Textual Conventions for SMIv2";
+ }
+
+ /*** collection of generic address types ***/
+
+ typedef phys-address {
+ type string {
+ pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?';
+ }
+ description
+ "Represents media- or physical-level addresses represented
+ as a sequence octets, each octet represented by two hexadecimal
+ numbers. Octets are separated by colons. The canonical
+ representation uses lowercase characters.
+
+ In the value set and its semantics, this type is equivalent
+ to the PhysAddress textual convention of the SMIv2.";
+ reference
+ "RFC 2579: Textual Conventions for SMIv2";
+ }
+
+ typedef mac-address {
+ type string {
+ pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}';
+ }
+ description
+ "The mac-address type represents an IEEE 802 MAC address.
+ The canonical representation uses lowercase characters.
+
+ In the value set and its semantics, this type is equivalent
+ to the MacAddress textual convention of the SMIv2.";
+ reference
+ "IEEE 802: IEEE Standard for Local and Metropolitan Area
+ Networks: Overview and Architecture
+ RFC 2579: Textual Conventions for SMIv2";
+ }
+
+ /*** collection of XML specific types ***/
+
+ typedef xpath1.0 {
+ type string;
+ description
+ "This type represents an XPATH 1.0 expression.
+
+ When a schema node is defined that uses this type, the
+ description of the schema node MUST specify the XPath
+ context in which the XPath expression is evaluated.";
+ reference
+ "XPATH: XML Path Language (XPath) Version 1.0";
+ }
+
+ }
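
Purely for illustration, an instance fragment with lexical values that satisfy the date-and-time, mac-address, and phys-address patterns defined above; the element names and namespace are made up for this sketch and are not part of the module.

    <!-- Hypothetical elements; only the values are intended to match the typedef patterns. -->
    <sample xmlns="urn:example:yang-types-demo">
      <created>2010-09-24T10:15:30+02:00</created>  <!-- date-and-time -->
      <mac>00:1a:2b:3c:4d:5e</mac>                  <!-- mac-address -->
      <phys>0a:0b:0c</phys>                         <!-- phys-address -->
    </sample>
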
import java.util.Map.Entry;
import javax.management.ObjectName;
import javax.management.openmbean.OpenType;
-import org.opendaylight.controller.config.util.ConfigRegistryClient;
+import org.opendaylight.controller.config.util.BeanReader;
import org.opendaylight.controller.config.yangjmxgenerator.RuntimeBeanEntry;
import org.opendaylight.controller.config.yangjmxgenerator.attribute.AttributeIfc;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
private final Map<String, AttributeIfc> yangToAttrConfig;
private final String nullableDummyContainerName;
private final Map<String, AttributeIfc> jmxToAttrConfig;
- private final ConfigRegistryClient configRegistryClient;
+ private final BeanReader configRegistryClient;
- public InstanceConfig(ConfigRegistryClient configRegistryClient, Map<String, AttributeIfc> yangNamesToAttributes,
+ public InstanceConfig(BeanReader configRegistryClient, Map<String, AttributeIfc> yangNamesToAttributes,
String nullableDummyContainerName) {
this.yangToAttrConfig = yangNamesToAttributes;
TransactionProvider transactionProvider) {
switch (source) {
case running:
- return new RunningDatastoreQueryStrategy();
+ return new RunningDatastoreQueryStrategy(transactionProvider);
case candidate:
return new CandidateDatastoreQueryStrategy(transactionProvider);
default:
protected Element handleWithNoSubsequentOperations(Document document, XmlElement xml) throws NetconfDocumentedException {
fromXml(xml);
try {
- this.transactionProvider.abortTransaction();
- } catch (final IllegalStateException e) {
+ if (transactionProvider.getTransaction().isPresent()) {
+ this.transactionProvider.abortTransaction();
+ }
+ } catch (final RuntimeException e) {
LOG.warn("Abort failed: ", e);
final Map<String, String> errorInfo = new HashMap<>();
errorInfo
.put(ErrorTag.operation_failed.name(),
- "Operation failed. Use 'get-config' or 'edit-config' before triggering 'discard-changes' operation");
+ "Abort failed.");
throw new NetconfDocumentedException(e.getMessage(), e, ErrorType.application, ErrorTag.operation_failed,
ErrorSeverity.error, errorInfo);
}
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
import org.opendaylight.controller.config.api.ValidationException;
+import org.opendaylight.controller.config.util.BeanReader;
import org.opendaylight.controller.config.util.ConfigRegistryClient;
import org.opendaylight.controller.config.util.ConfigTransactionClient;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
public static Map<String/* Namespace from yang file */,
Map<String /* Name of module entry from yang file */, ModuleConfig>> transformMbeToModuleConfigs
- (final ConfigRegistryClient configRegistryClient, Map<String/* Namespace from yang file */,
+ (final BeanReader configRegistryClient, Map<String/* Namespace from yang file */,
Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> mBeanEntries) {
Map<String, Map<String, ModuleConfig>> namespaceToModuleNameToModuleConfig = Maps.newHashMap();
@Override
protected Element handleWithNoSubsequentOperations(Document document, XmlElement xml) throws NetconfDocumentedException {
-
EditConfigXmlParser.EditConfigExecution editConfigExecution;
Config cfg = getConfigMapping(getConfigRegistryClient(), yangStoreSnapshot);
editConfigExecution = editConfigXmlParser.fromXml(xml, cfg);
import java.util.Set;
import javax.management.ObjectName;
import org.opendaylight.controller.config.util.ConfigRegistryClient;
+import org.opendaylight.controller.config.util.ConfigTransactionClient;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.controller.config.yangjmxgenerator.RuntimeBeanEntry;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Datastore;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfig;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
+import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
public class Get extends AbstractConfigNetconfOperation {
+ private final TransactionProvider transactionProvider;
private final YangStoreContext yangStoreSnapshot;
private static final Logger LOG = LoggerFactory.getLogger(Get.class);
- public Get(YangStoreContext yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
+ public Get(final TransactionProvider transactionProvider, YangStoreContext yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
+ this.transactionProvider = transactionProvider;
this.yangStoreSnapshot = yangStoreSnapshot;
}
protected Element handleWithNoSubsequentOperations(Document document, XmlElement xml) throws NetconfDocumentedException {
checkXml(xml);
- final Set<ObjectName> runtimeBeans = getConfigRegistryClient().lookupRuntimeBeans();
+ final ObjectName testTransaction = transactionProvider.getOrCreateReadTransaction();
+ final ConfigTransactionClient registryClient = getConfigRegistryClient().getConfigTransactionClient(testTransaction);
- //Transaction provider required only for candidate datastore
- final Set<ObjectName> configBeans = Datastore.getInstanceQueryStrategy(Datastore.running, null)
- .queryInstances(getConfigRegistryClient());
+ try {
+ // Runtime beans are not part of transactions and have to be queried against the central registry
+ final Set<ObjectName> runtimeBeans = getConfigRegistryClient().lookupRuntimeBeans();
- final Map<String, Map<String, ModuleRuntime>> moduleRuntimes = createModuleRuntimes(getConfigRegistryClient(),
- yangStoreSnapshot.getModuleMXBeanEntryMap());
- final Map<String, Map<String, ModuleConfig>> moduleConfigs = EditConfig.transformMbeToModuleConfigs(
- getConfigRegistryClient(), yangStoreSnapshot.getModuleMXBeanEntryMap());
+ final Set<ObjectName> configBeans = Datastore.getInstanceQueryStrategy(Datastore.running, transactionProvider)
+ .queryInstances(getConfigRegistryClient());
- final Runtime runtime = new Runtime(moduleRuntimes, moduleConfigs);
+ final Map<String, Map<String, ModuleRuntime>> moduleRuntimes = createModuleRuntimes(getConfigRegistryClient(),
+ yangStoreSnapshot.getModuleMXBeanEntryMap());
+ final Map<String, Map<String, ModuleConfig>> moduleConfigs = EditConfig.transformMbeToModuleConfigs(
+ registryClient, yangStoreSnapshot.getModuleMXBeanEntryMap());
- final Element element = runtime.toXml(runtimeBeans, configBeans, document);
+ final Runtime runtime = new Runtime(moduleRuntimes, moduleConfigs);
- LOG.trace("{} operation successful", XmlNetconfConstants.GET);
+ final Element element = runtime.toXml(runtimeBeans, configBeans, document);
- return element;
+ LOG.trace("{} operation successful", XmlNetconfConstants.GET);
+
+ return element;
+ } finally {
+ transactionProvider.closeReadTransaction();
+ }
}
}
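
As a rough sketch of the exchange the reworked Get operation now serves (standard NETCONF framing per RFC 6241; the message-id and payload below are placeholders): the handler opens a short-lived read transaction, queries config beans through it, queries runtime beans against the central registry, renders both via Runtime.toXml(), and closes the read transaction in the finally block.

    <!-- Illustrative exchange; message-id and <data> contents are placeholders. -->
    <rpc message-id="101" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
      <get/>
    </rpc>

    <rpc-reply message-id="101" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
      <data>
        <!-- configuration and runtime (state) subtrees assembled by Runtime.toXml() -->
      </data>
    </rpc-reply>
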
private Element getResponseInternal(final Document document, final ConfigRegistryClient configRegistryClient,
final Datastore source) {
- Element dataElement = XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
- final Set<ObjectName> instances = Datastore.getInstanceQueryStrategy(source, this.transactionProvider)
- .queryInstances(configRegistryClient);
- final Config configMapping = new Config(EditConfig.transformMbeToModuleConfigs(configRegistryClient,
- yangStoreSnapshot.getModuleMXBeanEntryMap()));
-
-
- ObjectName on = transactionProvider.getOrCreateTransaction();
- ConfigTransactionClient ta = configRegistryClient.getConfigTransactionClient(on);
-
- ServiceRegistryWrapper serviceTracker = new ServiceRegistryWrapper(ta);
- dataElement = configMapping.toXml(instances, this.maybeNamespace, document, dataElement, serviceTracker);
-
- LOG.trace("{} operation successful", GET_CONFIG);
-
- return dataElement;
+ final ConfigTransactionClient registryClient;
+ // Read the current state from a transaction: if the source is running, start a new transaction just for
+ // reading; in the case of candidate, reuse the transaction that represents the candidate datastore
+ if(source == Datastore.running) {
+ final ObjectName readTx = transactionProvider.getOrCreateReadTransaction();
+ registryClient = getConfigRegistryClient().getConfigTransactionClient(readTx);
+ } else {
+ registryClient = getConfigRegistryClient().getConfigTransactionClient(transactionProvider.getOrCreateTransaction());
+ }
+
+ try {
+ Element dataElement = XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+ final Set<ObjectName> instances = Datastore.getInstanceQueryStrategy(source, this.transactionProvider)
+ .queryInstances(configRegistryClient);
+
+ final Config configMapping = new Config(EditConfig.transformMbeToModuleConfigs(registryClient,
+ yangStoreSnapshot.getModuleMXBeanEntryMap()));
+
+ ServiceRegistryWrapper serviceTracker = new ServiceRegistryWrapper(registryClient);
+ dataElement = configMapping.toXml(instances, this.maybeNamespace, document, dataElement, serviceTracker);
+
+ LOG.trace("{} operation successful", GET_CONFIG);
+
+ return dataElement;
+ } finally {
+ if(source == Datastore.running) {
+ transactionProvider.closeReadTransaction();
+ }
+ }
}
@Override
import java.util.Set;
import javax.management.ObjectName;
import org.opendaylight.controller.config.util.ConfigRegistryClient;
+import org.opendaylight.controller.config.util.ConfigTransactionClient;
+import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
public class RunningDatastoreQueryStrategy implements DatastoreQueryStrategy {
+ private final TransactionProvider transactionProvider;
+
+ public RunningDatastoreQueryStrategy(TransactionProvider transactionProvider) {
+ this.transactionProvider = transactionProvider;
+ }
+
@Override
public Set<ObjectName> queryInstances(ConfigRegistryClient configRegistryClient) {
- return configRegistryClient.lookupConfigBeans();
+ ObjectName on = transactionProvider.getOrCreateReadTransaction();
+ ConfigTransactionClient proxy = configRegistryClient.getConfigTransactionClient(on);
+ return proxy.lookupConfigBeans();
}
}
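
With this change, a query against the running datastore, for example the get-config request sketched below (standard RFC 6241 framing; the message-id is arbitrary), is no longer answered straight from the central registry: the strategy obtains a read-only transaction from the TransactionProvider and lists config beans through that transaction's ConfigTransactionClient.

    <!-- Illustrative request only. -->
    <rpc message-id="102" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
      <get-config>
        <source>
          <running/>
        </source>
      </get-config>
    </rpc>
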
ops.add(new Commit(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
ops.add(new Lock(netconfSessionIdForReporting));
ops.add(new UnLock(netconfSessionIdForReporting));
- ops.add(new Get(yangStoreSnapshot, configRegistryClient, netconfSessionIdForReporting));
+ ops.add(new Get(transactionProvider, yangStoreSnapshot, configRegistryClient, netconfSessionIdForReporting));
ops.add(new DiscardChanges(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
ops.add(new Validate(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
ops.add(new RuntimeRpc(yangStoreSnapshot, configRegistryClient, netconfSessionIdForReporting));
private final ConfigRegistryClient configRegistryClient;
private final String netconfSessionIdForReporting;
- private ObjectName transaction;
+ private ObjectName candidateTx;
+ private ObjectName readTx;
private final List<ObjectName> allOpenedTransactions = new ArrayList<>();
private static final String NO_TRANSACTION_FOUND_FOR_SESSION = "No transaction found for session ";
public synchronized Optional<ObjectName> getTransaction() {
- if (transaction == null){
+ if (candidateTx == null){
return Optional.absent();
}
// Transaction was already closed somehow
- if (!isStillOpenTransaction(transaction)) {
- LOG.warn("Fixing illegal state: transaction {} was closed in {}", transaction,
+ if (!isStillOpenTransaction(candidateTx)) {
+ LOG.warn("Fixing illegal state: transaction {} was closed in {}", candidateTx,
netconfSessionIdForReporting);
- transaction = null;
+ candidateTx = null;
return Optional.absent();
}
- return Optional.of(transaction);
+ return Optional.of(candidateTx);
+ }
+
+ public synchronized Optional<ObjectName> getReadTransaction() {
+
+ if (readTx == null){
+ return Optional.absent();
+ }
+
+ // Transaction was already closed somehow
+ if (!isStillOpenTransaction(readTx)) {
+ LOG.warn("Fixing illegal state: transaction {} was closed in {}", readTx,
+ netconfSessionIdForReporting);
+ readTx = null;
+ return Optional.absent();
+ }
+ return Optional.of(readTx);
}
private boolean isStillOpenTransaction(ObjectName transaction) {
if (ta.isPresent()) {
return ta.get();
}
- transaction = configRegistryClient.beginConfig();
- allOpenedTransactions.add(transaction);
- return transaction;
+ candidateTx = configRegistryClient.beginConfig();
+ allOpenedTransactions.add(candidateTx);
+ return candidateTx;
+ }
+
+ public synchronized ObjectName getOrCreateReadTransaction() {
+ Optional<ObjectName> ta = getReadTransaction();
+
+ if (ta.isPresent()) {
+ return ta.get();
+ }
+ readTx = configRegistryClient.beginConfig();
+ allOpenedTransactions.add(readTx);
+ return readTx;
}
/**
try {
CommitStatus status = configRegistryClient.commitConfig(taON);
// clean up
- allOpenedTransactions.remove(transaction);
- transaction = null;
+ allOpenedTransactions.remove(candidateTx);
+ candidateTx = null;
return status;
} catch (ValidationException validationException) {
// no clean up: user can reconfigure and recover this transaction
ConfigTransactionClient transactionClient = configRegistryClient.getConfigTransactionClient(taON.get());
transactionClient.abortConfig();
- allOpenedTransactions.remove(transaction);
- transaction = null;
+ allOpenedTransactions.remove(candidateTx);
+ candidateTx = null;
+ }
+
+ public synchronized void closeReadTransaction() {
+ LOG.debug("Closing read transaction");
+ Optional<ObjectName> taON = getReadTransaction();
+ Preconditions.checkState(taON.isPresent(), NO_TRANSACTION_FOUND_FOR_SESSION + netconfSessionIdForReporting);
+
+ ConfigTransactionClient transactionClient = configRegistryClient.getConfigTransactionClient(taON.get());
+ transactionClient.abortConfig();
+ allOpenedTransactions.remove(readTx);
+ readTx = null;
}
public synchronized void abortTestTransaction(ObjectName testTx) {
commit();
}
- @Test(expected = NetconfDocumentedException.class)
+ @Test
public void testEx2() throws Exception {
- discard();
+ assertContainsElement(discard(), readXmlToElement("<ok xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"/>"));
}
- private void discard() throws ParserConfigurationException, SAXException, IOException, NetconfDocumentedException {
+ private Document discard() throws ParserConfigurationException, SAXException, IOException, NetconfDocumentedException {
DiscardChanges discardOp = new DiscardChanges(transactionProvider, configRegistryClient, NETCONF_SESSION_ID);
- executeOp(discardOp, "netconfMessages/discardChanges.xml");
+ return executeOp(discardOp, "netconfMessages/discardChanges.xml");
}
private void checkBinaryLeafEdited(final Document response) throws NodeTestException, SAXException, IOException {
}
private Document get() throws NetconfDocumentedException, ParserConfigurationException, SAXException, IOException {
- Get getOp = new Get(yangStoreSnapshot, configRegistryClient, NETCONF_SESSION_ID);
+ Get getOp = new Get(transactionProvider, yangStoreSnapshot, configRegistryClient, NETCONF_SESSION_ID);
return executeOp(getOp, "netconfMessages/get.xml");
}
import java.util.List;
+/**
+ * This interface defines the methods for Neutron Requests
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.northbound.api.INeutronRequest}
+ */
+@Deprecated
public interface INeutronRequest<T extends INeutronObject> {
public T getSingleton();
public boolean isSingleton();
/**
* This interface defines the methods a service that wishes to be aware of Firewall Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallAware}
*/
+@Deprecated
public interface INeutronFirewallAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Firewall objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallCRUD}
*/
+@Deprecated
public interface INeutronFirewallCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Firewall Policys needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallPolicyAware}
*/
+@Deprecated
public interface INeutronFirewallPolicyAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Firewall Policy objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallPolicyCRUD}
*/
+@Deprecated
public interface INeutronFirewallPolicyCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Firewall Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallRuleAware}
*/
+@Deprecated
public interface INeutronFirewallRuleAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Firewall Rule objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallRuleCRUD}
*/
+@Deprecated
public interface INeutronFirewallRuleCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Neutron FloatingIPs needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFloatingIPAware}
*/
+@Deprecated
public interface INeutronFloatingIPAware {
/**
/**
* This interface defines the methods for CRUD of NB FloatingIP objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFloatingIPCRUD}
*/
+@Deprecated
public interface INeutronFloatingIPCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of LoadBalancer Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerAware}
*/
+@Deprecated
public interface INeutronLoadBalancerAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack LoadBalancer objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerCRUD}
*/
+@Deprecated
public interface INeutronLoadBalancerCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of LoadBalancerHealthMonitor Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerHealthMonitorAware}
*/
+@Deprecated
public interface INeutronLoadBalancerHealthMonitorAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack LoadBalancerHealthMonitor objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerHealthMonitorCRUD}
*/
+@Deprecated
public interface INeutronLoadBalancerHealthMonitorCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of LoadBalancerListener Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerListenerAware}
*/
+@Deprecated
public interface INeutronLoadBalancerListenerAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack LoadBalancerListener objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerListenerCRUD}
*/
+@Deprecated
public interface INeutronLoadBalancerListenerCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of LoadBalancerPool Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerPoolAware}
*/
+@Deprecated
public interface INeutronLoadBalancerPoolAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack LoadBalancerPool objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerPoolCRUD}
*/
+@Deprecated
public interface INeutronLoadBalancerPoolCRUD {
/**
* Applications call this interface method to determine if a particular
*/
package org.opendaylight.controller.networkconfig.neutron;
+/**
+ * This interface defines the methods a service that wishes to be aware of LoadBalancerPoolMembers needs to implement
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerPoolMemberAware}
+ */
+
+@Deprecated
public interface INeutronLoadBalancerPoolMemberAware {
import java.util.List;
+/**
+ * This interface defines the methods for CRUD of NB OpenStack LoadBalancerPoolMember objects
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerPoolMemberCRUD}
+ */
+
+@Deprecated
public interface INeutronLoadBalancerPoolMemberCRUD {
/**
/**
* This interface defines the methods a service that wishes to be aware of Neutron Networks needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronNetworkAware}
*/
+@Deprecated
public interface INeutronNetworkAware {
/**
/**
* This interface defines the methods for CRUD of NB network objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronNetworkCRUD}
*/
+@Deprecated
public interface INeutronNetworkCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This class contains behaviour common to Neutron configuration objects
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronObject}
*/
+@Deprecated
public interface INeutronObject {
public String getID();
public void setID(String id);
/**
* This interface defines the methods a service that wishes to be aware of Neutron Ports needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronPortAware}
*/
+@Deprecated
public interface INeutronPortAware {
/**
/**
* This interface defines the methods for CRUD of NB Port objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronPortCRUD}
*/
+@Deprecated
public interface INeutronPortCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Neutron Routers needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronRouterAware}
*/
+@Deprecated
public interface INeutronRouterAware {
/**
/**
* This interface defines the methods for CRUD of NB Router objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronRouterCRUD}
*/
+@Deprecated
public interface INeutronRouterCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Neutron Security Groups needs to implement
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSecurityGroupAware}
*/
+@Deprecated
public interface INeutronSecurityGroupAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Security Group objects
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSecurityGroupCRUD}
*/
+@Deprecated
public interface INeutronSecurityGroupCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods required to be aware of Neutron Security Rules
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSecurityRuleAware}
*/
+@Deprecated
public interface INeutronSecurityRuleAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Security Rule objects
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSecurityRuleCRUD}
*/
+@Deprecated
public interface INeutronSecurityRuleCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Neutron Subnets needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSubnetAware}
*/
+@Deprecated
public interface INeutronSubnetAware {
/**
/**
* This interface defines the methods for CRUD of NB Subnet objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSubnetCRUD}
*/
+@Deprecated
public interface INeutronSubnetCRUD {
/**
* Applications call this interface method to determine if a particular