/**
* Channel handler that responds to channelInactive event and reconnects the session.
- * Only if the initial connection was successfully established and promise was not canceled.
+ * Only if the promise was not canceled.
*/
private static final class ClosedChannelHandler extends ChannelInboundHandlerAdapter {
private final ReconnectPromise<?, ?> promise;
assertFalse(session.isSuccess());
}
+ /**
+ * Verify that the reconnecting client keeps re-consulting the reconnect
+ * strategy when session negotiation itself fails (not only when an
+ * established connection drops).
+ */
+ @Test
+ public void testNegotiationFailedReconnect() throws Exception {
+ final Promise<Boolean> p = new DefaultPromise<>(GlobalEventExecutor.INSTANCE);
+
+ this.dispatcher = getServerDispatcher(p);
+
+ this.server = this.dispatcher.createServer(this.serverAddress, new SessionListenerFactory<SimpleSessionListener>() {
+ @Override
+ public SimpleSessionListener getSessionListener() {
+ return new SimpleSessionListener();
+ }
+ });
+
+ // Block until the server socket is actually bound before starting the client.
+ this.server.get();
+
+ // Client dispatcher whose negotiator fails negotiation immediately on start,
+ // forcing the reconnect logic to kick in on every attempt.
+ this.clientDispatcher = new SimpleDispatcher(new SessionNegotiatorFactory<SimpleMessage, SimpleSession, SimpleSessionListener>() {
+ @Override
+ public SessionNegotiator<SimpleSession> getSessionNegotiator(final SessionListenerFactory<SimpleSessionListener> factory,
+ final Channel channel, final Promise<SimpleSession> promise) {
+
+ return new SimpleSessionNegotiator(promise, channel) {
+ @Override
+ protected void startNegotiation() throws Exception {
+ negotiationFailed(new IllegalStateException("Negotiation failed"));
+ }
+ };
+ }
+ }, new DefaultPromise<SimpleSession>(GlobalEventExecutor.INSTANCE), eventLoopGroup);
+
+ // Mocked factory lets us count how many times a strategy is requested.
+ final ReconnectStrategyFactory reconnectStrategyFactory = mock(ReconnectStrategyFactory.class);
+ final ReconnectStrategy reconnectStrategy = getMockedReconnectStrategy();
+ doReturn(reconnectStrategy).when(reconnectStrategyFactory).createReconnectStrategy();
+
+ this.clientDispatcher.createReconnectingClient(this.serverAddress,
+ reconnectStrategyFactory, new SessionListenerFactory<SimpleSessionListener>() {
+ @Override
+ public SimpleSessionListener getSessionListener() {
+ return new SimpleSessionListener();
+ }
+ });
+
+
+ // Reconnect strategy should be consulted at least twice, for initial connect and reconnect attempts after drop
+ verify(reconnectStrategyFactory, timeout((int) TimeUnit.MINUTES.toMillis(3)).atLeast(2)).createReconnectStrategy();
+ }
+
private SimpleDispatcher getClientDispatcher() {
return new SimpleDispatcher(new SessionNegotiatorFactory<SimpleMessage, SimpleSession, SimpleSessionListener>() {
@Override
<ignore/>
</action>
</pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <versionRange>[0.5,)</versionRange>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <execute></execute>
+ </action>
+ </pluginExecution>
</pluginExecutions>
</lifecycleMappingMetadata>
</configuration>
</modules>
</profile>
</profiles>
-</project>
\ No newline at end of file
+</project>
import org.osgi.framework.BundleContext;
+@Deprecated
public abstract class AbstractBindingAwareConsumer extends AbstractBrokerAwareActivator implements BindingAwareConsumer {
@Override
import org.opendaylight.yangtools.yang.binding.RpcService;
import org.osgi.framework.BundleContext;
+@Deprecated
public abstract class AbstractBindingAwareProvider extends AbstractBrokerAwareActivator implements BindingAwareProvider {
@Override
}
public List<?> execute(Object o) {
- List<Object> result = new LinkedList<>();
if (o == null) {
return null;
}
+ // Allocate the result list only after the null short-circuit
+ // (previously it was created unconditionally and then discarded).
+ List<Object> result = new LinkedList<>();
+ // instanceof replaces Class.isAssignableFrom: terser and inherently null-safe.
+ if (o instanceof Set) {
+ for (Object oo : (Set<?>) o) {
addToResult(result, execute(oo));
}
- return result;
- } else if (List.class.isAssignableFrom(o.getClass())) {
- List<?> lst = (List<?>) o;
- for (Object oo : lst) {
+ } else if (o instanceof List) {
+ for (Object oo : (List<?>) o) {
addToResult(result, execute(oo));
}
- return result;
- } else if (Map.class.isAssignableFrom(o.getClass())) {
- Map<?, ?> map = (Map<?, ?>) o;
- for (Object oo : map.values()) {
+ } else if (o instanceof Map) {
+ // Recurse into the map's values; keys are not inspected.
+ for (Object oo : ((Map<?, ?>) o).values()) {
addToResult(result, execute(oo));
}
- return result;
+ } else {
+ // Leaf object: extract the property value directly.
+ addToResult(result, XSQLCriteria.getValue(o, this.property));
}
-
- addToResult(result, XSQLCriteria.getValue(o, this.property));
-
return result;
}
private static void addToResult(List<Object> result, Object o) {
- if (o == null) {
- return;
- }
- if (Set.class.isAssignableFrom(o.getClass())) {
- Set<?> lst = (Set<?>) o;
- for (Object oo : lst) {
- result.add(oo);
- }
- } else if (List.class.isAssignableFrom(o.getClass())) {
- List<?> lst = (List<?>) o;
- for (Object oo : lst) {
- result.add(oo);
- }
- } else if (Map.class.isAssignableFrom(o.getClass())) {
- Map<?, ?> map = (Map<?, ?>) o;
- for (Object oo : map.values()) {
- result.add(oo);
- }
- } else {
+ // Flatten collections via addAll instead of element-by-element copies.
+ // instanceof is false for null, so the former up-front null guard is
+ // folded into the final "o != null" branch with identical behavior.
+ if (o instanceof Set) {
+ result.addAll((Set<?>)o);
+ } else if (o instanceof List) {
+ result.addAll((List<?>)o);
+ } else if (o instanceof Map) {
+ result.addAll(((Map<?, ?>)o).values());
+ } else if (o != null) {
result.add(o);
}
}
-
}
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants.RPC_REPLY_KEY;
import static org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ListenableFuture;
import io.netty.channel.ChannelFuture;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.concurrent.GlobalEventExecutor;
import java.io.ByteArrayInputStream;
+import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Collections;
import java.util.UUID;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.NetconfClientSession;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+import org.opendaylight.protocol.framework.TimedReconnectStrategy;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
errorInfo.contains( "<bad-element>bar</bad-element>" ) );
}
+ /**
+ * Test whether reconnect is scheduled properly.
+ * The test expects 101 scheduleReconnect() invocations — NOTE(review):
+ * presumably the 100L constructor argument of TimedReconnectStrategy caps
+ * the retry attempts; confirm against the TimedReconnectStrategy contract.
+ */
+ @Test
+ public void testNetconfDeviceReconnectInCommunicator() throws Exception {
+ final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> device = mock(RemoteDevice.class);
+
+ // Spy wraps a real TimedReconnectStrategy so Mockito can count scheduleReconnect() calls
+ // while preserving the real scheduling behavior.
+ final TimedReconnectStrategy timedReconnectStrategy = new TimedReconnectStrategy(GlobalEventExecutor.INSTANCE, 10000, 0, 1.0, null, 100L, null);
+ final ReconnectStrategy reconnectStrategy = spy(new ReconnectStrategy() {
+ @Override
+ public int getConnectTimeout() throws Exception {
+ return timedReconnectStrategy.getConnectTimeout();
+ }
+
+ @Override
+ public Future<Void> scheduleReconnect(final Throwable cause) {
+ return timedReconnectStrategy.scheduleReconnect(cause);
+ }
+
+ @Override
+ public void reconnectSuccessful() {
+ timedReconnectStrategy.reconnectSuccessful();
+ }
+ });
+
+ final NetconfDeviceCommunicator listener = new NetconfDeviceCommunicator(new RemoteDeviceId("test"), device);
+ final EventLoopGroup group = new NioEventLoopGroup();
+ final Timer time = new HashedWheelTimer();
+ try {
+ // NOTE(review): assumes nothing is listening on localhost:65000, so each
+ // connect attempt fails and triggers another reconnect — confirm on CI hosts.
+ final NetconfClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create()
+ .withAddress(new InetSocketAddress("localhost", 65000))
+ .withReconnectStrategy(reconnectStrategy)
+ .withConnectStrategyFactory(new ReconnectStrategyFactory() {
+ @Override
+ public ReconnectStrategy createReconnectStrategy() {
+ return reconnectStrategy;
+ }
+ })
+ .withAuthHandler(new LoginPassword("admin", "admin"))
+ .withConnectionTimeoutMillis(10000)
+ .withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH)
+ .withSessionListener(listener)
+ .build();
+
+
+ listener.initializeRemoteConnection(new NetconfClientDispatcherImpl(group, group, time), cfg);
+
+ verify(reconnectStrategy, timeout((int) TimeUnit.MINUTES.toMillis(3)).times(101)).scheduleReconnect(any(Throwable.class));
+ } finally {
+ // Always release the timer and event loop, even on verification failure.
+ time.stop();
+ group.shutdownGracefully();
+ }
+ }
+
@Test
public void testOnResponseMessageWithWrongMessageID() throws Exception {
setupSession();
/**
* All disconnected Nodes need be removed from stat list Nodes
+ *
* @param flowNode
* @return true/false if the {@link Node} removed successful
*/
boolean disconnectedNodeUnregistration(InstanceIdentifier<Node> nodeIdent);
+ /**
+ * Method add new feature {@link StatCapabTypes} to Node identified by
+ * nodeIdent -> InstanceIdentifier<Node>
+ *
+ * @param nodeIdent instance identifier of the Node to extend
+ * @param statCapab the statistics capability to register
+ * @return true/false if the {@link StatCapabTypes} was added successfully
+ */
+ boolean registerAdditionalNodeFeature(InstanceIdentifier<Node> nodeIdent, StatCapabTypes statCapab);
+
/**
* Method return true only and only if {@link StatPermCollector} contain
* valid node registration in its internal {@link Node} map.
* Otherwise return false.
*
- * @param InstanceIdentifier<FlowCapableNode> flowNode
+ * @param nodeIdent instance identifier of the Node to check
* @return
*/
boolean isProvidedFlowNodeActive(InstanceIdentifier<Node> nodeIdent);
*/
void disconnectedNodeUnregistration(InstanceIdentifier<Node> nodeIdent);
+ /**
+ * Method wraps {@link StatPermCollector}.registerAdditionalNodeFeature to provide
+ * possibility to register additional Node Feature {@link StatCapabTypes} for
+ * statistics collecting.
+ *
+ * @param nodeIdent
+ * @param statCapab
+ */
+ void registerAdditionalNodeFeature(InstanceIdentifier<Node> nodeIdent, StatCapabTypes statCapab);
+
/**
* Method provides access to Device RPC methods by wrapped
* internal method. In next {@link StatRpcMsgManager} is registered all
private ListenerRegistration<DataChangeListener> listenerRegistration;
protected final Map<InstanceIdentifier<Node>, Map<InstanceIdentifier<T>, Integer>> mapNodesForDelete = new ConcurrentHashMap<>();
+ protected final Map<InstanceIdentifier<Node>, Integer> mapNodeFeautureRepeater = new ConcurrentHashMap<>();
private final Class<T> clazz;
super.close();
}
+ /**
+ * Returns the actual DataObject identified by the given InstanceIdentifier, read from Config/DS.
+ * @param path InstanceIdentifier of the requested DataObject
+ * @return Optional of the requested DataObject
+ */
protected final <K extends DataObject> Optional<K> readLatestConfiguration(final InstanceIdentifier<K> path) {
if(currentReadTx == null) {
currentReadTx = dataBroker.newReadOnlyTransaction();
return txContainer;
}
+ /**
+ * Validate a TransactionCacheContainer. Must be called before any
+ * txCacheContainer processing.
+ *
+ * @param txCacheContainer cached container for a transaction, possibly absent
+ * @return true only if the container is present and carries both a Node ID
+ *         and a notification list; false (with a debug log) otherwise
+ */
+ protected boolean isTransactionCacheContainerValid(final Optional<TransactionCacheContainer<?>> txCacheContainer) {
+ if ( ! txCacheContainer.isPresent()) {
+ LOG.debug("Transaction Cache Container is not presented!");
+ return false;
+ }
+ if (txCacheContainer.get().getNodeId() == null) {
+ LOG.debug("Transaction Cache Container {} don't have Node ID!", txCacheContainer.get().getId());
+ return false;
+ }
+ if (txCacheContainer.get().getNotifications() == null) {
+ LOG.debug("Transaction Cache Container {} for {} node don't have Notifications!",
+ txCacheContainer.get().getId(), txCacheContainer.get().getNodeId());
+ return false;
+ }
+ return true;
+ }
+
/**
* Wrapping Future object call to {@link org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager}
* isExpectedStatistics with 10sec TimeOut.
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStats;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStatsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeaturesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatisticsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.desc.GroupDescBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeatures;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
/**
* statistics-manager
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
- LOG.debug("STAT-MANAGER - GroupDescStatsUpdated: unregistred notification detect TransactionId {}", transId);
+ LOG.debug("Unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<GroupDescStats> groupStats = notification.getGroupDescStats() != null
- ? new ArrayList<>(notification.getGroupDescStats()) : new ArrayList<GroupDescStats>(10);
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final List<? extends TransactionAware> cacheNotifs =
- txContainer.get().getNotifications();
- for (final TransactionAware notif : cacheNotifs) {
- if (notif instanceof GroupDescStatsUpdated) {
- groupStats.addAll(((GroupDescStatsUpdated) notif).getGroupDescStats());
- }
- }
- }
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
- .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- statGroupDescCommit(groupStats, nodeIdent, tx);
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Validate exist FlowCapableNode */
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ return;
+ }
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ /* Prepare List actual Groups and not updated Groups will be removed */
+ final List<Group> existGroups = fNode.get().getGroup() != null
+ ? fNode.get().getGroup() : Collections.<Group> emptyList();
+ final List<GroupKey> existGroupKeys = new ArrayList<>();
+ for (final Group group : existGroups) {
+ existGroupKeys.add(group.getKey());
+ }
+ /* GroupDesc processing */
+ statGroupDescCommit(txContainer, tx, fNodeIdent, existGroupKeys);
+ /* Delete all not presented Group Nodes */
+ deleteAllNotPresentNode(fNodeIdent, tx, Collections.unmodifiableList(existGroupKeys));
/* Notification for continue collecting statistics */
notifyToCollectNextStatistics(nodeIdent);
}
@Override
public void onGroupFeaturesUpdated(final GroupFeaturesUpdated notification) {
+ Preconditions.checkNotNull(notification);
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
- LOG.debug("STAT-MANAGER - MeterFeaturesUpdated: unregistred notification detect TransactionId {}", transId);
+ LOG.debug("Unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
- return;
- }
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if ( ! txContainer.isPresent()) {
return;
}
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
- .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- notifyToCollectNextStatistics(nodeIdent);
- final GroupFeatures stats = new GroupFeaturesBuilder(notification).build();
- final InstanceIdentifier<GroupFeatures> groupFeatureIdent = nodeIdent
- .augmentation(NodeGroupFeatures.class).child(GroupFeatures.class);
- Optional<Node> node = Optional.absent();
- try {
- node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
}
- if (node.isPresent()) {
- tx.put(LogicalDatastoreType.OPERATIONAL, groupFeatureIdent, stats);
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupFeaturesUpdated)) {
+ break;
+ }
+ final GroupFeatures stats = new GroupFeaturesBuilder((GroupFeaturesUpdated)notif).build();
+ final InstanceIdentifier<NodeGroupFeatures> nodeGroupFeatureIdent =
+ nodeIdent.augmentation(NodeGroupFeatures.class);
+ final InstanceIdentifier<GroupFeatures> groupFeatureIdent = nodeGroupFeatureIdent
+ .child(GroupFeatures.class);
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if (node.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeGroupFeatureIdent, new NodeGroupFeaturesBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, groupFeatureIdent, stats);
+ manager.registerAdditionalNodeFeature(nodeIdent, StatCapabTypes.GROUP_STATS);
+ }
}
}
});
@Override
public void onGroupStatisticsUpdated(final GroupStatisticsUpdated notification) {
+ Preconditions.checkNotNull(notification);
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
LOG.debug("STAT-MANAGER - GroupStatisticsUpdated: unregistred notification detect TransactionId {}", transId);
return;
}
+ // Cache every reply unconditionally; processing happens only after the last one arrives.
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<GroupStats> groupStats = notification.getGroupStats() != null
- ? new ArrayList<>(notification.getGroupStats()) : new ArrayList<GroupStats>(10);
- Optional<Group> notifGroup = Optional.absent();
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
- if (inputObj.isPresent() && inputObj.get() instanceof Group) {
- notifGroup = Optional.<Group> of((Group)inputObj.get());
- }
- final List<? extends TransactionAware> cacheNotifs =
- txContainer.get().getNotifications();
- for (final TransactionAware notif : cacheNotifs) {
- if (notif instanceof GroupStatisticsUpdated) {
- groupStats.addAll(((GroupStatisticsUpdated) notif).getGroupStats());
- }
- }
- }
- final Optional<Group> group = notifGroup;
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
- .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- /* Notification for continue collecting statistics */
- if ( ! group.isPresent()) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Node exist check */
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! node.isPresent()) {
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+
+ Optional<Group> notifGroup = Optional.absent();
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (inputObj.isPresent() && inputObj.get() instanceof Group) {
+ notifGroup = Optional.<Group> of((Group)inputObj.get());
+ }
+ // Commit each cached GroupStatisticsUpdated batch; the loop bails on the
+ // first non-matching notification type.
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupStatisticsUpdated)) {
+ break;
+ }
+ statGroupCommit(((GroupStatisticsUpdated) notif).getGroupStats(), nodeIdent, tx);
+ }
+ // NOTE(review): next-statistics collection is triggered only when a config
+ // Group input was cached for this transaction — confirm this is intended.
+ if (notifGroup.isPresent()) {
notifyToCollectNextStatistics(nodeIdent);
}
- statGroupCommit(groupStats, nodeIdent, group, tx);
}
});
}
private void statGroupCommit(final List<GroupStats> groupStats, final InstanceIdentifier<Node> nodeIdent,
- final Optional<Group> group, final ReadWriteTransaction trans) {
+ final ReadWriteTransaction tx) {
+
+ Preconditions.checkNotNull(groupStats);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
- for (final GroupStats groupStat : groupStats) {
- final GroupStatistics stats = new GroupStatisticsBuilder(groupStat).build();
+ for (final GroupStats gStat : groupStats) {
+ final GroupStatistics stats = new GroupStatisticsBuilder(gStat).build();
- final GroupKey groupKey = new GroupKey(groupStat.getGroupId());
- final InstanceIdentifier<GroupStatistics> gsIdent = fNodeIdent
- .child(Group.class,groupKey).augmentation(NodeGroupStatistics.class)
- .child(GroupStatistics.class);
+ final InstanceIdentifier<Group> groupIdent = fNodeIdent.child(Group.class, new GroupKey(gStat.getGroupId()));
+ final InstanceIdentifier<NodeGroupStatistics> nGroupStatIdent =groupIdent
+ .augmentation(NodeGroupStatistics.class);
+ final InstanceIdentifier<GroupStatistics> gsIdent = nGroupStatIdent.child(GroupStatistics.class);
/* Statistics Writing */
- Optional<FlowCapableNode> fNode = Optional.absent();
+ // Statistics are written only when the specific Group node (not just the
+ // FlowCapableNode, as before) exists in Operational/DS.
+ Optional<Group> group = Optional.absent();
try {
- fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ group = tx.read(LogicalDatastoreType.OPERATIONAL, groupIdent).checkedGet();
}
catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ LOG.debug("Read Operational/DS for Group node fail! {}", groupIdent, e);
}
- if (fNode.isPresent()) {
- trans.put(LogicalDatastoreType.OPERATIONAL, gsIdent, stats);
+ if (group.isPresent()) {
+ // NOTE(review): merging an empty NodeGroupStatistics presumably pre-creates
+ // the augmentation parent so the nested put() below cannot fail — confirm.
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nGroupStatIdent, new NodeGroupStatisticsBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, gsIdent, stats);
}
}
}
- private void statGroupDescCommit(final List<GroupDescStats> groupStats, final InstanceIdentifier<Node> nodeIdent,
- final ReadWriteTransaction trans) {
- final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ private void statGroupDescCommit(final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent, final List<GroupKey> existGroupKeys) {
- final List<GroupKey> deviceGroupKeys = new ArrayList<>();
+ Preconditions.checkNotNull(existGroupKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
- for (final GroupDescStats group : groupStats) {
- if (group.getGroupId() != null) {
- final GroupBuilder groupBuilder = new GroupBuilder(group);
- final GroupKey groupKey = new GroupKey(group.getGroupId());
- final InstanceIdentifier<Group> groupRef = fNodeIdent.child(Group.class,groupKey);
+ // Replay all cached GroupDescStatsUpdated notifications for this transaction;
+ // the loop bails on the first non-matching notification type.
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupDescStatsUpdated)) {
+ break;
+ }
+ final List<GroupDescStats> groupStats = ((GroupDescStatsUpdated) notif).getGroupDescStats();
+ if (groupStats == null) {
+ break;
+ }
+ for (final GroupDescStats group : groupStats) {
+ if (group.getGroupId() != null) {
+ final GroupBuilder groupBuilder = new GroupBuilder(group);
+ final GroupKey groupKey = new GroupKey(group.getGroupId());
+ final InstanceIdentifier<Group> groupRef = fNodeIdent.child(Group.class,groupKey);
- final NodeGroupDescStatsBuilder groupDesc= new NodeGroupDescStatsBuilder();
- groupDesc.setGroupDesc(new GroupDescBuilder(group).build());
- //Update augmented data
- groupBuilder.addAugmentation(NodeGroupDescStats.class, groupDesc.build());
- deviceGroupKeys.add(groupKey);
- Optional<FlowCapableNode> hashIdUpd = Optional.absent();
- try {
- hashIdUpd = trans.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
- }
- if (hashIdUpd.isPresent()) {
- trans.put(LogicalDatastoreType.OPERATIONAL, groupRef, groupBuilder.build());
+ final NodeGroupDescStatsBuilder groupDesc= new NodeGroupDescStatsBuilder();
+ groupDesc.setGroupDesc(new GroupDescBuilder(group).build());
+ //Update augmented data
+ groupBuilder.addAugmentation(NodeGroupDescStats.class, groupDesc.build());
+ // Group is still present on the device: drop its key so the caller's
+ // deleteAllNotPresentNode() pass keeps it; leftovers get removed.
+ existGroupKeys.remove(groupKey);
+ tx.put(LogicalDatastoreType.OPERATIONAL, groupRef, groupBuilder.build());
}
}
}
- /* Delete all not presented Group Nodes */
- deleteAllNotPresentNode(fNodeIdent, trans, deviceGroupKeys);
}
private void deleteAllNotPresentNode(final InstanceIdentifier<FlowCapableNode> fNodeIdent,
final ReadWriteTransaction trans, final List<GroupKey> deviceGroupKeys) {
- final Optional<FlowCapableNode> fNode = readLatestConfiguration(fNodeIdent);
- if ( ! fNode.isPresent()) {
- LOG.trace("Read Operational/DS for FlowCapableNode fail! Node {} doesn't exist.", fNodeIdent);
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(trans);
+
+ if (deviceGroupKeys == null) {
return;
}
- final List<Group> existGroups = fNode.get().getGroup() != null
- ? fNode.get().getGroup() : Collections.<Group> emptyList();
- /* Add all existed groups paths - no updated paths has to be removed */
- for (final Group group : existGroups) {
- if (deviceGroupKeys.remove(group.getKey())) {
- break; // group still exist on device
- }
- LOG.trace("Group {} has to removed.", group);
- final InstanceIdentifier<Group> delGroupIdent = fNodeIdent.child(Group.class, group.getKey());
+
+ for (final GroupKey key : deviceGroupKeys) {
+ final InstanceIdentifier<Group> delGroupIdent = fNodeIdent.child(Group.class, key);
+ LOG.trace("Group {} has to removed.", key);
Optional<Group> delGroup = Optional.absent();
try {
delGroup = trans.read(LogicalDatastoreType.OPERATIONAL, delGroupIdent).checkedGet();
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStats;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStatsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeaturesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatisticsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeatures;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeaturesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterStatisticsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStats;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStats;
+import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
/**
* statistics-manager
LOG.debug("STAT-MANAGER - MeterConfigStatsUpdated: unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<MeterConfigStats> meterConfStat = notification.getMeterConfigStats() != null
- ? new ArrayList<>(notification.getMeterConfigStats()) : new ArrayList<MeterConfigStats>(10);
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
- for (final TransactionAware notif : cacheNotifs) {
- if (notif instanceof MeterConfigStatsUpdated) {
- meterConfStat.addAll(((MeterConfigStatsUpdated) notif).getMeterConfigStats());
- }
- }
- }
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Validate exist FlowCapableNode */
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ return;
+ }
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ /* Prepare List actual Meters and not updated Meters will be removed */
+ final List<Meter> existMeters = fNode.get().getMeter() != null
+ ? fNode.get().getMeter() : Collections.<Meter> emptyList();
+ final List<MeterKey> existMeterKeys = new ArrayList<>();
+ for (final Meter meter : existMeters) {
+ existMeterKeys.add(meter.getKey());
+ }
+ /* MeterConfig processing */
+ comitConfMeterStats(txContainer, tx, fNodeIdent, existMeterKeys);
+ /* Delete all not presented Meter Nodes */
+ deleteAllNotPresentedNodes(fNodeIdent, tx, Collections.unmodifiableList(existMeterKeys));
/* Notification for continue collecting statistics */
notifyToCollectNextStatistics(nodeIdent);
- comitConfMeterStats(meterConfStat, nodeIdent, tx);
}
});
}
@Override
public void onMeterFeaturesUpdated(final MeterFeaturesUpdated notification) {
+ Preconditions.checkNotNull(notification);
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
LOG.debug("STAT-MANAGER - MeterFeaturesUpdated: unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
- return;
- }
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if ( ! txContainer.isPresent()) {
return;
}
- final MeterFeatures stats = new MeterFeaturesBuilder(notification).build();
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
- .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
- final InstanceIdentifier<MeterFeatures> meterFeatureIdent = nodeIdent
- .augmentation(NodeMeterFeatures.class).child(MeterFeatures.class);
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- /* Notification for continue collecting statistics */
- notifyToCollectNextStatistics(nodeIdent);
- Optional<Node> node = Optional.absent();
- try {
- node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
}
- if (node.isPresent()) {
- tx.put(LogicalDatastoreType.OPERATIONAL, meterFeatureIdent, stats);
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof MeterFeaturesUpdated)) {
+ break;
+ }
+ final MeterFeatures stats = new MeterFeaturesBuilder((MeterFeaturesUpdated)notif).build();
+ final InstanceIdentifier<NodeMeterFeatures> nodeMeterFeatureIdent =
+ nodeIdent.augmentation(NodeMeterFeatures.class);
+ final InstanceIdentifier<MeterFeatures> meterFeatureIdent = nodeMeterFeatureIdent
+ .child(MeterFeatures.class);
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if (node.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeMeterFeatureIdent, new NodeMeterFeaturesBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, meterFeatureIdent, stats);
+ manager.registerAdditionalNodeFeature(nodeIdent, StatCapabTypes.METER_STATS);
+ }
}
}
});
@Override
public void onMeterStatisticsUpdated(final MeterStatisticsUpdated notification) {
+ Preconditions.checkNotNull(notification);
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
LOG.debug("STAT-MANAGER - MeterStatisticsUpdated: unregistred notification detect TransactionId {}", transId);
return;
}
+ /* Cache every part of a multipart reply; processing happens only on the final part. */
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<MeterStats> meterStat = notification.getMeterStats() != null
- ? new ArrayList<>(notification.getMeterStats()) : new ArrayList<MeterStats>(10);
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
- for (final TransactionAware notif : cacheNotifs) {
- if (notif instanceof MeterConfigStatsUpdated) {
- meterStat.addAll(((MeterStatisticsUpdated) notif).getMeterStats());
- }
- }
- }
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Node exist check */
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! node.isPresent()) {
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+
+ /* notifMeter is present only when this statistics run was triggered by a
+ * config-input (single-meter) request - see getConfInput(). */
+ Optional<Meter> notifMeter = Optional.absent();
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (inputObj.isPresent() && inputObj.get() instanceof Meter) {
+ notifMeter = Optional.<Meter> of((Meter)inputObj.get());
+ }
+ for (final TransactionAware notif : cacheNotifs) {
+ /* NOTE(review): 'break' assumes the cached notifications are homogeneous;
+ * a single foreign entry stops processing of the remainder - confirm intent. */
+ if ( ! (notif instanceof MeterStatisticsUpdated)) {
+ break;
+ }
+ statMeterCommit(((MeterStatisticsUpdated) notif).getMeterStats(), nodeIdent, tx);
+ }
+ /* NOTE(review): the collecting cycle is continued only for config-input driven
+ * requests here - verify that the periodic (all-meters) path is continued elsewhere. */
+ if (notifMeter.isPresent()) {
+ notifyToCollectNextStatistics(nodeIdent);
+ }
}
});
}
private void statMeterCommit(final List<MeterStats> meterStats,
- final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction tx) {
+
+ Preconditions.checkNotNull(meterStats);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+
+ /* Write a MeterStatistics augmentation for every meter that still exists in Operational/DS. */
for (final MeterStats mStat : meterStats) {
final MeterStatistics stats = new MeterStatisticsBuilder(mStat).build();
- final MeterKey mKey = new MeterKey(mStat.getMeterId());
- final InstanceIdentifier<MeterStatistics> msIdent = fNodeIdent
- .child(Meter.class, mKey).augmentation(NodeMeterStatistics.class)
- .child(MeterStatistics.class);
+ final InstanceIdentifier<Meter> meterIdent = fNodeIdent.child(Meter.class, new MeterKey(mStat.getMeterId()));
+ final InstanceIdentifier<NodeMeterStatistics> nodeMeterStatIdent = meterIdent
+ .augmentation(NodeMeterStatistics.class);
+ final InstanceIdentifier<MeterStatistics> msIdent = nodeMeterStatIdent.child(MeterStatistics.class);
/* Meter Statistics commit */
- Optional<FlowCapableNode> fNode = Optional.absent();
+ Optional<Meter> meter = Optional.absent();
try {
- fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ meter = tx.read(LogicalDatastoreType.OPERATIONAL, meterIdent).checkedGet();
}
catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ LOG.debug("Read Operational/DS for Meter fail! {}", meterIdent, e);
}
- if (fNode.isPresent()) {
- trans.put(LogicalDatastoreType.OPERATIONAL, msIdent, stats);
+ if (meter.isPresent()) {
+ /* merge(..., true) creates missing parent structure before the put */
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeMeterStatIdent, new NodeMeterStatisticsBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, msIdent, stats);
}
}
}
- private void comitConfMeterStats(final List<MeterConfigStats> meterConfStat,
- final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+ /* Writes meter configuration from all cached MeterConfigStatsUpdated parts and
+  * removes every key of a successfully written meter from existMeterKeys, so the
+  * caller can delete the leftovers (meters no longer reported by the device). */
+ private void comitConfMeterStats(final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent, final List<MeterKey> existMeterKeys) {
- final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
- final List<MeterKey> deviceMeterKeys = new ArrayList<>();
-
- for (final MeterConfigStats meterConf : meterConfStat) {
- final MeterBuilder meterBuilder = new MeterBuilder(meterConf);
- if (meterConf.getMeterId() != null) {
- final MeterKey meterKey = new MeterKey(meterConf.getMeterId());
- meterBuilder.setKey(meterKey);
- final InstanceIdentifier<Meter> meterRef = nodeIdent
- .augmentation(FlowCapableNode.class).child(Meter.class,meterKey);
- final NodeMeterConfigStatsBuilder meterConfig = new NodeMeterConfigStatsBuilder();
- meterConfig.setMeterConfigStats(new MeterConfigStatsBuilder(meterConf).build());
- //Update augmented data
- meterBuilder.addAugmentation(NodeMeterConfigStats.class, meterConfig.build());
- deviceMeterKeys.add(meterKey);
- Optional<FlowCapableNode> fNode = Optional.absent();
- try {
- fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
- }
- if (fNode.isPresent()) {
- trans.put(LogicalDatastoreType.OPERATIONAL, meterRef, meterBuilder.build());
+ Preconditions.checkNotNull(existMeterKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ /* NOTE(review): 'break' stops at the first non-matching / empty entry rather
+  * than skipping it - confirm the cache is guaranteed homogeneous. */
+ if ( ! (notif instanceof MeterConfigStatsUpdated)) {
+ break;
+ }
+ final List<MeterConfigStats> meterStats = ((MeterConfigStatsUpdated) notif).getMeterConfigStats();
+ if (meterStats == null) {
+ break;
+ }
+ for (final MeterConfigStats meterStat : meterStats) {
+ if (meterStat.getMeterId() != null) {
+ /* NOTE(review): the explicit setKey(meterKey) of the old code was dropped -
+  * presumably MeterBuilder(meterStat) derives the key; verify. */
+ final MeterBuilder meterBuilder = new MeterBuilder(meterStat);
+ final MeterKey meterKey = new MeterKey(meterStat.getMeterId());
+ final InstanceIdentifier<Meter> meterRef = fNodeIdent.child(Meter.class, meterKey);
+
+ final NodeMeterConfigStatsBuilder meterConfig = new NodeMeterConfigStatsBuilder();
+ meterConfig.setMeterConfigStats(new MeterConfigStatsBuilder(meterStat).build());
+ //Update augmented data
+ meterBuilder.addAugmentation(NodeMeterConfigStats.class, meterConfig.build());
+ existMeterKeys.remove(meterKey);
+ tx.put(LogicalDatastoreType.OPERATIONAL, meterRef, meterBuilder.build());
}
}
}
- /* Delete all not presented Meter Nodes */
- deleteAllNotPresentedNodes(fNodeIdent, trans, deviceMeterKeys);
}
+ /* Removes every meter listed in deviceMeterKeys from Operational/DS - the caller
+  * passes the keys that were NOT refreshed by the latest statistics round. */
private void deleteAllNotPresentedNodes(final InstanceIdentifier<FlowCapableNode> fNodeIdent,
- final ReadWriteTransaction trans, final List<MeterKey> deviceMeterKeys) {
- /* Delete all not presented meters */
- final Optional<FlowCapableNode> fNode = readLatestConfiguration(fNodeIdent);
+ final ReadWriteTransaction tx, final List<MeterKey> deviceMeterKeys) {
+
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
- if ( ! fNode.isPresent()) {
- LOG.trace("Read Operational/DS for FlowCapableNode fail! Node {} doesn't exist.", fNodeIdent);
+ if (deviceMeterKeys == null) {
return;
}
- final List<Meter> existMeters = fNode.get().getMeter() != null
- ? fNode.get().getMeter() : Collections.<Meter> emptyList();
- /* Add all existed groups paths - no updated paths has to be removed */
- for (final Meter meter : existMeters) {
- if (deviceMeterKeys.remove(meter.getKey())) {
- break; // Meter still exist on device
- }
- final InstanceIdentifier<Meter> delMeterIdent = fNodeIdent.child(Meter.class, meter.getKey());
+
+ for (final MeterKey key : deviceMeterKeys) {
+ final InstanceIdentifier<Meter> delMeterIdent = fNodeIdent.child(Meter.class, key);
+ LOG.trace("Meter {} has to removed.", key);
+ /* Read-before-delete: a concurrent transaction may already have removed it. */
Optional<Meter> delMeter = Optional.absent();
try {
- delMeter = trans.read(LogicalDatastoreType.OPERATIONAL, delMeterIdent).checkedGet();
+ delMeter = tx.read(LogicalDatastoreType.OPERATIONAL, delMeterIdent).checkedGet();
}
catch (final ReadFailedException e) {
// NOOP - probably another transaction delete that node
}
if (delMeter.isPresent()) {
- trans.delete(LogicalDatastoreType.OPERATIONAL, delMeterIdent);
+ tx.delete(LogicalDatastoreType.OPERATIONAL, delMeterIdent);
}
}
}
package org.opendaylight.controller.md.statistics.manager.impl;
-import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
/**
* statistics-manager
LOG.debug("STAT-MANAGER - QueueStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<QueueIdAndStatisticsMap> queueStats = notification.getQueueIdAndStatisticsMap() != null
- ? new ArrayList<>(notification.getQueueIdAndStatisticsMap()) : new ArrayList<QueueIdAndStatisticsMap>(10);
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final List<? extends TransactionAware> cachedNotifs =
- txContainer.get().getNotifications();
- for (final TransactionAware notif : cachedNotifs) {
- if (notif instanceof QueueStatisticsUpdate) {
- queueStats.addAll(((QueueStatisticsUpdate) notif).getQueueIdAndStatisticsMap());
- }
- }
- }
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
- .child(Node.class, new NodeKey(nodeId));
- /* Queue statistics are small size and we are not able to change for OF cross controller
- * - don't need to make are atomic */
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
- public void applyOperation(final ReadWriteTransaction trans) {
- /* Notification for continue */
+ /* Executed on the StatisticsManager operation queue: validates the node,
+  * commits cached queue statistics and prunes queues the device no longer reports. */
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId));
+
+ /* Validate exist Node */
+ Optional<Node> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ LOG.trace("Read Operational/DS for Node fail! Node {} doesn't exist.", nodeIdent);
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ /* Prepare List actual Queues and not updated Queues will be removed */
+ final List<NodeConnector> existConnectors = fNode.get().getNodeConnector() != null
+ ? fNode.get().getNodeConnector() : Collections.<NodeConnector> emptyList();
+ final Map<QueueKey, NodeConnectorKey> existQueueKeys = new HashMap<>();
+ for (final NodeConnector connect : existConnectors) {
+ /* Not every connector carries the FlowCapableNodeConnector augmentation -
+  * guard against NPE on getAugmentation() returning null. */
+ final FlowCapableNodeConnector fcConnector = connect.getAugmentation(FlowCapableNodeConnector.class);
+ if (fcConnector == null) {
+ continue;
+ }
+ final List<Queue> listQueues = fcConnector.getQueue();
+ if (listQueues != null) {
+ for (final Queue queue : listQueues) {
+ existQueueKeys.put(queue.getKey(), connect.getKey());
+ }
+ }
+ }
+ /* Queue processing */
+ statQueueCommit(txContainer, tx, nodeIdent, existQueueKeys);
+ /* Delete all not presented Queue Nodes */
+ deleteAllNotPresentedNodes(nodeIdent, tx, Collections.unmodifiableMap(existQueueKeys));
+ /* Notification for continue collecting statistics */
notifyToCollectNextStatistics(nodeIdent);
- statQueueCommit(queueStats, nodeIdent, trans);
}
});
}
- private void statQueueCommit(final List<QueueIdAndStatisticsMap> queueStats,
- final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+ /* Writes queue statistics from all cached QueueStatisticsUpdate parts; each
+  * successfully written queue key is removed from existQueueKeys so the caller
+  * can delete the queues no longer reported by the device. */
+ private void statQueueCommit(
+ final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<Node> nodeIdent, final Map<QueueKey, NodeConnectorKey> existQueueKeys) {
- /* check exist FlowCapableNode and write statistics */
- Optional<Node> fNode = Optional.absent();
- try {
- fNode = trans.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
- return;
+ Preconditions.checkNotNull(existQueueKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ /* NOTE(review): 'break' stops at the first non-matching / empty entry -
+  * confirm the cached notifications are guaranteed homogeneous. */
+ if ( ! (notif instanceof QueueStatisticsUpdate)) {
+ break;
+ }
+ final List<QueueIdAndStatisticsMap> queueStats = ((QueueStatisticsUpdate) notif).getQueueIdAndStatisticsMap();
+ if (queueStats == null) {
+ break;
+ }
+ for (final QueueIdAndStatisticsMap queueStat : queueStats) {
+ if (queueStat.getQueueId() != null) {
+ final FlowCapableNodeConnectorQueueStatistics statChild =
+ new FlowCapableNodeConnectorQueueStatisticsBuilder(queueStat).build();
+ final FlowCapableNodeConnectorQueueStatisticsDataBuilder statBuild =
+ new FlowCapableNodeConnectorQueueStatisticsDataBuilder();
+ statBuild.setFlowCapableNodeConnectorQueueStatistics(statChild);
+ final QueueKey qKey = new QueueKey(queueStat.getQueueId());
+ final InstanceIdentifier<FlowCapableNodeConnectorQueueStatisticsData> queueStatIdent = nodeIdent
+ .child(NodeConnector.class, new NodeConnectorKey(queueStat.getNodeConnectorId()))
+ .augmentation(FlowCapableNodeConnector.class)
+ .child(Queue.class, qKey).augmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
+ existQueueKeys.remove(qKey);
+ tx.put(LogicalDatastoreType.OPERATIONAL, queueStatIdent, statBuild.build());
+ }
+ }
}
- if ( ! fNode.isPresent()) {
- LOG.trace("Read Operational/DS for Node fail! Node {} doesn't exist.", nodeIdent);
+ }
+
+ /* Removes every queue in existQueueKeys (the ones NOT refreshed by the latest
+  * statistics round) from Operational/DS. */
+ private void deleteAllNotPresentedNodes(final InstanceIdentifier<Node> nodeIdent,
+ final ReadWriteTransaction tx, final Map<QueueKey, NodeConnectorKey> existQueueKeys) {
+
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ if (existQueueKeys == null) {
return;
}
- for (final QueueIdAndStatisticsMap queueEntry : queueStats) {
- final FlowCapableNodeConnectorQueueStatistics statChild =
- new FlowCapableNodeConnectorQueueStatisticsBuilder(queueEntry).build();
- final FlowCapableNodeConnectorQueueStatisticsDataBuilder statBuild =
- new FlowCapableNodeConnectorQueueStatisticsDataBuilder();
- statBuild.setFlowCapableNodeConnectorQueueStatistics(statChild);
- final QueueKey qKey = new QueueKey(queueEntry.getQueueId());
- final InstanceIdentifier<FlowCapableNodeConnectorQueueStatisticsData> queueStatIdent = nodeIdent
- .child(NodeConnector.class, new NodeConnectorKey(queueEntry.getNodeConnectorId()))
- .augmentation(FlowCapableNodeConnector.class)
- .child(Queue.class, qKey).augmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
- trans.put(LogicalDatastoreType.OPERATIONAL, queueStatIdent, statBuild.build());
+ for (final Entry<QueueKey, NodeConnectorKey> entry : existQueueKeys.entrySet()) {
+ final InstanceIdentifier<Queue> queueIdent = nodeIdent.child(NodeConnector.class, entry.getValue())
+ .augmentation(FlowCapableNodeConnector.class).child(Queue.class, entry.getKey());
+ LOG.trace("Queue {} has to removed.", queueIdent);
+ /* Read-before-delete: a concurrent transaction may already have removed it. */
+ Optional<Queue> delQueue = Optional.absent();
+ try {
+ delQueue = tx.read(LogicalDatastoreType.OPERATIONAL, queueIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ // NOOP - probably another transaction delete that node
+ }
+ if (delQueue.isPresent()) {
+ tx.delete(LogicalDatastoreType.OPERATIONAL, queueIdent);
+ }
}
}
}
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Set;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.statistics.manager.StatNodeRegistration;
import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*
* Created: Aug 28, 2014
*/
-public class StatNodeRegistrationImpl implements StatNodeRegistration {
+public class StatNodeRegistrationImpl implements StatNodeRegistration, DataChangeListener {
private static final Logger LOG = LoggerFactory.getLogger(StatNodeRegistrationImpl.class);
Preconditions.checkArgument(db != null, "DataBroker can not be null!");
Preconditions.checkArgument(notificationService != null, "NotificationProviderService can not be null!");
notifListenerRegistration = notificationService.registerNotificationListener(this);
+ /* Build Path */
+ final InstanceIdentifier<FlowCapableNode> flowNodeWildCardIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class).augmentation(FlowCapableNode.class);
+ listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ flowNodeWildCardIdentifier, StatNodeRegistrationImpl.this, DataChangeScope.BASE);
}
@Override
maxCapTables = data.getMaxTables();
final Optional<Short> maxTables = Optional.<Short> of(maxCapTables);
-
- /* Meters management */
- final InstanceIdentifier<NodeMeterFeatures> meterFeaturesIdent = nodeIdent.augmentation(NodeMeterFeatures.class);
-
-
- Optional<NodeMeterFeatures> meterFeatures = Optional.absent();
- try {
- meterFeatures = tx.read(LogicalDatastoreType.OPERATIONAL, meterFeaturesIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.warn("Read NodeMeterFeatures {} fail!", meterFeaturesIdent, e);
- }
- if (meterFeatures.isPresent()) {
- statCapabTypes.add(StatCapabTypes.METER_STATS);
- }
manager.connectedNodeRegistration(nodeIdent,
Collections.unmodifiableList(statCapabTypes), maxTables.get());
}
@Override
public void onNodeRemoved(final NodeRemoved notification) {
+ Preconditions.checkNotNull(notification);
final NodeRef nodeRef = notification.getNodeRef();
final InstanceIdentifier<?> nodeRefIdent = nodeRef.getValue();
final InstanceIdentifier<Node> nodeIdent =
@Override
public void onNodeUpdated(final NodeUpdated notification) {
+ Preconditions.checkNotNull(notification);
final FlowCapableNodeUpdated newFlowNode =
notification.getAugmentation(FlowCapableNodeUpdated.class);
if (newFlowNode != null && newFlowNode.getSwitchFeatures() != null) {
connectFlowCapableNode(swichFeaturesIdent, switchFeatures, nodeIdent);
}
}
+
+ @Override
+ /* Fires on BASE-scope creation of a FlowCapableNode augmentation in Operational/DS:
+  * probes each newly connected node for group/meter features so the matching
+  * StatCapabTypes can be registered once the feature replies arrive. */
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changeEvent) {
+ Preconditions.checkNotNull(changeEvent,"Async ChangeEvent can not be null!");
+ /* All DataObjects for create */
+ final Set<InstanceIdentifier<?>> createdData = changeEvent.getCreatedData() != null
+ ? changeEvent.getCreatedData().keySet() : Collections.<InstanceIdentifier<?>> emptySet();
+
+ for (final InstanceIdentifier<?> entryKey : createdData) {
+ final InstanceIdentifier<Node> nodeIdent = entryKey
+ .firstIdentifierOf(Node.class);
+ if ( ! nodeIdent.isWildcarded()) {
+ final NodeRef nodeRef = new NodeRef(nodeIdent);
+ // FIXME: these calls is a job for handshake or for inventory manager
+ /* check Group and Meter future */
+ manager.getRpcMsgManager().getGroupFeaturesStat(nodeRef);
+ manager.getRpcMsgManager().getMeterFeaturesStat(nodeRef);
+ }
+ }
+ }
}
package org.opendaylight.controller.md.statistics.manager.impl;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
statNetCollectorServ.shutdown();
}
+ @Override
+ public boolean hasActiveNodes() {
+ /* True while at least one node is registered with this collector. */
+ return ( ! statNodeHolder.isEmpty());
+ }
+
@Override
public boolean isProvidedFlowNodeActive(
final InstanceIdentifier<Node> flowNode) {
@Override
public boolean connectedNodeRegistration(final InstanceIdentifier<Node> ident,
final List<StatCapabTypes> statTypes, final Short nrOfSwitchTables) {
- if (ident.isWildcarded()) {
- LOG.warn("FlowCapableNode IstanceIdentifier {} registration can not be wildcarded!", ident);
- } else {
+ if (isNodeIdentValidForUse(ident)) {
if ( ! statNodeHolder.containsKey(ident)) {
synchronized (statNodeHolderLock) {
final boolean startStatCollecting = statNodeHolder.size() == 0;
@Override
public boolean disconnectedNodeUnregistration(final InstanceIdentifier<Node> ident) {
- if (ident.isWildcarded()) {
- LOG.warn("FlowCapableNode IstanceIdentifier {} unregistration can not be wildcarded!", ident);
- } else {
+ if (isNodeIdentValidForUse(ident)) {
if (statNodeHolder.containsKey(ident)) {
synchronized (statNodeHolderLock) {
if (statNodeHolder.containsKey(ident)) {
return false;
}
+ @Override
+ /* Adds a statistics capability marker to an already registered node.
+  * statNodeHolder is an immutable map that is replaced wholesale on every update,
+  * so the node holder must be re-read under statNodeHolderLock before mutation -
+  * a reference captured before the lock could be stale. */
+ public boolean registerAdditionalNodeFeature(final InstanceIdentifier<Node> ident,
+ final StatCapabTypes statCapab) {
+ if (isNodeIdentValidForUse(ident)) {
+ if ( ! statNodeHolder.containsKey(ident)) {
+ return false;
+ }
+ /* Cheap pre-check outside the lock to avoid contention in the common case */
+ if ( ! statNodeHolder.get(ident).getStatMarkers().contains(statCapab)) {
+ synchronized (statNodeHolderLock) {
+ /* Re-read inside the lock: a concurrent update may have replaced the holder */
+ final StatNodeInfoHolder statNode = statNodeHolder.get(ident);
+ if (statNode != null && ! statNode.getStatMarkers().contains(statCapab)) {
+ final List<StatCapabTypes> statCapabForEdit = new ArrayList<>(statNode.getStatMarkers());
+ statCapabForEdit.add(statCapab);
+ final StatNodeInfoHolder nodeInfoHolder = new StatNodeInfoHolder(statNode.getNodeRef(),
+ Collections.unmodifiableList(statCapabForEdit), statNode.getMaxTables());
+
+ final Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNodes =
+ new HashMap<>(statNodeHolder);
+ statNodes.put(ident, nodeInfoHolder);
+ statNodeHolder = Collections.unmodifiableMap(statNodes);
+ }
+ }
+ }
+ }
+ return true;
+ }
+
@Override
public void collectNextStatistics() {
if (wakeMe) {
break;
case GROUP_STATS:
LOG.trace("STAT-MANAGER-collecting GROUP-STATS for NodeRef {}", actualNodeRef);
- manager.getRpcMsgManager().getGroupFeaturesStat(actualNodeRef);
- waitingForNotification();
manager.getRpcMsgManager().getAllGroupsConfStats(actualNodeRef);
waitingForNotification();
manager.getRpcMsgManager().getAllGroupsStat(actualNodeRef);
break;
case METER_STATS:
LOG.trace("STAT-MANAGER-collecting METER-STATS for NodeRef {}", actualNodeRef);
- manager.getRpcMsgManager().getMeterFeaturesStat(actualNodeRef);
- waitingForNotification();
manager.getRpcMsgManager().getAllMeterConfigStat(actualNodeRef);
waitingForNotification();
manager.getRpcMsgManager().getAllMetersStat(actualNodeRef);
}
}
- @Override
- public boolean hasActiveNodes() {
- return ( ! statNodeHolder.isEmpty());
+ /* Validates a node InstanceIdentifier for (un)registration use:
+  * it must be non-null and fully keyed (not wildcarded). */
+ private boolean isNodeIdentValidForUse(final InstanceIdentifier<Node> ident) {
+ if (ident == null) {
+ /* no '{}' placeholder here - there is no argument to log for a null ident */
+ LOG.warn("FlowCapableNode InstanceIdentifier can not be null!");
+ return false;
+ }
+ if (ident.isWildcarded()) {
+ LOG.warn("FlowCapableNode InstanceIdentifier {} can not be wildcarded!", ident);
+ return false;
+ }
+ return true;
}
}
@Override
public Void call() throws Exception {
- Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
final GetGroupDescriptionInputBuilder builder =
new GetGroupDescriptionInputBuilder();
builder.setNode(nodeRef);
return;
}
}
- LOG.debug("Node {} has not removed.", nodeIdent);
+ LOG.debug("Node {} has not been removed.", nodeIdent);
+ }
+
+ @Override
+ /* Delegates the feature registration to the collector that owns the node;
+  * the first collector returning true ends the search. */
+ public void registerAdditionalNodeFeature(final InstanceIdentifier<Node> nodeIdent,
+ final StatCapabTypes statCapab) {
+ for (final StatPermCollector collector : statCollectors) {
+ if (collector.registerAdditionalNodeFeature(nodeIdent, statCapab)) {
+ return;
+ }
+ }
+ LOG.debug("Node {} has not been extended for feature {}!", nodeIdent, statCapab);
+ }
/* Getter internal Statistic Manager Job Classes */
*/
package org.opendaylight.controller.netconf.cli;
+import static com.google.common.base.Throwables.getStackTraceAsString;
+
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.net.InetAddress;
}
private static void handleException(final Exception e, final String message) {
- // FIXME syserr the exception and stacktrace
+        System.err.println(String.format("Error %s cause %s", message, getStackTraceAsString(e)));
}
private static void writeStatus(final ConsoleIO io, final String blueprint, final Object... args) {
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import org.opendaylight.controller.sal.connect.netconf.NetconfDevice;
+import org.opendaylight.controller.sal.connect.netconf.NetconfDevice.SchemaResourcesDTO;
+import org.opendaylight.controller.sal.connect.netconf.NetconfStateSchemas.NetconfStateSchemasResolverImpl;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceFilter;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.util.FilesystemSchemaSourceCache;
import org.opendaylight.yangtools.yang.model.util.repo.AbstractCachingSchemaSourceProvider;
import org.opendaylight.yangtools.yang.model.util.repo.FilesystemSchemaCachingProvider;
import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProviders;
+import org.opendaylight.yangtools.yang.parser.repo.SharedSchemaRepository;
+import org.opendaylight.yangtools.yang.parser.util.TextToASTTransformer;
/**
* Manages connect/disconnect to 1 remote device
private final NioEventLoopGroup nettyThreadGroup;
private final NetconfClientDispatcherImpl netconfClientDispatcher;
+ private static final String CACHE = "cache/schema";
+
// Connection
private NetconfDeviceConnectionHandler handler;
private NetconfDevice device;
handler = new NetconfDeviceConnectionHandler(commandDispatcher, schemaContextRegistry,
console, name);
- device = NetconfDevice.createNetconfDevice(deviceId, getGlobalNetconfSchemaProvider(), executor, handler);
+
+ final SharedSchemaRepository repository = new SharedSchemaRepository("repo");
+ final SchemaContextFactory schemaContextFactory = repository.createSchemaContextFactory(SchemaSourceFilter.ALWAYS_ACCEPT);
+ final FilesystemSchemaSourceCache<YangTextSchemaSource> cache = new FilesystemSchemaSourceCache<>(repository, YangTextSchemaSource.class, new File(CACHE));
+ repository.registerSchemaSourceListener(cache);
+ repository.registerSchemaSourceListener(TextToASTTransformer.create(repository, repository));
+
+ device = new NetconfDevice(new SchemaResourcesDTO(repository, schemaContextFactory, new NetconfStateSchemasResolverImpl()),
+ deviceId, handler, executor, new NetconfMessageTransformer());
listener = new NetconfDeviceCommunicator(deviceId, device);
configBuilder.withSessionListener(listener);
listener.initializeRemoteConnection(netconfClientDispatcher, configBuilder.build());
LOG.trace("SSH subsystem channel opened successfully on channel: {}", ctx.channel());
connectPromise.setSuccess();
- connectPromise = null;
// TODO we should also read from error stream and at least log from that
private synchronized void handleSshSetupFailure(final ChannelHandlerContext ctx, final Throwable e) {
LOG.warn("Unable to setup SSH connection on channel: {}", ctx.channel(), e);
- connectPromise.setFailure(e);
- connectPromise = null;
- throw new IllegalStateException("Unable to setup SSH connection on channel: " + ctx.channel(), e);
+ disconnect(ctx, ctx.newPromise());
+
+ // If the promise is not yet done, we have failed with initial connect and set connectPromise to failure
+ if(!connectPromise.isDone()) {
+ connectPromise.setFailure(e);
+ }
}
@Override
@Override
public synchronized void disconnect(final ChannelHandlerContext ctx, final ChannelPromise promise) {
+ // Super disconnect is necessary in this case since we are using NioSocketChannel and it needs to cleanup its resources
+ // e.g. Socket that it tries to open in its constructor (https://bugs.opendaylight.org/show_bug.cgi?id=2430)
+ // TODO better solution would be to implement custom ChannelFactory + Channel that will use mina SSH lib internally: port this to custom channel implementation
+ try {
+ super.disconnect(ctx, ctx.newPromise());
+ } catch (final Exception e) {
+ LOG.warn("Unable to cleanup all resources for channel: {}. Ignoring.", ctx.channel(), e);
+ }
+
if(sshReadAsyncListener != null) {
sshReadAsyncListener.close();
}
});
}
+ // If we have already succeeded and the session was dropped after, we need to fire inactive to notify reconnect logic
+ if(connectPromise.isSuccess()) {
+ ctx.fireChannelInactive();
+ }
+
channel = null;
- promise.setSuccess();
+ promise.setSuccess();
LOG.debug("SSH session closed on channel: {}", ctx.channel());
- ctx.fireChannelInactive();
}
}
package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
-import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
import com.google.common.util.concurrent.FutureCallback;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
+import io.netty.channel.DefaultChannelPromise;
import java.io.IOException;
import java.net.SocketAddress;
import org.apache.sshd.ClientChannel;
doReturn(ctx).when(ctx).fireChannelActive();
doReturn(ctx).when(ctx).fireChannelInactive();
doReturn(ctx).when(ctx).fireChannelRead(anyObject());
+ doReturn(mock(ChannelFuture.class)).when(ctx).disconnect(any(ChannelPromise.class));
doReturn(getMockedPromise()).when(ctx).newPromise();
}
verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async);
verify(promise).setSuccess();
- verifyNoMoreInteractions(promise);
verify(ctx).fireChannelActive();
}
verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async);
- try {
- sshChannelOpenListener.operationComplete(getFailedOpenFuture());
- fail("Exception expected");
- } catch (final Exception e) {
- verify(promise).setFailure(any(Throwable.class));
- verifyNoMoreInteractions(promise);
- // TODO should ctx.channelInactive be called if we throw exception ?
- }
+ sshChannelOpenListener.operationComplete(getFailedOpenFuture());
+ verify(promise).setFailure(any(Throwable.class));
}
@Test
final AuthFuture authFuture = getFailedAuthFuture();
- try {
- sshAuthListener.operationComplete(authFuture);
- fail("Exception expected");
- } catch (final Exception e) {
- verify(promise).setFailure(any(Throwable.class));
- verifyNoMoreInteractions(promise);
- // TODO should ctx.channelInactive be called ?
- }
+ sshAuthListener.operationComplete(authFuture);
+ verify(promise).setFailure(any(Throwable.class));
}
private AuthFuture getFailedAuthFuture() {
asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
final ConnectFuture connectFuture = getFailedConnectFuture();
- try {
- sshConnectListener.operationComplete(connectFuture);
- fail("Exception expected");
- } catch (final Exception e) {
- verify(promise).setFailure(any(Throwable.class));
- verifyNoMoreInteractions(promise);
- // TODO should ctx.channelInactive be called ?
- }
+ sshConnectListener.operationComplete(connectFuture);
+ verify(promise).setFailure(any(Throwable.class));
}
private ConnectFuture getFailedConnectFuture() {
}
private ChannelPromise getMockedPromise() {
- final ChannelPromise promise = mock(ChannelPromise.class);
- doReturn(promise).when(promise).setSuccess();
- doReturn(promise).when(promise).setFailure(any(Throwable.class));
- return promise;
+ return spy(new DefaultChannelPromise(channel));
}
private static abstract class SuccessFutureListener<T extends SshFuture<T>> implements FutureCallback<SshFutureListener<T>> {
Thread.sleep(100);
}
assertFalse(echoClientHandler.isConnected());
- assertEquals(State.CONNECTION_CLOSED, echoClientHandler.getState());
+ assertEquals(State.FAILED_TO_CONNECT, echoClientHandler.getState());
}
}
<modules>
<module>netconf-api</module>
- <!--FIXME make compilable-->
- <!--<module>netconf-cli</module>-->
+ <module>netconf-cli</module>
<module>netconf-config</module>
<module>netconf-impl</module>
<module>config-netconf-connector</module>
if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(singleton.getLoadBalancerPoolID())) {
throw new BadRequestException("LoadBalancerPool UUID already exists");
}
- loadBalancerPoolInterface.addNeutronLoadBalancerPool(singleton);
-
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
if (instances != null) {
for (Object instance : instances) {